import gradio as gr
import requests
import base64
import os
import re
import csv
from datetime import datetime
from pathlib import Path
# API key and model lists (the key is read from an environment variable here rather than hard-coded;
# OPENROUTER_API_KEY is the assumed variable name)
API_KEY = os.getenv("OPENROUTER_API_KEY", "")
VISION_MODELS = [
    "meta-llama/llama-4-maverick:free",
    "google/gemini-pro-vision:free",
    "openai/gpt-4-vision-preview",
    "google/gemini-2.0-flash-exp:free",
]
TEXT_MODELS = [
    "mistralai/devstral-small:free",
    "openchat/openchat-3.5-1210:free",
    "nousresearch/nous-capybara-7b:free",
    "deepseek/deepseek-r1-0528:free",
    "deepseek/deepseek-chat-v3-0324:free",
]
# Ensure ./data and ./data/saved_images exist
BASE_DATA_FOLDER = Path("data")
BASE_DATA_FOLDER.mkdir(exist_ok=True)
IMAGE_SAVE_FOLDER = BASE_DATA_FOLDER / "saved_images"
IMAGE_SAVE_FOLDER.mkdir(exist_ok=True)
LOG_FILE = BASE_DATA_FOLDER / "chat_logs.csv"
# Memory to store conversation context
chat_history = []
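# Note: only assistant replies are appended to chat_history (in ask_bot below); try_model adds the
# user turn to a local copy, so follow-up context carries previous answers but not previous questions.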
# Text cleaning function: strips LaTeX-style markup from model output
def clean_text(text):
    text = re.sub(r"\\[a-zA-Z]+\{.*?\}", "", text)  # \command{...}
    text = re.sub(r"\\[a-zA-Z]+", "", text)  # bare \commands
    text = re.sub(r"\$+", "", text)  # $ math delimiters
    text = re.sub(r"[\{\}\[\]\(\)]", "", text)  # leftover braces, brackets, parentheses
    return text.strip()
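# For example (illustrative): clean_text("The answer is $\\frac{1}{2}$") returns "The answer is 2".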
# Query a single model via the OpenRouter chat completions endpoint.
# Returns the reply text, or None on any failure so the caller can fall back to the next model.
def try_model(image_b64, question, model_name, is_vision=False):
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }
    messages = chat_history.copy()  # maintain conversation context
    content = [{"type": "text", "text": question}]
    if is_vision:
        content.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}
        })
    messages.append({"role": "user", "content": content})
    payload = {
        "model": model_name,
        "messages": messages
    }
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            json=payload,
            headers=headers,
            timeout=120,
        )
        data = response.json()
        if "error" in data:
            raise Exception(data["error"].get("message", "Unknown error"))
        return data["choices"][0]["message"]["content"]
    except Exception:
        # Network errors, API errors, and unexpected response shapes all fall through to the next model.
        return None
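# A successful OpenRouter response is expected to look roughly like
#   {"choices": [{"message": {"role": "assistant", "content": "..."}}]}
# which is why try_model reads data["choices"][0]["message"]["content"].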
# Main chatbot function: saves the uploaded image, queries models with fallback, and logs the exchange
def ask_bot(image, question):
    image_path = ""
    image_b64 = None
    if image:
        # Save the uploaded image and encode it for the vision models
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        image_path = str(IMAGE_SAVE_FOLDER / f"img_{timestamp}.jpg")
        image.convert("RGB").save(image_path)  # convert so RGBA/PNG uploads can be written as JPEG
        with open(image_path, "rb") as f:
            image_b64 = base64.b64encode(f.read()).decode("utf-8")
    # Pick the model list and try each model until one returns a reply
    models = VISION_MODELS if image_b64 else TEXT_MODELS
    answer = None
    for model in models:
        result = try_model(image_b64, question, model, is_vision=bool(image_b64))
        if result:
            answer = result
            break
    if not answer:
        answer = "❌ All free models have exceeded their daily limit or failed."
    clean_answer = clean_text(answer)
    # Store in memory for follow-ups
    chat_history.append({"role": "assistant", "content": clean_answer})
    # Append the exchange to the CSV log
    with open(LOG_FILE, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow([
            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            question,
            clean_answer,
            image_path
        ])
    return clean_answer
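# Each log row is: timestamp, question, cleaned answer, saved image path (empty for text-only turns),
# e.g. "2024-01-01 12:00:00", "What is in this photo?", "...", "data/saved_images/img_20240101_120000.jpg".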
# Encode the header logo to a base64 data: URI so it can be embedded inline in the HTML below
logo_path = "681487a8a36e5_download.jpg"

def encode_image_to_base64(image_path):
    if not os.path.exists(image_path):
        return None
    with open(image_path, "rb") as img_file:
        encoded = base64.b64encode(img_file.read()).decode("utf-8")
    return f"data:image/jpeg;base64,{encoded}"

encoded_logo = encode_image_to_base64(logo_path)
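# Note: encode_image_to_base64 returns None when the logo file is missing; the <img> tag below
# would then render with src="None" (a broken image) instead of crashing the app.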
# Gradio UI with welcome message
with gr.Blocks(css="footer {display: none !important;}") as demo:
    with gr.Row(elem_id="header-row"):
        gr.HTML(f"""
        <div style="
            display: flex;
            align-items: center;
            gap: 15px;
            padding: 15px 20px;
            background-color: #f5f5f5;
            border-radius: 12px;
            box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
            width: 100%;
        ">
            <img src="{encoded_logo}" style="height: 50px; width: 50px; border-radius: 8px;">
            <h1 style="
                font-family: 'Segoe UI', sans-serif;
                font-size: 28px;
                margin: 0;
                color: #333;
            ">Camb AI</h1>
        </div>
        """)
    with gr.Row():
        image_input = gr.Image(type="pil", label="📸 Upload an Image (optional)")
        question_input = gr.Textbox(label="📝 Ask something", placeholder="What would you like to know?")
    submit_btn = gr.Button("Submit")
    output_box = gr.Textbox(label="💡 Answer", lines=4)
    submit_btn.click(fn=ask_bot, inputs=[image_input, question_input], outputs=output_box)
    # Show a greeting when the page loads
    demo.load(lambda: "Hi! I'm Camb AI. Ask me anything!", outputs=output_box)
# Launch the app
if __name__ == "__main__":
    demo.launch()
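# To run locally (assuming the environment-variable setup above):
#   OPENROUTER_API_KEY=sk-... python main.py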