# Cotubex pre-sales assistant — Gradio demo app (Qwen2.5-0.5B-Instruct backend)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
import spaces
# Custom inline CSS for dark/black theme.
# Passed to gr.Blocks(css=...) below; overrides Gradio's theme variables
# (background, accent green, primary dark green, light text).
custom_css = """
:root {
--color-bg-light: #000000;
--color-accent: #35a387;
--color-primary-dark: #0e3229;
--color-text-dark: #f3f3f3;
}
"""
# Force dark mode + black theme overrides.
# Injected via gr.HTML(...) rather than the css= argument so the <style>
# tag lands in the page body and can use !important to beat Gradio's own
# component styles (boxes, panels, inputs, buttons).
force_dark_css = """
<style>
html {
color-scheme: dark;
}
body {
background-color: #000000 !important;
color: #f3f3f3 !important;
}
.gr-box,
.gr-panel,
.gr-chatbox,
input,
textarea {
background-color: #000000 !important;
border-color: #333 !important;
color: #f3f3f3 !important;
}
button.gr-button {
background-color: #35a387 !important;
color: white !important;
}
</style>
"""
# Load model and tokenizer.
# Qwen2.5-0.5B-Instruct is a small instruction-tuned model — suitable for a
# lightweight demo Space; downloaded from the Hugging Face Hub on first run.
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",      # let transformers pick the checkpoint's dtype
    low_cpu_mem_usage=True   # stream weights in to reduce peak RAM at load
)
# Shared text-generation pipeline used by sales_agent().
# Sampling (do_sample + temperature/top_p) gives varied phrasing;
# max_new_tokens caps answer length to keep responses snappy.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=275,
    temperature=0.7,
    top_p=0.95,
    do_sample=True
)
@spaces.GPU(duration=60)
def sales_agent(message, chat_history=None):
    """Answer a pre-sales question about Cotubex products.

    Detects the requested language from a flag-emoji prefix on the message
    (🇫🇷 French, 🇳🇱 Dutch, anything else defaults to English 🇬🇧), builds a
    prompt grounded in the hard-coded product list, and returns the model's
    answer prefixed with the matching flag.

    Args:
        message: The user's question, optionally prefixed with a flag emoji.
        chat_history: Conversation history supplied by gr.ChatInterface.
            Currently unused — every question is answered independently.

    Returns:
        The generated answer as a string, prefixed with the language flag.
    """
    # None sentinel instead of a mutable default: a shared [] default would
    # leak state between calls.
    if chat_history is None:
        chat_history = []

    # Map the flag-emoji prefix to the language the model should answer in.
    if message.startswith("🇫🇷"):
        lang_flag = "🇫🇷"
        prompt_lang = "en français"
    elif message.startswith("🇳🇱"):
        lang_flag = "🇳🇱"
        prompt_lang = "in het Nederlands"
    else:
        lang_flag = "🇬🇧"
        prompt_lang = "in English"

    prompt = f"""
You are a helpful pre-sales agent at Cotubex, a tech store in Brussels. Answer in {prompt_lang}.
Product List:
- Samsung 990 PRO 1TB NVME – EUR 119.00 – In Stock
- Travel adapter Europe to Switzerland + Italy + Brazil – EUR 15.79 – In Stock
- Be Quiet! Pure Loop 2 240 Watercooling – EUR 109.90 – In Stock
- Zotac 5060 TI 16GB OC – EUR 535.00 – In Stock
- Logitech G502 HERO – EUR 49.99 – In Stock
Question: {message}
Answer ({lang_flag}):
"""
    response = pipe(prompt)
    # removeprefix (rather than str.replace) strips the echoed prompt only at
    # the start of the output, and is a harmless no-op if the pipeline ever
    # returns just the completion (e.g. return_full_text=False).
    answer = response[0]["generated_text"].removeprefix(prompt).strip()
    return f"{lang_flag} {answer}"
# Example prompts, one per supported language.
# The flag-emoji prefix is what sales_agent() uses to pick the answer
# language, so each example must start with a properly-encoded flag.
examples = [
    ["🇫🇷 Quel est le prix de la carte graphique Zotac 5060 TI ?"],
    ["🇳🇱 Wat kost de Zotac 5060 TI videokaart?"],
    ["🇬🇧 Is the Be Quiet water cooler available?"]
]
# Build the Gradio interface: themed Blocks wrapper around a ChatInterface.
with gr.Blocks(css=custom_css) as demo:
    # Inject the forced dark-mode <style> overrides into the page body.
    gr.HTML(force_dark_css)
    gr.Markdown(
        "### Cotubex Pre-Sales Assistant 🇫🇷🇧🇪🇳🇱\n"
        "Ask us anything about products, pricing, availability."
    )
    gr.ChatInterface(
        fn=sales_agent,
        chatbot=gr.Chatbot(height=400, type="messages"),
        examples=examples
    )
# Script entry point: launch the Gradio server when run directly.
# (Removed a stray trailing "|" — copy/paste residue that was a syntax error.)
if __name__ == "__main__":
    demo.launch()