alixagari2000 committed on
Commit
2171a69
Β·
verified Β·
1 Parent(s): a4385a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +327 -326
app.py CHANGED
@@ -1,326 +1,327 @@
1
- import os
2
- import logging
3
- import gradio as gr
4
- import re
5
-
6
- #from langchain_chroma import Chroma
7
- #from langchain_huggingface import HuggingFaceEmbeddings
8
-
9
- from langchain_community.vectorstores import Chroma
10
- from langchain_community.embeddings import HuggingFaceEmbeddings
11
-
12
-
13
- from groq import Groq
14
-
15
- from transformers import pipeline
16
-
17
-
18
- import requests # for calling SerpAPI
19
- # Add your SerpAPI key here
20
- SERPAPI_API_KEY = "48125364c49a1952f1c8fdadf0d22e0d5bc3d195a5a98ad64d7e935aad503efa"
21
-
22
- # Load zero-shot classification model
23
- classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
24
-
25
- # Suppress noisy logs
26
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
27
- logging.getLogger('tensorflow').setLevel(logging.ERROR)
28
- logging.getLogger('torch').setLevel(logging.ERROR)
29
-
30
- # Config
31
- CHROMA_DIR = "chroma_country_info"
32
- GROQ_MODEL = "llama-3.3-70b-versatile" # You can also try: mixtral-8x7b or gemma-7b-it
33
-
34
-
35
-
36
-
37
- # Load Groq client
38
- client = Groq(api_key="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")
39
-
40
- # Load vector DB
41
- print("πŸ” Loading Chroma Vector DB...")
42
- embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
43
- vectordb = Chroma(persist_directory=CHROMA_DIR, embedding_function=embeddings)
44
-
45
- # Memory for user-provided factual information (in-session only)
46
- user_memory = []
47
-
48
-
49
-
50
-
51
- def is_question(text):
52
-
53
- QUESTION_STARTERS = [
54
- "who", "what", "when", "where", "why", "which", "whose", "whom", "how",
55
- "is", "are", "am", "was", "were",
56
- "do", "does", "did",
57
- "have", "has", "had",
58
- "can", "could", "will", "would", "shall", "should",
59
- "may", "might", "must",
60
- "didn't", "isn't", "aren't", "wasn't", "weren't",
61
- "won't", "shouldn't", "couldn't", "wouldn't"
62
- ]
63
-
64
- text_clean = text.strip().lower()
65
-
66
- if text_clean.endswith('?'):
67
- return True
68
-
69
- # Rule 2: Starts with known question word
70
- words = text_clean.split()
71
- if words and words[0] in QUESTION_STARTERS:
72
- return True
73
-
74
-
75
- return False
76
-
77
- def is_fact(text):
78
-
79
- if is_question(text):
80
- return False
81
-
82
- candidate_labels = ["fact", "opinion", "speculation"]
83
- result = classifier(text, candidate_labels)
84
-
85
- label = result['labels'][0]
86
- score = result['scores'][0]
87
-
88
- # Rule: High-confidence fact => keep as fact
89
- if label == "fact" and score > 0.75:
90
- final_label = "fact"
91
-
92
- # Rule: If label is opinion or speculation, but sentence sounds assertive => custom fact
93
- elif label in ["opinion", "speculation"] and score < 0.9:
94
- final_label = "custom fact"
95
-
96
- # Rule: Low-confidence fact => custom fact
97
- elif label == "fact" and score <= 0.75:
98
- final_label = "custom fact"
99
-
100
- else:
101
- final_label = "custom fact"
102
-
103
- # check if there is "fact in final_label and return True
104
-
105
- is_fact = "fact" in final_label
106
-
107
- return is_fact
108
-
109
- def clear_user_memory():
110
- user_memory.clear()
111
- return [], [] # Clear chat and state too (optional)
112
-
113
-
114
- def generate_answer(user_input):
115
- print("πŸ“š generate_answer:\n", user_input)
116
-
117
- # Handle memory commands
118
- if "what do you remember" in user_input.lower():
119
- if not user_memory:
120
- return "I don't remember anything yet."
121
- return "Here's what I remember:\n" + "\n".join(f"- {fact}" for fact in user_memory)
122
-
123
- if "forget everything" in user_input.lower():
124
- user_memory.clear()
125
- return "Okay, I’ve forgotten everything you told me."
126
-
127
- if not user_input.strip():
128
- return "Please enter a question."
129
-
130
- # Store statements as memory
131
- Isitquestion=True
132
- if is_fact(user_input):
133
- Isitquestion=False
134
- if user_input not in user_memory:
135
- user_memory.append(user_input)
136
- print("βœ… Added to memory:", user_input)
137
-
138
-
139
-
140
- # Retrieve context from Chroma
141
- docs = vectordb.similarity_search(user_input, k=3)
142
- context = "\n\n".join([doc.page_content for doc in docs]) if docs else ""
143
- memory_context = "\n".join(user_memory)
144
-
145
- system_prompt = (
146
- "You are a helpful AI assistant. Use ONLY the context and memory provided below. "
147
- "If the answer is not in the context or memory, respond with: 'I don't know based on the context.'\n\n"
148
- f"Context from documents:\n{context if context else 'None'}\n\n"
149
- f"Memory from conversation:\n{memory_context if memory_context else 'None'}"
150
- )
151
-
152
- system_prompt = (
153
- "You are a helpful AI assistant. "
154
- "You MUST NOT use any knowledge from your pretraining. "
155
- "Only use the information provided in the context or memory below. "
156
- "If the information is not found in either, always reply with:\n"
157
- "'I don't know based on the context.'\n\n"
158
- f"Context from documents:\n{context if context else 'None'}\n\n"
159
- f"Memory from conversation:\n{memory_context if memory_context else 'None'}"
160
- )
161
-
162
- system_prompt = (
163
- "You are a helpful AI assistant.\n"
164
- "You MUST NOT use any knowledge from your pretraining.\n"
165
- "Use ONLY the information in the context or memory below.\n"
166
- "ONLY include facts from memory or context **if they directly answer or support the user's input**.\n"
167
- "If the information is not found, reply with:\n"
168
- "'I don't know based on the context.'\n\n"
169
- f"Context from documents:\n{context if context else 'None'}\n\n"
170
- f"Memory from conversation:\n{memory_context if memory_context else 'None'}"
171
- )
172
-
173
- system_prompt = (
174
- "You are a helpful AI assistant.\n"
175
- "You must NEVER use your own knowledge or make any assumptions.\n"
176
- "Only respond using the information provided in the CONTEXT and MEMORY sections below.\n"
177
- "If the answer is not found there, you MUST reply with:\n"
178
- "'I don't know based on the context.'\n"
179
- "Do not guess or calculate anything that is not already mentioned.\n"
180
- "Do not try to verify or correct any user-provided statements.\n\n"
181
- f"CONTEXT:\n{context if context else 'None'}\n\n"
182
- f"MEMORY:\n{memory_context if memory_context else 'None'}"
183
- )
184
-
185
-
186
- messages = [
187
- {"role": "system", "content": system_prompt},
188
- {"role": "user", "content": user_input}
189
- ]
190
-
191
- chat_completion = client.chat.completions.create(
192
- model=GROQ_MODEL,
193
- messages=messages
194
- )
195
-
196
- final_answer = chat_completion.choices[0].message.content.strip()
197
- print("πŸ€– Model Response:", final_answer)
198
-
199
-
200
- if not Isitquestion and final_answer.lower() == "i don't know based on the context." :
201
- return "Got it"
202
-
203
- return final_answer
204
-
205
-
206
- def search_google(query):
207
- url = f"https://serpapi.com/search.json?q={query}&engine=google&api_key={SERPAPI_API_KEY}"
208
- resp = requests.get(url)
209
- data = resp.json()
210
-
211
- if "answer_box" in data:
212
- answer_box = data["answer_box"]
213
- if "answer" in answer_box and answer_box["answer"]:
214
- return answer_box["answer"]
215
- if "snippet" in answer_box and answer_box["snippet"]:
216
- return answer_box["snippet"]
217
-
218
- snippets = []
219
- for i, result in enumerate(data.get("organic_results", [])[:3], 1):
220
- snippet = result.get("snippet", "")
221
- if snippet:
222
- snippets.append(f"{i}. {snippet}")
223
-
224
- if snippets:
225
- return "\n\n".join(snippets)
226
-
227
- return "No snippet found."
228
-
229
-
230
-
231
- def search_bing(query):
232
- # Bing search via SerpAPI
233
- url = f"https://serpapi.com/search.json?q={query}&engine=bing&api_key={SERPAPI_API_KEY}"
234
- resp = requests.get(url)
235
- data = resp.json()
236
- try:
237
- snippet = data['organic_results'][0].get('snippet', 'No snippet found.')
238
- except Exception:
239
- snippet = "No snippet found."
240
- return snippet
241
-
242
-
243
-
244
- # Gradio UI
245
- def chat_interface(message, history):
246
- if not message.strip():
247
- return history, history, "" , gr.update(visible=False), gr.update(visible=False) # hide buttons on empty input
248
-
249
- reply = generate_answer(message)
250
-
251
- messagegui = f"You said: {message}"
252
- history.append({"role": "user", "content": messagegui})
253
- history.append({"role": "assistant", "content": reply})
254
-
255
- # Show buttons only if reply is exactly "I don't know based on the context."
256
- show_buttons = reply.lower() == "i don't know based on the context."
257
-
258
- return (
259
- history,
260
- history,
261
- "", # clear input box
262
- gr.update(visible=show_buttons),
263
- gr.update(visible=show_buttons),
264
- )
265
-
266
-
267
-
268
- def google_search_button_click(history, state):
269
- # Find last user message to search
270
- last_user_message = ""
271
- for msg in reversed(history):
272
- if msg["role"] == "user":
273
- # Strip "You said: " prefix to get original query
274
- last_user_message = msg["content"].replace("You said: ", "", 1)
275
- break
276
-
277
- snippet = search_google(last_user_message)
278
- history.append({"role": "assistant", "content": f"Google search snippet:\n{snippet}"})
279
- # After search, keep buttons hidden until next unknown answer
280
- return history, history, gr.update(visible=True), gr.update(visible=True)
281
-
282
-
283
- def bing_search_button_click(history, state):
284
- last_user_message = ""
285
- for msg in reversed(history):
286
- if msg["role"] == "user":
287
- last_user_message = msg["content"].replace("You said: ", "", 1)
288
- break
289
-
290
- snippet = search_bing(last_user_message)
291
- history.append({"role": "assistant", "content": f"Bing search snippet:\n{snippet}"})
292
- return history, history, gr.update(visible=True), gr.update(visible=True)
293
-
294
-
295
- # Launch UI
296
- with gr.Blocks() as demo:
297
- gr.Markdown("## 🌍 Chatbot : countries and their capital city and population)")
298
- chatbot = gr.Chatbot(label="Chat History", type="messages")
299
-
300
- with gr.Row():
301
- # Add buttons for search, initially hidden
302
- google_btn = gr.Button("Search on Google", visible=False)
303
- bing_btn = gr.Button("Search on Bing", visible=False)
304
-
305
- msg = gr.Textbox(label="Your message", placeholder="Ask or tell me anything about countries and capital city", lines=2)
306
-
307
- with gr.Row():
308
- submit_btn = gr.Button("Submit")
309
- clear = gr.Button("Clear chat")
310
- clear_memory = gr.Button("Clear My Short Memory") # πŸ‘ˆ New button
311
-
312
-
313
-
314
-
315
- state = gr.State([])
316
-
317
- submit_btn.click(chat_interface, [msg, state], [chatbot, state, msg, google_btn, bing_btn])
318
- clear.click(lambda: ([], []), None, [chatbot, state])
319
- clear_memory.click(clear_user_memory, None, [chatbot, state])
320
-
321
- google_btn.click(google_search_button_click, [state, state], [chatbot, state, google_btn, bing_btn])
322
- bing_btn.click(bing_search_button_click, [state, state], [chatbot, state, google_btn, bing_btn])
323
-
324
-
325
- demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
326
-
 
 
1
+ import os
2
+ import logging
3
+ import gradio as gr
4
+ import re
5
+
6
+ #from langchain_chroma import Chroma
7
+ #from langchain_huggingface import HuggingFaceEmbeddings
8
+
9
+ from langchain_community.vectorstores import Chroma
10
+ from langchain_community.embeddings import HuggingFaceEmbeddings
11
+
12
+
13
+ from groq import Groq
14
+
15
+ from transformers import pipeline
16
+
17
+
18
+ import requests # for calling SerpAPI
19
+ # Add your SerpAPI key here
20
+ SERPAPI_API_KEY = "48125364c49a1952f1c8fdadf0d22e0d5bc3d195a5a98ad64d7e935aad503efa"
21
+
22
+ # Load zero-shot classification model
23
+ classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
24
+
25
+ # Suppress noisy logs
26
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
27
+ logging.getLogger('tensorflow').setLevel(logging.ERROR)
28
+ logging.getLogger('torch').setLevel(logging.ERROR)
29
+
30
+ # Config
31
+ CHROMA_DIR = "chroma_country_info"
32
+ GROQ_MODEL = "llama-3.3-70b-versatile" # You can also try: mixtral-8x7b or gemma-7b-it
33
+
34
+
35
+
36
+
37
+ # Load Groq client
38
+ client = Groq(api_key="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")
39
+
40
+ # Load vector DB
41
+ print("πŸ” Loading Chroma Vector DB...")
42
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
43
+ vectordb = Chroma(persist_directory=CHROMA_DIR, embedding_function=embeddings)
44
+
45
+ # Memory for user-provided factual information (in-session only)
46
+ user_memory = []
47
+
48
+
49
+
50
+
51
def is_question(text):
    """Return True if *text* looks like a question.

    Two heuristics:
      1. the cleaned text ends with '?', or
      2. its first word is a common English question/auxiliary word.
    """
    # frozenset gives O(1) membership instead of scanning a list each call.
    question_starters = frozenset({
        "who", "what", "when", "where", "why", "which", "whose", "whom", "how",
        "is", "are", "am", "was", "were",
        "do", "does", "did",
        "have", "has", "had",
        "can", "could", "will", "would", "shall", "should",
        "may", "might", "must",
        "didn't", "isn't", "aren't", "wasn't", "weren't",
        "won't", "shouldn't", "couldn't", "wouldn't",
    })

    text_clean = text.strip().lower()

    # Rule 1: explicit question mark.
    if text_clean.endswith('?'):
        return True

    # Rule 2: starts with a known question word.
    words = text_clean.split()
    return bool(words) and words[0] in question_starters
76
+
77
def is_fact(text):
    """Return True if *text* should be stored as a user-provided fact.

    Questions are never facts. Other inputs are labelled by the zero-shot
    classifier as fact / opinion / speculation, then post-processed by the
    confidence rules below.
    """
    if is_question(text):
        return False

    candidate_labels = ["fact", "opinion", "speculation"]
    result = classifier(text, candidate_labels)

    # Top-ranked label and its confidence score from the classifier.
    label = result['labels'][0]
    score = result['scores'][0]

    # Rule: High-confidence fact => keep as fact
    if label == "fact" and score > 0.75:
        final_label = "fact"

    # Rule: If label is opinion or speculation, but sentence sounds assertive => custom fact
    elif label in ["opinion", "speculation"] and score < 0.9:
        final_label = "custom fact"

    # Rule: Low-confidence fact => custom fact
    elif label == "fact" and score <= 0.75:
        final_label = "custom fact"

    else:
        final_label = "custom fact"

    # NOTE(review): every branch above assigns a label that CONTAINS "fact",
    # so the substring test below is True for any non-question input and the
    # classifier result never changes the outcome — confirm this is intended.
    is_fact = "fact" in final_label

    return is_fact
108
+
109
def clear_user_memory():
    """Wipe the in-session fact memory and reset the chat UI.

    Returns ([], []) to clear the Chatbot component and the gr.State history.
    """
    user_memory.clear()
    return [], []  # Clear chat and state too (optional)
112
+
113
+
114
def generate_answer(user_input):
    """Answer *user_input* using ONLY the Chroma context and session memory.

    Handles the special commands "what do you remember" and "forget
    everything" before any retrieval. Side effect: assertive statements
    (per is_fact) are appended to the module-level user_memory list.
    Returns the model's reply, or "Got it" when a stored statement could
    not be grounded (so the user isn't told "I don't know" about their
    own fact).
    """
    print("πŸ“š generate_answer:\n", user_input)

    # Handle memory commands
    if "what do you remember" in user_input.lower():
        if not user_memory:
            return "I don't remember anything yet."
        return "Here's what I remember:\n" + "\n".join(f"- {fact}" for fact in user_memory)

    if "forget everything" in user_input.lower():
        user_memory.clear()
        return "Okay, I’ve forgotten everything you told me."

    if not user_input.strip():
        return "Please enter a question."

    # Store statements as memory
    is_it_question = True
    if is_fact(user_input):
        is_it_question = False
        if user_input not in user_memory:
            user_memory.append(user_input)
            print("βœ… Added to memory:", user_input)

    # Retrieve context from Chroma
    docs = vectordb.similarity_search(user_input, k=3)
    context = "\n\n".join(doc.page_content for doc in docs) if docs else ""
    memory_context = "\n".join(user_memory)

    # NOTE: the original built four successive prompts and kept only the last;
    # the three dead assignments have been removed.
    system_prompt = (
        "You are a helpful AI assistant.\n"
        "You must NEVER use your own knowledge or make any assumptions.\n"
        "Only respond using the information provided in the CONTEXT and MEMORY sections below.\n"
        "If the answer is not found there, you MUST reply with:\n"
        "'I don't know based on the context.'\n"
        "Do not guess or calculate anything that is not already mentioned.\n"
        "Do not try to verify or correct any user-provided statements.\n\n"
        f"CONTEXT:\n{context if context else 'None'}\n\n"
        f"MEMORY:\n{memory_context if memory_context else 'None'}"
    )

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input},
    ]

    chat_completion = client.chat.completions.create(
        model=GROQ_MODEL,
        messages=messages,
    )

    final_answer = chat_completion.choices[0].message.content.strip()
    print("πŸ€– Model Response:", final_answer)

    # Acknowledge a stored statement instead of replying "I don't know".
    if not is_it_question and final_answer.lower() == "i don't know based on the context.":
        return "Got it"

    return final_answer
204
+
205
+
206
def search_google(query):
    """Return a short answer or snippet for *query* via SerpAPI's Google engine.

    Prefers the answer box; falls back to up to three numbered organic
    snippets; returns "No snippet found." when nothing usable comes back.
    """
    # Let requests URL-encode the query (the original interpolated it raw,
    # breaking on '&', '#', spaces, etc.) and bound the call with a timeout
    # so the UI thread can never hang indefinitely.
    resp = requests.get(
        "https://serpapi.com/search.json",
        params={"q": query, "engine": "google", "api_key": SERPAPI_API_KEY},
        timeout=15,
    )
    try:
        data = resp.json()
    except ValueError:
        # Non-JSON response (e.g. an HTML error page).
        return "No snippet found."

    answer_box = data.get("answer_box") or {}
    if answer_box.get("answer"):
        return answer_box["answer"]
    if answer_box.get("snippet"):
        return answer_box["snippet"]

    snippets = []
    for i, result in enumerate(data.get("organic_results", [])[:3], 1):
        snippet = result.get("snippet", "")
        if snippet:
            snippets.append(f"{i}. {snippet}")

    if snippets:
        return "\n\n".join(snippets)

    return "No snippet found."
228
+
229
+
230
+
231
def search_bing(query):
    """Return the first organic snippet for *query* via SerpAPI's Bing engine."""
    # URL-encode via params (the original interpolated the raw query) and
    # add a timeout so a slow API can't block the UI.
    resp = requests.get(
        "https://serpapi.com/search.json",
        params={"q": query, "engine": "bing", "api_key": SERPAPI_API_KEY},
        timeout=15,
    )
    try:
        # ValueError covers a non-JSON body; LookupError covers a missing
        # 'organic_results' key or an empty result list.
        snippet = resp.json()['organic_results'][0].get('snippet', 'No snippet found.')
    except (ValueError, LookupError):
        snippet = "No snippet found."
    return snippet
241
+
242
+
243
+
244
+ # Gradio UI
245
def chat_interface(message, history):
    """Handle one chat turn.

    Appends the user message and the bot reply to the history, clears the
    input box, and shows the Google/Bing search buttons only when the bot
    could not answer from its context.
    """
    # Blank input: change nothing and keep the search buttons hidden.
    if not message.strip():
        return history, history, "", gr.update(visible=False), gr.update(visible=False)

    reply = generate_answer(message)

    history.append({"role": "user", "content": f"You said: {message}"})
    history.append({"role": "assistant", "content": reply})

    # Buttons appear only for the exact "unknown answer" sentinel reply.
    show_buttons = reply.lower() == "i don't know based on the context."

    return (
        history,
        history,
        "",  # clear input box
        gr.update(visible=show_buttons),
        gr.update(visible=show_buttons),
    )
265
+
266
+
267
+
268
def google_search_button_click(history, state):
    """Search Google (via SerpAPI) for the latest user query; append the snippet."""
    query = ""
    # Walk backwards to the most recent user turn.
    for entry in reversed(history):
        if entry["role"] != "user":
            continue
        # Drop the "You said: " prefix that chat_interface adds.
        query = entry["content"].replace("You said: ", "", 1)
        break

    result = search_google(query)
    history.append({"role": "assistant", "content": f"Google search snippet:\n{result}"})
    # Both search buttons remain visible so the other engine can be tried too.
    return history, history, gr.update(visible=True), gr.update(visible=True)
281
+
282
+
283
def bing_search_button_click(history, state):
    """Search Bing (via SerpAPI) for the latest user query; append the snippet."""
    query = ""
    for entry in reversed(history):
        if entry["role"] != "user":
            continue
        # Strip the "You said: " prefix that chat_interface adds.
        query = entry["content"].replace("You said: ", "", 1)
        break

    result = search_bing(query)
    history.append({"role": "assistant", "content": f"Bing search snippet:\n{result}"})
    # Keep both search buttons visible after the lookup.
    return history, history, gr.update(visible=True), gr.update(visible=True)
293
+
294
+
295
+ # Launch UI
296
# Build the Gradio UI and wire the event handlers.
with gr.Blocks() as demo:
    gr.Markdown("## 🌍 Chatbot : countries and their capital city and population)")
    chatbot = gr.Chatbot(label="Chat History", type="messages")

    with gr.Row():
        # Add buttons for search, initially hidden; chat_interface reveals
        # them when the bot replies "I don't know based on the context."
        google_btn = gr.Button("Search on Google", visible=False)
        bing_btn = gr.Button("Search on Bing", visible=False)

    msg = gr.Textbox(label="Your message", placeholder="Ask or tell me anything about countries and capital city", lines=2)
    # NOTE(review): myAuthToken is displayed but never wired to any handler
    # below — confirm whether it is still meant to be used.
    myAuthToken = gr.Textbox(label="Token", placeholder="Type here the AUTH Token", lines=1)

    with gr.Row():
        submit_btn = gr.Button("Submit")
        clear = gr.Button("Clear chat")
        clear_memory = gr.Button("Clear My Short Memory")  # πŸ‘ˆ New button

    # Shared chat history: mirrored into the Chatbot component on each event.
    state = gr.State([])

    submit_btn.click(chat_interface, [msg, state], [chatbot, state, msg, google_btn, bing_btn])
    clear.click(lambda: ([], []), None, [chatbot, state])
    clear_memory.click(clear_user_memory, None, [chatbot, state])

    google_btn.click(google_search_button_click, [state, state], [chatbot, state, google_btn, bing_btn])
    bing_btn.click(bing_search_button_click, [state, state], [chatbot, state, google_btn, bing_btn])


demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
327
+