Shreyas094 committed
Commit 0f26a54 · verified · 1 Parent(s): f36a838

Update app.py

Files changed (1): app.py (+79, -29)
app.py CHANGED
@@ -11,6 +11,8 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_core.documents import Document
 from huggingface_hub import InferenceClient
 import logging
+import pandas as pd
+import tempfile
 
 # Set up basic configuration for logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
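The two new imports support the Excel batch feature added further down. One environment assumption worth noting: pandas only reads and writes .xlsx files when an Excel engine such as openpyxl is installed, and openpyxl is never imported directly here, so a missing dependency would only surface at upload time. A minimal startup check, as a sketch (the message and placement are illustrative, not part of the commit):

import importlib.util

# Sketch: fail fast if the Excel engine pandas relies on is absent
# (pd.read_excel / DataFrame.to_excel load it lazily for .xlsx files).
if importlib.util.find_spec("openpyxl") is None:
    raise RuntimeError("openpyxl is required for the Excel batch feature; add it to requirements.txt")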
@@ -41,6 +43,43 @@ Your goal is to synthesize the given context into a coherent and detailed respon
 Please ensure that your response is well-structured, factual.
 If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags."""
 
+def process_excel_file(file, model, temperature, num_calls, use_embeddings, system_prompt):
+    try:
+        df = pd.read_excel(file.name)
+        results = []
+
+        for _, row in df.iterrows():
+            question = row['Question']
+            custom_system_prompt = row['System Prompt']
+
+            # Use the existing get_response_with_search function
+            response_generator = get_response_with_search(question, model, num_calls, temperature, use_embeddings, custom_system_prompt)
+
+            full_response = ""
+            for partial_response, _ in response_generator:
+                full_response = partial_response  # Keep updating with the latest response
+
+            if not full_response:
+                full_response = "No response generated. Please check the input parameters and try again."
+
+            results.append(full_response)
+
+        df['Response'] = results
+
+        # Save to a temporary file
+        with tempfile.NamedTemporaryFile(delete=False, suffix='.xlsx') as tmp:
+            df.to_excel(tmp.name, index=False)
+            return tmp.name
+    except Exception as e:
+        logging.error(f"Error processing Excel file: {str(e)}")
+        return None
+
+def upload_file(file):
+    return file.name if file else None
+
+def download_file(file_path):
+    return file_path
+
 def get_embeddings():
     return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")
 
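process_excel_file, added above, expects the uploaded workbook to contain a 'Question' column and a 'System Prompt' column, one row per query; the per-row prompt is what gets passed to get_response_with_search, and the generated answers are written back into a new 'Response' column in a temporary copy that is returned for download. A minimal sketch of a compatible input file (the file name questions.xlsx is only an example):

import pandas as pd

# One row per query; the per-row system prompt is used instead of the UI default.
pd.DataFrame({
    "Question": [
        "What are the latest developments in artificial intelligence?",
        "What are the current global economic trends?",
    ],
    "System Prompt": [
        "You are a concise research assistant.",
        "You are a concise research assistant.",
    ],
}).to_excel("questions.xlsx", index=False)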
@@ -199,36 +238,47 @@ def initial_conversation():
         "To get started, ask me a question!")
     ]
 
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=True, render=False),
-    additional_inputs=[
-        gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
-        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
-        gr.Checkbox(label="Use Embeddings", value=False),
-        gr.Textbox(label="System Prompt", lines=5, value=DEFAULT_SYSTEM_PROMPT),
-    ],
-    title="AI-powered Web Search Assistant",
-    description="Ask questions and get answers from web search results.",
-    theme=gr.Theme.from_hub("allenai/gradio-theme"),
-    css=css,
-    examples=[
-        ["What are the latest developments in artificial intelligence?"],
-        ["Can you explain the basics of quantum computing?"],
-        ["What are the current global economic trends?"]
-    ],
-    cache_examples=False,
-    analytics_enabled=False,
-    textbox=gr.Textbox(placeholder="Ask a question", container=False, scale=7),
-    chatbot = gr.Chatbot(
-        show_copy_button=True,
-        likeable=True,
-        layout="bubble",
-        height=400,
-        value=initial_conversation()
-    )
-)
+# Modify the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# AI-powered Web Search Assistant")
+    gr.Markdown("Ask questions and get answers from web search results.")
+
+    with gr.Row():
+        chatbot = gr.Chatbot(
+            show_copy_button=True,
+            likeable=True,
+            layout="bubble",
+            height=400,
+            value=initial_conversation()
+        )
+
+    with gr.Row():
+        message = gr.Textbox(placeholder="Ask a question", container=False, scale=7)
+        submit_button = gr.Button("Submit")
+
+    with gr.Accordion("⚙️ Parameters", open=False):
+        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3])
+        temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature")
+        num_calls = gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")
+        use_embeddings = gr.Checkbox(label="Use Embeddings", value=False)
+        system_prompt = gr.Textbox(label="System Prompt", lines=5, value=DEFAULT_SYSTEM_PROMPT)
+
+    with gr.Accordion("Batch Processing", open=False):
+        excel_file = gr.File(label="Upload Excel File", file_types=[".xlsx"])
+        process_button = gr.Button("Process Excel File")
+        download_button = gr.File(label="Download Processed File")
+
+    # Event handlers
+    submit_button.click(chatbot_interface, inputs=[message, chatbot, model, temperature, num_calls, use_embeddings, system_prompt], outputs=chatbot)
+    message.submit(chatbot_interface, inputs=[message, chatbot, model, temperature, num_calls, use_embeddings, system_prompt], outputs=chatbot)
+
+    # Excel processing
+    excel_file.change(upload_file, inputs=[excel_file], outputs=[excel_file])
+    process_button.click(
+        process_excel_file,
+        inputs=[excel_file, model, temperature, num_calls, use_embeddings, system_prompt],
+        outputs=[download_button]
+    )
 
 if __name__ == "__main__":
     demo.launch(share=True)
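The submit handlers above route messages through chatbot_interface, which is defined elsewhere in app.py and is not part of this diff. Purely as an illustration of the signature those handlers imply (argument order matches the inputs list; the body is an assumption, not the committed code), it would look roughly like:

def chatbot_interface(message, history, model, temperature, num_calls, use_embeddings, system_prompt):
    # Assumed shape only: stream an answer from the existing search pipeline
    # and append the (question, answer) pair to the Chatbot history.
    if not message.strip():
        return history
    response = ""
    for partial_response, _ in get_response_with_search(message, model, num_calls, temperature, use_embeddings, system_prompt):
        response = partial_response
    return history + [(message, response)]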
 