Files changed (1)
  1. app.py +154 -128
app.py CHANGED
@@ -1,103 +1,189 @@
  import os
  import gradio as gr
  import requests
- import inspect
  import pandas as pd

- # (Keep Constants as is)
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

- # --- Basic Agent Definition ---
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
  class BasicAgent:
-     def __init__(self):
-         print("BasicAgent initialized.")
-     def __call__(self, question: str) -> str:
-         print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer

- def run_and_submit_all( profile: gr.OAuthProfile | None):
-     """
-     Fetches all questions, runs the BasicAgent on them, submits all answers,
-     and displays the results.
-     """
-     # --- Determine HF Space Runtime URL and Repo URL ---
-     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

      if profile:
-         username= f"{profile.username}"
          print(f"User logged in: {username}")
      else:
-         print("User not logged in.")
          return "Please Login to Hugging Face with the button.", None
-
      api_url = DEFAULT_API_URL
      questions_url = f"{api_url}/questions"
      submit_url = f"{api_url}/submit"
-
-     # 1. Instantiate Agent ( modify this part to create your agent)
      try:
-         agent = BasicAgent()
      except Exception as e:
-         print(f"Error instantiating agent: {e}")
          return f"Error initializing agent: {e}", None
-     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
      agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
      print(agent_code)
-
-     # 2. Fetch Questions
-     print(f"Fetching questions from: {questions_url}")
      try:
          response = requests.get(questions_url, timeout=15)
          response.raise_for_status()
          questions_data = response.json()
-         if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
          print(f"Fetched {len(questions_data)} questions.")
-     except requests.exceptions.RequestException as e:
-         print(f"Error fetching questions: {e}")
-         return f"Error fetching questions: {e}", None
-     except requests.exceptions.JSONDecodeError as e:
-         print(f"Error decoding JSON response from questions endpoint: {e}")
-         print(f"Response text: {response.text[:500]}")
-         return f"Error decoding server response for questions: {e}", None
      except Exception as e:
-         print(f"An unexpected error occurred fetching questions: {e}")
-         return f"An unexpected error occurred fetching questions: {e}", None
-
-     # 3. Run your Agent
      results_log = []
      answers_payload = []
-     print(f"Running agent on {len(questions_data)} questions...")
      for item in questions_data:
          task_id = item.get("task_id")
          question_text = item.get("question")
-         if not task_id or question_text is None:
-             print(f"Skipping item with missing task_id or question: {item}")
              continue
          try:
-             submitted_answer = agent(question_text)
              answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
              results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
          except Exception as e:
-             print(f"Error running agent on task {task_id}: {e}")
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
      if not answers_payload:
-         print("Agent did not produce any answers to submit.")
          return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-     # 4. Prepare Submission
      submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-     print(status_update)
-
-     # 5. Submit
-     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
      try:
          response = requests.post(submit_url, json=submission_data, timeout=60)
          response.raise_for_status()
@@ -109,88 +195,28 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
              f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
              f"Message: {result_data.get('message', 'No message received.')}"
          )
-         print("Submission successful.")
-         results_df = pd.DataFrame(results_log)
-         return final_status, results_df
-     except requests.exceptions.HTTPError as e:
-         error_detail = f"Server responded with status {e.response.status_code}."
-         try:
-             error_json = e.response.json()
-             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-         except requests.exceptions.JSONDecodeError:
-             error_detail += f" Response: {e.response.text[:500]}"
-         status_message = f"Submission Failed: {error_detail}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.Timeout:
-         status_message = "Submission Failed: The request timed out."
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.RequestException as e:
-         status_message = f"Submission Failed: Network error - {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
      except Exception as e:
-         status_message = f"An unexpected error occurred during submission: {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df

-
- # --- Build Gradio Interface using Blocks ---
  with gr.Blocks() as demo:
-     gr.Markdown("# Basic Agent Evaluation Runner")
      gr.Markdown(
          """
          **Instructions:**
-
-         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
-         ---
-         **Disclaimers:**
-         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
          """
      )
-
      gr.LoginButton()
-
      run_button = gr.Button("Run Evaluation & Submit All Answers")
-
      status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-     # Removed max_rows=10 from DataFrame constructor
      results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-     run_button.click(
-         fn=run_and_submit_all,
-         outputs=[status_output, results_table]
-     )

  if __name__ == "__main__":
-     print("\n" + "-"*30 + " App Starting " + "-"*30)
-     # Check for SPACE_HOST and SPACE_ID at startup for information
-     space_host_startup = os.getenv("SPACE_HOST")
-     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
-
-     if space_host_startup:
-         print(f"✅ SPACE_HOST found: {space_host_startup}")
-         print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
-     else:
-         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-     if space_id_startup: # Print repo URLs if SPACE_ID is found
-         print(f"✅ SPACE_ID found: {space_id_startup}")
-         print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-         print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-     else:
-         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-     print("-"*(60 + len(" App Starting ")) + "\n")
-
-     print("Launching Gradio Interface for Basic Agent Evaluation...")
      demo.launch(debug=True, share=False)
 
@@ -1,103 +1,189 @@
  import os
  import gradio as gr
  import requests
  import pandas as pd
+ from langchain.agents import AgentExecutor, create_react_agent
+ from langchain_openai import ChatOpenAI
+ from langchain_core.prompts import PromptTemplate
+ from langchain_community.tools import DuckDuckGoSearchRun
+ from langchain.tools import Tool
+ from langchain_experimental.tools import PythonREPLTool  # PythonREPLTool ships with langchain-experimental, not langchain-community
+ import tempfile
+ import base64
+ from langchain_core.messages import HumanMessage
+
+ # For PDF and Excel handling - these imports are used in process_file
+ try:
+     from langchain_community.document_loaders import PyPDFLoader
+     import openpyxl  # For Excel
+ except ImportError:
+     pass  # Assume installed during the HF Space build
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

+ # --- Advanced Agent Definition ---
+ # ----- THIS IS WHERE THE ADVANCED LOGIC IS BUILT FOR HIGHER SCORES -----
  class BasicAgent:
+     def __init__(self, api_url):
+         print("Advanced BasicAgent initialized with tool support.")
+         openai_api_key = os.getenv("OPENAI_API_KEY")
+         if not openai_api_key:
+             raise ValueError("OPENAI_API_KEY must be set in Hugging Face Space variables for the agent to work.")
+
+         # Use a strong model like gpt-4o for better reasoning and vision
+         self.llm = ChatOpenAI(temperature=0, model="gpt-4o", api_key=openai_api_key)
+
+         # Tools for web search, code execution, and file processing
+         self.search_tool = DuckDuckGoSearchRun(name="web_search", description="Search the web for information.")
+         self.python_tool = PythonREPLTool(description="Execute Python code for calculations or data processing. Input should be valid Python code.")
+
+         # Custom tool for processing files (downloads from API, handles images/PDFs/Excel/text)
+         self.file_tool = Tool(
+             name="process_file",
+             func=self._process_file,
+             description="Download and process a file associated with a task. Input format: 'task_id: <id>, file_name: <name>'"
+         )
+
+         self.tools = [self.search_tool, self.python_tool, self.file_tool]
+
+         # ReAct agent prompt template (inspired by GAIA prompting for exact answers).
+         # create_react_agent requires {tools}, {tool_names}, and {agent_scratchpad} in the prompt,
+         # so the standard ReAct scaffolding is included here.
+         self.prompt_template = PromptTemplate.from_template("""
+ You are an expert AI agent solving GAIA benchmark questions. These questions require reasoning, tool use, and sometimes file processing.
+
+ You have access to the following tools:
+ {tools}
+
+ Use this format:
+ Thought: reason about what to do next
+ Action: the tool to use, one of [{tool_names}]
+ Action Input: the input to the tool
+ Observation: the tool result
+ ... (repeat Thought/Action/Action Input/Observation as needed)
+ Thought: I now know the final answer
+ Final Answer: the final answer
+
+ Question: {question}
+
+ If the question mentions a file or attachment, use the 'process_file' tool with 'task_id: <task_id>, file_name: <file_name>'.
+
+ Reason step-by-step using tools as needed. Output ONLY the final answer in the exact format required by the question. No explanations, no extra text.
+
+ {agent_scratchpad}
+ """)
+
+         self.agent = create_react_agent(self.llm, self.tools, self.prompt_template)
+         self.executor = AgentExecutor(agent=self.agent, tools=self.tools, verbose=True, handle_parsing_errors=True, max_iterations=10)
+
+         self.api_url = api_url
+     def _process_file(self, input_str: str) -> str:
+         """Internal function to download and process files."""
+         try:
+             # Parse input
+             parts = dict(part.strip().split(': ', 1) for part in input_str.split(', '))
+             task_id = parts.get('task_id')
+             file_name = parts.get('file_name')
+             if not task_id or not file_name:
+                 return "Invalid input for process_file. Need 'task_id' and 'file_name'."
+
+             # Download file
+             file_url = f"{self.api_url}/files/{task_id}"
+             response = requests.get(file_url, timeout=10)
+             response.raise_for_status()
+
+             with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file_name)[1]) as tmp:
+                 tmp.write(response.content)
+                 file_path = tmp.name
+
+             ext = os.path.splitext(file_name)[1].lower()
+
+             if ext in ['.jpg', '.png', '.jpeg', '.gif']:
+                 # Use vision to describe image
+                 with open(file_path, "rb") as img_file:
+                     base64_image = base64.b64encode(img_file.read()).decode('utf-8')
+                 message = HumanMessage(content=[
+                     {"type": "text", "text": "Describe this image in detail, focusing on elements relevant to the question."},
+                     {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
+                 ])
+                 description = self.llm.invoke([message]).content
+                 os.unlink(file_path)
+                 return description
+
+             elif ext == '.pdf':
+                 loader = PyPDFLoader(file_path)
+                 docs = loader.load()
+                 text = "\n\n".join(doc.page_content for doc in docs)
+                 os.unlink(file_path)
+                 return text[:20000]  # Truncate if too long
+
+             elif ext in ['.xlsx', '.xls']:
+                 df = pd.read_excel(file_path)
+                 os.unlink(file_path)
+                 return df.to_string()
+
+             else:
+                 # Text file
+                 with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
+                     text = f.read()
+                 os.unlink(file_path)
+                 return text[:20000]
+
+         except Exception as e:
+             return f"Error processing file: {str(e)}"
+
+     def __call__(self, question: str, task_id: str, file_name: str | None = None) -> str:
+         print(f"Agent processing question (first 50 chars): {question[:50]}... (task_id: {task_id}, file: {file_name})")
+         input_prompt = question
+         if file_name:
+             input_prompt += f"\nThere is an attached file '{file_name}'. Use the 'process_file' tool with 'task_id: {task_id}, file_name: {file_name}' to access it."
+
+         try:
+             response = self.executor.invoke({"question": input_prompt})
+             answer = response['output'].strip()
+             print(f"Agent returning answer: {answer}")
+             return answer
+         except Exception as e:
+             print(f"Error generating answer: {e}")
+             return "Agent error occurred."

+ # Update run_and_submit_all to pass task_id and file_name to the agent
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     space_id = os.getenv("SPACE_ID")
      if profile:
+         username = profile.username
          print(f"User logged in: {username}")
      else:
          return "Please Login to Hugging Face with the button.", None
+
      api_url = DEFAULT_API_URL
      questions_url = f"{api_url}/questions"
      submit_url = f"{api_url}/submit"
+
      try:
+         agent = BasicAgent(api_url)
      except Exception as e:
          return f"Error initializing agent: {e}", None
+
      agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
      print(agent_code)
+
      try:
          response = requests.get(questions_url, timeout=15)
          response.raise_for_status()
          questions_data = response.json()
          print(f"Fetched {len(questions_data)} questions.")
      except Exception as e:
+         return f"Error fetching questions: {e}", None
+
      results_log = []
      answers_payload = []
      for item in questions_data:
          task_id = item.get("task_id")
          question_text = item.get("question")
+         file_name = item.get("file_name")  # Assuming the API provides 'file_name'; if not, check the item for attachments
+         if not task_id or not question_text:
              continue
          try:
+             submitted_answer = agent(question_text, task_id, file_name)
              answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
              results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
          except Exception as e:
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
      if not answers_payload:
          return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
      submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
      try:
          response = requests.post(submit_url, json=submission_data, timeout=60)
          response.raise_for_status()
@@ -109,88 +195,28 @@
              f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
              f"Message: {result_data.get('message', 'No message received.')}"
          )
+         return final_status, pd.DataFrame(results_log)
      except Exception as e:
+         return f"Submission Failed: {e}", pd.DataFrame(results_log)
 
202
+ # --- Gradio Interface ---
 
203
  with gr.Blocks() as demo:
204
+ gr.Markdown("# Advanced Agent Evaluation Runner for GAIA (Aiming for 60%+)")
205
  gr.Markdown(
206
  """
207
  **Instructions:**
208
+ 1. Set OPENAI_API_KEY in Hugging Face Space variables (Settings > Variables).
209
+ 2. Log in to Hugging Face.
210
+ 3. Click 'Run Evaluation & Submit All Answers'.
211
+
212
+ This agent uses GPT-4o with tools for search, code execution, and file processing (images/PDFs/Excel) to achieve higher scores.
 
 
 
 
213
  """
214
  )
 
215
  gr.LoginButton()
 
216
  run_button = gr.Button("Run Evaluation & Submit All Answers")
 
217
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
 
218
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
219
+ run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
 
220
 
221
  if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
  demo.launch(debug=True, share=False)
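
Note on dependencies: the new imports assume the Space build installs LangChain and its companion packages. A plausible requirements.txt for this revision (package names are inferred from the imports above rather than taken from the repo; version pins are left to the author; duckduckgo-search backs DuckDuckGoSearchRun, langchain-experimental provides PythonREPLTool, and pypdf backs PyPDFLoader):

gradio
requests
pandas
langchain
langchain-core
langchain-community
langchain-openai
langchain-experimental
duckduckgo-search
pypdf
openpyxl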
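
For local debugging before pushing to the Space, a minimal smoke test along these lines can exercise the new BasicAgent on a single question. This is a sketch under stated assumptions: OPENAI_API_KEY is exported, the packages above are installed, and the task_id and question are placeholders rather than real GAIA tasks.

# smoke_test.py - hypothetical local check, not part of the diff
import os

from app import BasicAgent, DEFAULT_API_URL  # names defined in app.py above

if __name__ == "__main__":
    assert os.getenv("OPENAI_API_KEY"), "export OPENAI_API_KEY before running"
    agent = BasicAgent(DEFAULT_API_URL)
    # Placeholder task_id and question; real ones come from the /questions endpoint.
    answer = agent("What is 17 * 23?", task_id="local-test", file_name=None)
    print("Answer:", answer)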