import os
import logging

import gradio as gr
import pandas as pd
import requests

from agent import initialize_agent  # Import the agent initialization function

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Logging Configuration ---
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s")
logger = logging.getLogger(__name__)

# --- Global Agent Initialization ---
# The agent is initialized once when the Space starts up.
# This is critical for performance and avoids reloading the model on every request.
logger.info("šŸš€ Application starting up! Initializing the GAIA agent...")
AGENT = initialize_agent()
if AGENT is None:
    logger.error("šŸ’„ FATAL: Agent initialization failed. The application will not be able to process questions.")
else:
    logger.info("āœ… Agent initialized successfully.")
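
# --- Expected agent interface (illustrative sketch, not the real agent.py) ---
# agent.py is not shown in this file. The Protocol below only documents the
# contract this app assumes: initialize_agent() returns either None (on
# failure) or a callable that maps a raw question string to a GAIA-normalized
# answer string. The name GAIAAgentProtocol is introduced here purely for
# documentation and is not referenced anywhere else.
from typing import Protocol


class GAIAAgentProtocol(Protocol):
    def __call__(self, question: str) -> str: ...


# Under that assumption, AGENT above behaves as "GAIAAgentProtocol | None".
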
# --- Helper Functions ---
def _fetch_questions(api_url: str) -> list:
    """Fetches evaluation questions from the API."""
    questions_url = f"{api_url}/questions"
    logger.info(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            raise ValueError("Fetched questions list is empty or invalid format.")
        logger.info(f"Fetched {len(questions_data)} questions.")
        return questions_data
    # In recent versions of requests, JSONDecodeError is a RequestException
    # subclass, so it must be caught first or this handler is unreachable.
    except requests.exceptions.JSONDecodeError as e:
        raise RuntimeError(f"Error decoding JSON response from questions endpoint: {e}. Response: {response.text[:500]}") from e
    except requests.exceptions.RequestException as e:
        raise RuntimeError(f"Error fetching questions: {e}") from e
    except Exception as e:
        raise RuntimeError(f"An unexpected error occurred fetching questions: {e}") from e


def _run_agent_on_questions(agent, questions_data: list) -> tuple[list, list]:
    """Runs the agent on each question and collects answers and logs."""
    results_log = []
    answers_payload = []
    logger.info(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            logger.warning(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            logger.info(f"Processing task {task_id}: {question_text[:100]}...")
            # The agent wrapper returns the final, normalized answer directly.
            submitted_answer = agent(question_text)
            logger.info(f"Task {task_id} - Final answer from agent: {submitted_answer}")
            answers_payload.append({
                "task_id": task_id,
                "submitted_answer": submitted_answer
            })
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Final Answer": submitted_answer
            })
        except Exception as e:
            error_msg = f"AGENT ERROR: {e}"
            logger.error(f"Error running agent on task {task_id}: {e}", exc_info=True)
            answers_payload.append({
                "task_id": task_id,
                "submitted_answer": error_msg
            })
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Final Answer": error_msg
            })
    return answers_payload, results_log


def _submit_answers(api_url: str, username: str, agent_code_url: str, answers_payload: list) -> dict:
    """Submits the agent's answers to the evaluation API."""
    submit_url = f"{api_url}/submit"
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code_url,
        "answers": answers_payload
    }
    logger.info(f"Submitting {len(answers_payload)} answers for user '{username}' to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        raise RuntimeError(f"Submission Failed: {error_detail}") from e
    except requests.exceptions.Timeout:
        raise RuntimeError("Submission Failed: The request timed out.") from e
    except requests.exceptions.RequestException as e:
        raise RuntimeError(f"Submission Failed: Network error - {e}") from e
    except Exception as e:
        raise RuntimeError(f"An unexpected error occurred during submission: {e}") from e
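
# --- Submission payload shape (illustrative, documentation only) ---
# These TypedDicts are not used by the code in this file; they only spell out
# the JSON structures implied by _run_agent_on_questions and _submit_answers.
# The field names come from this file; the /submit response is additionally
# expected to carry username, score, correct_count, total_attempted and
# message (see run_and_submit_all below).
from typing import TypedDict


class AnswerItem(TypedDict):
    task_id: str
    submitted_answer: str


class SubmissionBody(TypedDict):
    username: str
    agent_code: str
    answers: list[AnswerItem]
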
# --- Main Gradio Function ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Orchestrates fetching the questions, running the agent, and submitting the answers.
    """
    if not profile:
        logger.warning("Attempted to run evaluation without being logged in.")
        return "Please log in to Hugging Face with the button above.", None
    username = profile.username
    logger.info(f"User '{username}' initiated evaluation.")

    if AGENT is None:
        return "āŒ Error: The agent failed to initialize on startup. Please check the Space logs for details.", None

    space_id = os.getenv("SPACE_ID")
    if not space_id:
        logger.error("SPACE_ID environment variable not found. Cannot determine agent_code URL.")
        return "āŒ Error: SPACE_ID not set. This is required for submission.", None
    agent_code_url = f"https://huggingface.co/spaces/{space_id}/tree/main"

    status_message = ""
    results_df = pd.DataFrame()
    results_log = []
    try:
        # 1. Fetch Questions
        questions_data = _fetch_questions(DEFAULT_API_URL)

        # 2. Run Agent on Questions (using the pre-initialized global agent)
        answers_payload, results_log = _run_agent_on_questions(AGENT, questions_data)
        if not answers_payload:
            status_message = "Agent did not produce any answers to submit."
            return status_message, pd.DataFrame(results_log)

        # 3. Submit Answers
        submission_result = _submit_answers(DEFAULT_API_URL, username, agent_code_url, answers_payload)
        final_status = (
            f"šŸŽ‰ Submission Successful!\n"
            f"šŸ‘¤ User: {submission_result.get('username')}\n"
            f"šŸ“Š Overall Score: {submission_result.get('score', 'N/A')}% "
            f"({submission_result.get('correct_count', '?')}/{submission_result.get('total_attempted', '?')} correct)\n"
            f"šŸ’¬ Message: {submission_result.get('message', 'No message received.')}\n"
            f"šŸ”— Agent Code: {agent_code_url}"
        )
        status_message = final_status
        results_df = pd.DataFrame(results_log)
    except RuntimeError as e:
        status_message = f"āŒ Operation Failed: {e}"
        logger.error(status_message)
        results_df = pd.DataFrame(results_log) if results_log else pd.DataFrame([{"Status": "Error", "Details": str(e)}])
    except Exception as e:
        status_message = f"šŸ’„ Critical Error: {e}"
        logger.error(status_message, exc_info=True)
        results_df = pd.DataFrame([{"Status": "Critical Error", "Details": str(e)}])

    return status_message, results_df


# --- Gradio Interface Definition ---
with gr.Blocks(title="GAIA Benchmark Agent", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🧠 GAIA Benchmark Evaluation Agent
    **An advanced agent designed to tackle the General AI Assistant (GAIA) benchmark.**
    """)
    gr.Markdown("""
    ## šŸ“‹ Instructions:
    1. **Add Secrets**: If you have cloned this Space, go to the **Settings** tab and add your API keys as **Secrets**.
        * `TOGETHER_API_KEY`: Your key from Together AI.
        * `SERPAPI_API_KEY`: Your key from SerpApi for Google Search (optional but recommended).
    2. **Login**: Use the button below to log in with your Hugging Face account. Your username is required for submission.
    3. **Run**: Click 'Run Evaluation & Submit' to start the process. The agent will fetch all questions, solve them, and submit the answers automatically.
    4. **Wait**: The process can take several minutes. You can monitor the progress in the status box and see detailed results in the table below.

    ---
    ### šŸŽÆ GAIA Answer Formatting
    The agent is designed to automatically format answers according to GAIA's strict requirements (e.g., no commas in numbers, no articles in strings).
    """)

    with gr.Row():
        gr.LoginButton(scale=1)
        run_button = gr.Button("šŸš€ Run Evaluation & Submit All Answers", variant="primary", scale=2)

    status_output = gr.Textbox(
        label="šŸ“Š Evaluation Status & Results",
        lines=8,
        interactive=False,
        placeholder="Click 'Run Evaluation' to start the process..."
    )
    results_table = gr.DataFrame(
        label="šŸ“ Detailed Question Results",
        wrap=True,
        interactive=False,
        column_widths=["10%", "60%", "30%"]
    )

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
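
# --- Optional local smoke test (illustrative sketch, not wired into the UI) ---
# A minimal way to exercise the pre-initialized agent on a single question
# without touching the scoring API. The helper and its default question are
# introduced here for illustration only; nothing in the app calls it.
def _smoke_test(question: str = "What is the capital of France?") -> None:
    if AGENT is None:
        logger.error("Agent is not initialized; cannot run the smoke test.")
        return
    logger.info(f"Smoke test answer: {AGENT(question)}")
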
if __name__ == "__main__":
    print("\n" + "=" * 70)
    print("šŸš€ GAIA BENCHMARK AGENT STARTING UP")
    print("=" * 70)

    # Check environment variables loaded from HF Secrets
    space_id = os.getenv("SPACE_ID")
    together_key = os.getenv("TOGETHER_API_KEY")
    serpapi_key = os.getenv("SERPAPI_API_KEY")

    if space_id:
        print(f"āœ… SPACE_ID: {space_id}")
        print(f"   - Agent code URL for submission will be: https://huggingface.co/spaces/{space_id}/tree/main")
    else:
        print("āš ļø SPACE_ID not found - submissions will fail. This is normal for local dev.")

    print("šŸ”‘ API Keys Status (from Secrets):")
    print(f"   - Together AI: {'āœ… Set' if together_key else 'āŒ Missing - Agent will fail to initialize!'}")
    print(f"   - SerpAPI: {'āœ… Set' if serpapi_key else 'āš ļø Missing - Google Search tool will be disabled.'}")

    if not together_key:
        print("\nā€¼ļø CRITICAL: TOGETHER_API_KEY is not set in the Space Secrets.")
        print("   Please add it in the 'Settings' tab of your Space.")

    print("=" * 70)
    print("šŸŽÆ Launching Gradio Interface...")
    print("=" * 70 + "\n")

    demo.launch(debug=False, share=False)
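
# --- Local development note (illustrative, comments only) ---
# The variables checked above are provided as Space Secrets on Hugging Face.
# When running this file locally, the same names can be exported in the shell
# before launching; the values below are placeholders, not real keys:
#
#   export TOGETHER_API_KEY=...     # required, the agent will not initialize without it
#   export SERPAPI_API_KEY=...      # optional, enables the Google Search tool
#   export SPACE_ID=<user>/<space>  # only needed to build the agent_code URL for submission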