Update app.py
app.py CHANGED
@@ -5,7 +5,6 @@ from langchain_groq import ChatGroq
 from langchain_community.utilities import ArxivAPIWrapper, WikipediaAPIWrapper
 from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun, DuckDuckGoSearchRun
 from langchain.agents import initialize_agent, AgentType
-from langchain.callbacks import StreamlitCallbackHandler
 import os
 import requests
 import pandas as pd
@@ -17,12 +16,21 @@ load_dotenv()
 # Constants for Basic Agent Evaluation
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

-# Initialize search tools
-api_wrapper_arxiv = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=250)
-arxiv = ArxivQueryRun(api_wrapper=api_wrapper_arxiv)
-api_wrapper_wiki = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=250)
-wiki = WikipediaQueryRun(api_wrapper=api_wrapper_wiki)
-search = DuckDuckGoSearchRun(name="Search")

 # Streamlit app layout
 st.title("Langchain - Chat with Search & Evaluation")
@@ -50,14 +58,11 @@ if prompt := st.chat_input(placeholder="What is machine learning?"):
         st.error("Please enter your Groq API key in the sidebar.")
         st.stop()

-    llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192"
-    tools = [search, arxiv, wiki]
-
     search_agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)

     with st.chat_message("assistant"):
-
-        response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
         st.session_state.messages.append({'role': 'assistant', "content": response})
         st.write(response)

@@ -65,84 +70,96 @@ if prompt := st.chat_input(placeholder="What is machine learning?"):
 st.sidebar.title("Basic Agent Evaluation")

 def run_evaluation():
-    """Function to run the evaluation
     if not api_key:
         st.error("Please enter your Groq API key in the sidebar.")
         return "API key required", pd.DataFrame()

-
     api_url = DEFAULT_API_URL
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
     space_id = os.getenv("SPACE_ID", "local")
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id != "local" else "local_execution"

-    # 1. Instantiate Agent
-    try:
-        llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192")
-        tools = [search, arxiv, wiki]
-        agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)
-    except Exception as e:
-        return f"Error initializing agent: {e}", pd.DataFrame()
-
-    # 2. Fetch Questions
     try:
         response = requests.get(questions_url, timeout=15)
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-            return "
-    except Exception as e:
-        return f"Error fetching questions: {e}", pd.DataFrame()
-
-    # 3. Run Agent
-    results_log = []
-    answers_payload = []
-    for item in questions_data:
-        task_id = item.get("task_id")
-        question_text = item.get("question")
-        if not task_id or question_text is None:
-            continue
-        try:
-            submitted_answer = agent.run(question_text)
-            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-        except Exception as e:
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
-    if not answers_payload:
-        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-    # 4. Prepare and Submit
-    submission_data = {
-        "username": username,
-        "agent_code": agent_code,
-        "answers": answers_payload
-    }

|
123 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
         response = requests.post(submit_url, json=submission_data, timeout=60)
         response.raise_for_status()
         result_data = response.json()
         final_status = (
-            f"Submission Successful!\n"
-            f"
-            f"
-            f"
-            f"Message: {result_data.get('message', 'No message received.')}"
         )
         return final_status, pd.DataFrame(results_log)
     except Exception as e:
-        return f"

 # Evaluation button in sidebar
-if st.sidebar.button("Run Evaluation & Submit Answers"):
-    with st.spinner("
         status, results = run_evaluation()
-
-    st.sidebar.

     if not results.empty:
-        st.subheader("
-        st.dataframe(results)


 from langchain_community.utilities import ArxivAPIWrapper, WikipediaAPIWrapper
 from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun, DuckDuckGoSearchRun
 from langchain.agents import initialize_agent, AgentType
 import os
 import requests
 import pandas as pd
 # Constants for Basic Agent Evaluation
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

+# Initialize search tools (with warm-up)
+@st.cache_resource
+def load_tools():
+    with st.spinner("Initializing tools (first time may take a few seconds)..."):
+        api_wrapper_arxiv = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=250)
+        arxiv = ArxivQueryRun(api_wrapper=api_wrapper_arxiv)
+        api_wrapper_wiki = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=250)
+        wiki = WikipediaQueryRun(api_wrapper=api_wrapper_wiki)
+        search = DuckDuckGoSearchRun(name="Search")
+        # Warm up tools
+        arxiv.run("machine learning")
+        wiki.run("machine learning")
+        return [search, arxiv, wiki]
+
+tools = load_tools()

 # Streamlit app layout
 st.title("Langchain - Chat with Search & Evaluation")
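The `@st.cache_resource` decorator above memoizes the returned tool list across Streamlit reruns and sessions, so the wrapper construction and the warm-up queries run only once per process. A minimal sketch of that caching behaviour, with an illustrative function name and payload that are not part of app.py:

import streamlit as st

@st.cache_resource
def get_shared_resource():
    # The body executes once per process; later reruns reuse the returned object.
    return {"initialized": True}  # stand-in for an expensive object such as a tool or client

first = get_shared_resource()
second = get_shared_resource()
assert first is second  # cache_resource hands back the same instance, not a copy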
         st.error("Please enter your Groq API key in the sidebar.")
         st.stop()

+    llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192")
     search_agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)

     with st.chat_message("assistant"):
+        response = search_agent.run(st.session_state.messages)
         st.session_state.messages.append({'role': 'assistant', "content": response})
         st.write(response)

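One behavioural consequence of this hunk: the old code passed `callbacks=[st_cb]` to `search_agent.run(...)`, which streamed the agent's intermediate steps into the chat container via `StreamlitCallbackHandler`; the new line drops that, so only the final answer is rendered. If streaming were wanted again, the usual LangChain pattern looks roughly like the sketch below (the import path and arguments are an assumption, shown for illustration only):

import streamlit as st
from langchain_community.callbacks.streamlit import StreamlitCallbackHandler

# `search_agent` and `prompt` refer to the objects defined earlier in app.py.
with st.chat_message("assistant"):
    st_cb = StreamlitCallbackHandler(st.container())  # render thoughts/tool calls live in this container
    response = search_agent.run(prompt, callbacks=[st_cb])
    st.write(response)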
 st.sidebar.title("Basic Agent Evaluation")

 def run_evaluation():
+    """Function to run the evaluation with progress updates"""
     if not api_key:
         st.error("Please enter your Groq API key in the sidebar.")
         return "API key required", pd.DataFrame()

+    # Setup progress tracking
+    progress_bar = st.sidebar.progress(0)
+    status_text = st.sidebar.empty()
+    results_container = st.empty()
+
+    username = "streamlit_user"
     api_url = DEFAULT_API_URL
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
     space_id = os.getenv("SPACE_ID", "local")
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id != "local" else "local_execution"

     try:
+        # 1. Fetch Questions
+        status_text.text("Fetching questions...")
         response = requests.get(questions_url, timeout=15)
         response.raise_for_status()
         questions_data = response.json()
+        total_questions = len(questions_data)
+        status_text.text(f"Found {total_questions} questions")
+
         if not questions_data:
+            return "No questions found", pd.DataFrame()

+        # 2. Initialize Agent (reuse tools from cache)
+        llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192")
+        agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)
+
+        # 3. Process Questions
+        results_log = []
+        answers_payload = []
+
+        for i, item in enumerate(questions_data):
+            progress = (i + 1) / total_questions
+            progress_bar.progress(progress)
+            status_text.text(f"Processing question {i+1}/{total_questions}...")
+
+            task_id = item.get("task_id")
+            question_text = item.get("question")
+            if not task_id or not question_text:
+                continue
+
+            try:
+                submitted_answer = agent.run(question_text)
+                answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+                results_log.append({"Task ID": task_id, "Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
+                                    "Submitted Answer": submitted_answer[:200] + "..." if len(submitted_answer) > 200 else submitted_answer})
+
+                # Update results table progressively
+                if (i + 1) % 3 == 0 or (i + 1) == total_questions:  # Update every 3 questions or at end
+                    results_container.dataframe(pd.DataFrame(results_log))
+            except Exception as e:
+                results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"Error: {str(e)}"})
+
+        # 4. Submit Answers
+        status_text.text("Submitting answers...")
+        submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}
         response = requests.post(submit_url, json=submission_data, timeout=60)
         response.raise_for_status()
         result_data = response.json()
+
         final_status = (
+            f"Submission Successful!\n"
+            f"Score: {result_data.get('score', 'N/A')}%\n"
+            f"Correct: {result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')}\n"
+            f"Message: {result_data.get('message', 'No message')}"
         )
         return final_status, pd.DataFrame(results_log)
+
     except Exception as e:
+        return f"Failed: {str(e)}", pd.DataFrame(results_log if 'results_log' in locals() else [])
+
+    finally:
+        progress_bar.empty()
+        status_text.empty()

 # Evaluation button in sidebar
+if st.sidebar.button("Run Evaluation & Submit Answers"):
+    with st.spinner("Starting evaluation..."):
         status, results = run_evaluation()
+
+    st.sidebar.success("Evaluation completed!")
+    st.sidebar.text_area("Results", value=status, height=150)

     if not results.empty:
+        st.subheader("Detailed Results")
+        st.dataframe(results, use_container_width=True)

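For reference, the submission that `run_evaluation()` posts to the `/submit` endpoint of DEFAULT_API_URL carries a username, a pointer to the agent code, and one `{task_id, submitted_answer}` pair per question, exactly as assembled above. A standalone sketch of that request follows; the field names come from the code, while the example task id and answer are placeholders:

import requests

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Payload mirroring the structure built in run_evaluation(); values below are placeholders.
submission_data = {
    "username": "streamlit_user",
    "agent_code": "local_execution",
    "answers": [
        {"task_id": "example-task-id", "submitted_answer": "example answer"},
    ],
}

response = requests.post(f"{DEFAULT_API_URL}/submit", json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
print(result_data.get("message", "No message"))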