# NOTE: non-Python residue from the file-viewer export (status lines, commit
# hashes, and the line-number gutter) removed so this module parses cleanly.
from smolagents import (
LiteLLMModel,
OpenAIServerModel,
ToolCallingAgent,
CodeAgent,
VisitWebpageTool,
WikipediaSearchTool,
SpeechToTextTool,
PythonInterpreterTool,
)
import os
import yaml
import time
from typing import Optional
from tools import MySearchTool, DuckDuckGoSearchTool
# Tool spec for Gemini's native grounded-search capability.
# NOTE(review): `tools=` given to LiteLLMModel is forwarded as a completion
# kwarg; confirm litellm accepts {"type": "google_search"} for this model.
google_search_tool = {"type": "google_search"}
# API keys come from the environment; None when unset (requests will then fail).
gemini_api_key = os.environ.get("GEMINI_API_KEY", None)
open_router_api_key = os.environ.get("OPENROUTER_API_KEY", None)
# Primary model: Gemini 2.0 Flash via LiteLLM, with Google Search grounding.
gemini_model = LiteLLMModel(
    # model_id="gemini/gemini-2.0-flash",
    model_id="gemini/gemini-2.0-flash-001",
    api_key=gemini_api_key,
    tools=[google_search_tool],
)
# alternative model
# Fallback model served through OpenRouter's OpenAI-compatible endpoint.
model = OpenAIServerModel(
    # model_id='deepseek/deepseek-r1-0528-qwen3-8b:free',
    # model_id="thudm/glm-4-32b:free" function calling
    model_id="google/gemini-2.0-flash-exp:free",  # function calling support
    api_key=open_router_api_key,
)
# Tool instances available for wiring into agents.
# NOTE(review): none of these are passed to the agents below (both use
# tools=[]) — confirm whether they are used elsewhere or are dead setup.
search_tool = DuckDuckGoSearchTool()
speech_tool = SpeechToTextTool()
wiki_tool = WikipediaSearchTool()
python_tool = PythonInterpreterTool()
visit_webpage_tool = VisitWebpageTool()
my_search_tool = MySearchTool()
# Load the agents' prompt templates. Explicit encoding avoids platform-dependent
# decoding of the YAML file (fix: previously relied on the locale default).
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
# Override the template used when a managed agent reports its final answer
# back to the manager agent.
prompt_templates["final_answer"] = {
    "pre_messages": "",
    "final_answer": "Here is the final answer from your managed agent '{{name}}':\n{{final_answer}}",
    "post_messages": "",
}
class SkynetMultiAgent:
    """Two-tier agent: a manager CodeAgent that delegates to a web-search sub-agent.

    NOTE(review): the manager is constructed with max_steps=2 but run() is
    invoked with max_steps=5, which takes precedence — confirm this is intended.
    """

    def __init__(self):
        # Sub-agent dedicated to web lookups; the manager delegates to it by name.
        searcher = ToolCallingAgent(
            tools=[],
            model=gemini_model,
            name="web_search_agent",
            description="""
This agent has access to Google Search to search for information about a specific topic.
""",
            max_steps=2,
            prompt_templates=prompt_templates,
        )
        # Top-level orchestrator: runs code and can call the managed searcher.
        self.agent = CodeAgent(
            tools=[],
            model=gemini_model,
            name="manager_agent",
            description="Manages the web agents and it runs code.",
            managed_agents=[searcher],
            prompt_templates=prompt_templates,
            max_steps=2,
        )

    def __call__(self, question: str, file_path: Optional[str] = None) -> str:
        """Run the manager agent on *question*, appending a file reference if given.

        A file_path that does not exist on disk is silently ignored.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Mention an attached file in the prompt only when it actually exists.
        has_file = bool(file_path) and os.path.exists(file_path)
        if has_file:
            print(f"Processing question with file: {file_path}")
        eval_question = (
            f"{question}\n\nFile provided: {file_path}" if has_file else question
        )
        time.sleep(2)  # 2-second delay
        answer = self.agent.run(eval_question, max_steps=5)
        print(f"Agent returning fixed answer: {answer}")
        return answer
class SkynetSingleAgent:
    """Single ToolCallingAgent that answers questions directly (no sub-agents)."""

    def __init__(self):
        # One flat agent; its model (gemini_model) carries the search capability.
        self.agent = ToolCallingAgent(
            tools=[],
            model=gemini_model,
            name="skynet_agent",
            description="Agent to answer questions and to search in the web using google search tool",
            prompt_templates=prompt_templates,
        )

    def __call__(self, question: str, file_path: Optional[str] = None) -> str:
        """Answer *question*; if *file_path* points at an existing file, mention it.

        A file_path that does not exist on disk is silently ignored.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Default to the bare question; extend it only for a real attachment.
        eval_question = question
        if file_path and os.path.exists(file_path):
            print(f"Processing question with file: {file_path}")
            eval_question = f"{question}\n\nFile provided: {file_path}"
        answer = self.agent.run(eval_question)
        print(f"Agent returning fixed answer: {answer}")
        return answer