boning123 committed
Commit 6cad2cb · verified · 1 Parent(s): 6f4013b

Update agent.py

Files changed (1)
  1. agent.py +20 -22
agent.py CHANGED
@@ -1,48 +1,46 @@
 from llama_cpp import Llama
 from duckduckgo_search import DDGS
+from e2b import Sandbox
 import gradio as gr
 
-# Load model
 llm = Llama(model_path="models/Sam-reason-A1.Q4_K_S.gguf", n_ctx=2048)
 
-# Define tools
-def search_tool(query):
+def search_tool(q):
     with DDGS() as ddgs:
-        results = ddgs.text(query)
+        results = ddgs.text(q)
         return "\n".join([r["title"] + ": " + r["href"] for r in results[:3]])
 
 def calc_tool(expr):
     try: return str(eval(expr))
-    except: return "Error in expression."
+    except Exception as e: return f"Math error: {e}"
+
+def run_tool(command):
+    with Sandbox(template="base") as sb:
+        out = sb.run(command)
+        return out.stdout or out.stderr or "No output."
 
 tools = {
     "search": search_tool,
-    "calc": calc_tool
+    "calc": calc_tool,
+    "run": run_tool
 }
 
-# Tool registry parser
-def parse_tools(output):
-    for name in tools:
-        if f"<tool:{name}>" in output:
-            arg_start = output.find(f"<tool:{name}>") + len(f"<tool:{name}>")
-            arg_end = output.find(f"</tool:{name}>")
-            arg = output[arg_start:arg_end].strip()
-            result = tools[name](arg)
-            return f"Tool [{name}] → {result}"
+def parse_tools(text):
+    for key in tools:
+        if f"<tool:{key}>" in text:
+            start = text.find(f"<tool:{key}>") + len(f"<tool:{key}>")
+            end = text.find(f"</tool:{key}>")
+            arg = text[start:end].strip()
+            return tools[key](arg)
     return None
 
-# Agent loop
 def agent_chat(user_input, history=[]):
     history.append({"role": "user", "content": user_input})
     prompt = "\n".join([f"{m['role']}: {m['content']}" for m in history])
-
     output = llm(prompt=prompt, stop=["user:", "system:"], echo=False)
     response = output["choices"][0]["text"].strip()
-
-    tool_result = parse_tools(response)
-    if tool_result:
-        response += f"\n{tool_result}"
-
+    result = parse_tools(response)
+    if result: response += f"\n🔧 Tool Output:\n{result}"
     history.append({"role": "assistant", "content": response})
     return response
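
For quick local verification of the new dispatch path: the model is expected to embed tool calls as <tool:NAME>argument</tool:NAME> anywhere in its reply, and parse_tools checks each registered tool name for a matching tag and hands the argument to that tool. Below is a minimal standalone sketch of that flow, with the registry stubbed to a calc-only table and an invented sample reply, so it runs without llama_cpp, duckduckgo_search, or an e2b API key:

# Minimal sketch of the <tool:name>arg</tool:name> dispatch used inside agent_chat.
# The registry is stubbed (calc only) so it runs with no model, search, or sandbox set up.
tools = {"calc": lambda expr: str(eval(expr))}  # stub for illustration; mirrors calc_tool

def parse_tools(text):
    for key in tools:
        if f"<tool:{key}>" in text:
            start = text.find(f"<tool:{key}>") + len(f"<tool:{key}>")
            end = text.find(f"</tool:{key}>")
            return tools[key](text[start:end].strip())
    return None

print(parse_tools("Let me work that out. <tool:calc>6 * 7</tool:calc>"))  # prints 42

Running the full agent.py additionally assumes the llama-cpp-python, duckduckgo-search, gradio, and e2b packages are installed, an E2B API key is configured for the sandboxed run tool, and the GGUF model is present at models/Sam-reason-A1.Q4_K_S.gguf.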