boning123 committed on
Commit
69c76c8
·
verified ·
1 Parent(s): 3de4627

Create agent.py

Browse files
Files changed (1) hide show
  1. agent.py +49 -0
agent.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llama_cpp import Llama
2
+ from duckduckgo_search import DDGS
3
+ import gradio as gr
4
+
5
# Load model
# Local GGUF model served through llama-cpp-python with a 2048-token
# context window. Path is relative to the working directory — NOTE(review):
# confirm models/Sam-reason-A1.Q4_K_S.gguf exists before launch.
llm = Llama(model_path="models/Sam-reason-A1.Q4_K_S.gguf", n_ctx=2048)
7
+
8
# Define tools
def search_tool(query):
    """Search DuckDuckGo for *query* and return up to three results.

    Each result is rendered as a "title: url" line; lines are joined
    with newlines. Returns an empty string when there are no results.
    """
    with DDGS() as ddgs:
        results = ddgs.text(query)
        # ddgs.text() returns a list in some duckduckgo_search versions and
        # a generator in others; slicing (results[:3]) raises TypeError on a
        # generator. Iterate with an explicit cap so both shapes work.
        lines = []
        for r in results:
            lines.append(f"{r['title']}: {r['href']}")
            if len(lines) == 3:
                break
        return "\n".join(lines)
13
+
14
def calc_tool(expr):
    """Evaluate an arithmetic expression string and return the result as text.

    Returns the string "Error in expression." on any evaluation failure
    instead of raising, so the agent loop never crashes on bad input.
    """
    # SECURITY: eval() on model/user-supplied text can execute arbitrary
    # code. Acceptable only for a trusted local demo — swap in a real
    # expression parser (e.g. ast-based) before exposing this anywhere.
    try:
        return str(eval(expr))
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        return "Error in expression."
17
+
18
# Tool registry: maps the tag name used in <tool:NAME>...</tool:NAME>
# markup emitted by the model to the Python handler that executes it.
tools = {
    "search": search_tool,
    "calc": calc_tool
}
22
+
23
# Tool registry parser
def parse_tools(output):
    """Scan model *output* for the first <tool:NAME>arg</tool:NAME> invocation.

    Runs the matching registered tool on the enclosed (stripped) argument
    and returns a "Tool [name] → result" line, or None when no tool tag
    is present in the output.
    """
    for name in tools:
        open_tag = f"<tool:{name}>"
        start = output.find(open_tag)
        if start == -1:
            continue
        arg_start = start + len(open_tag)
        # Search for the closing tag *after* the opening tag. Previously a
        # missing close tag made find() return -1, so output[arg_start:-1]
        # silently fed a truncated tail of the whole output to the tool.
        arg_end = output.find(f"</tool:{name}>", arg_start)
        if arg_end == -1:
            arg_end = len(output)  # tolerate an unterminated tag
        arg = output[arg_start:arg_end].strip()
        result = tools[name](arg)
        return f"Tool [{name}] → {result}"
    return None
33
+
34
# Agent loop
def agent_chat(user_input, history=None):
    """Answer *user_input* with the local LLM, executing any tool it requests.

    history: optional list of {"role": ..., "content": ...} dicts
        (mutated in place). Gradio's ChatInterface supplies it on every
        call; when called directly with no history a fresh one is created.
    Returns the assistant's response text, with any tool result appended.
    """
    # A mutable default argument (history=[]) is shared across every call,
    # leaking conversation state between unrelated chats — use the
    # None-sentinel idiom instead.
    if history is None:
        history = []
    history.append({"role": "user", "content": user_input})
    prompt = "\n".join(f"{m['role']}: {m['content']}" for m in history)

    output = llm(prompt=prompt, stop=["user:", "system:"], echo=False)
    response = output["choices"][0]["text"].strip()

    # If the model emitted a <tool:...> tag, run it and surface the result.
    tool_result = parse_tools(response)
    if tool_result:
        response += f"\n{tool_result}"

    history.append({"role": "assistant", "content": response})
    return response
48
+
49
+ gr.ChatInterface(fn=agent_chat).launch()