mobinln committed
Commit e5f6777 · verified · 1 Parent(s): 08bebda

Update app.py

Files changed (1)
  1. app.py +35 -11
app.py CHANGED
@@ -6,11 +6,7 @@ from openai import OpenAI
 
 subprocess.Popen("bash /home/user/app/start.sh", shell=True)
 
-client = OpenAI(
-    base_url="http://0.0.0.0:8000/v1",
-    api_key="sk-local",
-    timeout=600
-)
+client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="sk-local", timeout=600)
 
 
 def respond(
@@ -24,7 +20,7 @@ def respond(
     messages = []
     if system_message:
         messages = [{"role": "system", "content": system_message}]
-
+
     for user, assistant in history:
         if user:
             messages.append({"role": "user", "content": user})
@@ -41,14 +37,43 @@ def respond(
         temperature=temperature,
         top_p=top_p,
         stream=True,
-        tools=[{"type": "browser_search"}, {"type": "code_interpreter"}]
+        tools=[
+            {
+                "type": "function",
+                "function": {
+                    "name": "browser_search",
+                    "description": (
+                        "Search the web for a given query and return the most relevant results."
+                    ),
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "query": {
+                                "type": "string",
+                                "description": "The search query string.",
+                            },
+                            "max_results": {
+                                "type": "integer",
+                                "description": (
+                                    "Maximum number of search results to return. "
+                                    "If omitted the service will use its default."
+                                ),
+                                "default": 5,
+                            },
+                        },
+                        "required": ["query"],
+                    },
+                },
+            },
+            {"type": "code_interpreter"},
+        ],
     )
 
     print("messages", messages)
     output = ""
     for chunk in stream:
         delta = chunk.choices[0].delta
-
+
         try:
             output += delta.reasoning_content
         except:
@@ -60,9 +85,8 @@ def respond(
         print(f"[Error] {e}")
         yield "⚠️ Llama.cpp server error"
 
-demo = gr.ChatInterface(
-    respond
-)
+
+demo = gr.ChatInterface(respond)
 
 if __name__ == "__main__":
     demo.launch(show_api=False)
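
Note on the new tools schema: with browser_search declared as an explicit function tool, the streamed completion can carry tool-call fragments on delta.tool_calls instead of plain text. The sketch below shows one way a caller might accumulate those fragments; the collect_tool_calls helper and the dispatch step are illustrative assumptions, not part of this commit.

# Illustrative sketch only (not part of this commit): accumulate streamed
# tool-call fragments emitted for the browser_search function tool declared
# above. The helper name is an assumption; the Space's actual dispatch
# logic is not shown in the diff.
def collect_tool_calls(stream):
    """Gather function-call name/argument fragments from a streamed completion."""
    calls = {}  # tool-call index -> {"name": str, "arguments": str}
    for chunk in stream:
        delta = chunk.choices[0].delta
        for tc in delta.tool_calls or []:
            entry = calls.setdefault(tc.index, {"name": "", "arguments": ""})
            if tc.function is not None:
                if tc.function.name:
                    entry["name"] = tc.function.name
                if tc.function.arguments:
                    entry["arguments"] += tc.function.arguments
    return calls

# Usage sketch: for each accumulated call named "browser_search", parse the
# JSON arguments (e.g. with json.loads) before running the actual search.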