yym68686 committed
Commit 1af48fa · 1 Parent(s): a064fa5

Adapt to Gemini

Files changed (6):
  1. .gitignore +2 -1
  2. json_str/gemini/request.json +52 -0
  3. main.py +43 -113
  4. models.py +48 -0
  5. request.py +163 -0
  6. response.py +135 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
 api.json
 api.yaml
-.env
+.env
+__pycache__
json_str/gemini/request.json ADDED
@@ -0,0 +1,52 @@
+{
+    "contents": [
+        {
+            "role": "user",
+            "parts": [
+                {
+                    "text": "hi"
+                }
+            ]
+        },
+        {
+            "role": "model",
+            "parts": [
+                {
+                    "text": "Hi! \n\nHow are you today? What can I do for you? \n"
+                }
+            ]
+        },
+        {
+            "role": "user",
+            "parts": [
+                {
+                    "text": "How do I fix this?"
+                },
+                {
+                    "inlineData": {
+                        "mimeType": "image/jpeg",
+                        "data": "/9j/***"
+                    }
+                }
+            ]
+        }
+    ],
+    "safetySettings": [
+        {
+            "category": "HARM_CATEGORY_HARASSMENT",
+            "threshold": "BLOCK_NONE"
+        },
+        {
+            "category": "HARM_CATEGORY_HATE_SPEECH",
+            "threshold": "BLOCK_NONE"
+        },
+        {
+            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+            "threshold": "BLOCK_NONE"
+        },
+        {
+            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+            "threshold": "BLOCK_NONE"
+        }
+    ]
+}
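For context, this sample body is the shape Google's generateContent REST endpoint expects. A minimal sketch of posting it with httpx (the model name and the GEMINI_API_KEY environment variable are placeholders for illustration, not part of this commit):

# Sketch: send json_str/gemini/request.json straight to the Gemini REST API.
import os
import json
import httpx

with open("json_str/gemini/request.json") as f:
    payload = json.load(f)

url = ("https://generativelanguage.googleapis.com/v1beta/models/"
       "gemini-1.5-pro:generateContent")  # model name is a placeholder
resp = httpx.post(url, params={"key": os.environ["GEMINI_API_KEY"]}, json=payload)
# Gemini replies with candidates -> content -> parts -> text
print(resp.json()["candidates"][0]["content"]["parts"][0]["text"])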
main.py CHANGED
@@ -2,29 +2,19 @@ import os
 import json
 import httpx
 import yaml
+import traceback
 from contextlib import asynccontextmanager
 
 from fastapi import FastAPI, HTTPException, Depends
 from fastapi.responses import StreamingResponse
 from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
 
-from pydantic import BaseModel
-from typing import List, Dict, Any, Optional, Union
+from models import RequestModel
+from request import get_payload
+from response import fetch_response, fetch_response_stream
 
-# Simulated API-key database
-api_keys_db = {
-    "sk-KjjI60Yf0JFcsvgRmXqFwgGmWUd9GZnmi3KlvowmRWpWpQRo": "user1",
-    # more API keys can be added here
-}
-
-# Security dependency
-security = HTTPBearer()
-
-def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
-    token = credentials.credentials
-    if token not in api_keys_db:
-        raise HTTPException(status_code=403, detail="Invalid or missing API Key")
-    return token
+from typing import List, Dict
+from urllib.parse import urlparse
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
@@ -36,6 +26,15 @@ async def lifespan(app: FastAPI):
 
 app = FastAPI(lifespan=lifespan)
 
+# Simulated API-key database
+api_keys_db = {
+    "sk-KjjI60Yf0JFcsvgRmXqFwgGmWUd9GZnmi3KlvowmRWpWpQRo": "user1",
+    # more API keys can be added here
+}
+
+# Security dependency
+security = HTTPBearer()
+
 # Load the YAML config file
 def load_config():
     try:
@@ -51,107 +50,29 @@ def load_config():
 config = load_config()
 # print(config)
 
-# Function parameter model
-class FunctionParameter(BaseModel):
-    type: str
-    properties: Dict[str, Dict[str, str]]
-    required: List[str]
-
-# Function model
-class Function(BaseModel):
-    name: str
-    description: str
-    parameters: FunctionParameter
-
-# Tool model
-class Tool(BaseModel):
-    type: str
-    function: Function
-
-class ImageUrl(BaseModel):
-    url: str
-
-class ContentItem(BaseModel):
-    type: str
-    text: Optional[str] = None
-    image_url: Optional[ImageUrl] = None
-
-class Message(BaseModel):
-    role: str
-    name: Optional[str] = None
-    content: Union[str, List[ContentItem]]
-
-class RequestModel(BaseModel):
-    model: str
-    messages: List[Message]
-    logprobs: Optional[bool] = None
-    top_logprobs: Optional[int] = None
-    stream: Optional[bool] = None
-    include_usage: Optional[bool] = None
-    temperature: Optional[float] = 0.5
-    top_p: Optional[float] = 1.0
-    max_tokens: Optional[int] = None
-    presence_penalty: Optional[float] = 0.0
-    frequency_penalty: Optional[float] = 0.0
-    n: Optional[int] = 1
-    user: Optional[str] = None
-    tool_choice: Optional[str] = None
-    tools: Optional[List[Tool]] = None
-
-async def fetch_response_stream(client, url, headers, payload):
-    async with client.stream('POST', url, headers=headers, json=payload) as response:
-        async for chunk in response.aiter_bytes():
-            print(chunk.decode('utf-8'))
-            yield chunk
-
-async def fetch_response(client, url, headers, payload):
-    response = await client.post(url, headers=headers, json=payload)
-    return response.json()
-
 async def process_request(request: RequestModel, provider: Dict):
     print("provider: ", provider['provider'])
     url = provider['base_url']
-    headers = {
-        'Authorization': f"Bearer {provider['api']}",
-        'Content-Type': 'application/json'
-    }
-
-    # Convert the message format
-    messages = []
-    for msg in request.messages:
-        if isinstance(msg.content, list):
-            content = []
-            for item in msg.content:
-                if item.type == "text":
-                    content.append({"type": "text", "text": item.text})
-                elif item.type == "image_url":
-                    content.append({"type": "image_url", "image_url": item.image_url.dict()})
-        else:
-            content = msg.content
-        name = msg.name
-        if name:
-            messages.append({"role": msg.role, "name": name, "content": content})
-        else:
-            messages.append({"role": msg.role, "content": content})
-
-
-    payload = {
-        "model": request.model,
-        "messages": messages
-    }
-
-    for field, value in request.dict(exclude_unset=True).items():
-        if field not in ['model', 'messages'] and value is not None:
-            payload[field] = value
-
-    request_info = {
-        "url": url,
-        "headers": headers,
-        "payload": payload
-    }
-    print(f"Request details: {json.dumps(request_info, indent=2, ensure_ascii=False)}")
+    parsed_url = urlparse(url)
+    engine = None
+    if parsed_url.netloc == 'generativelanguage.googleapis.com':
+        engine = "gemini"
+    elif parsed_url.netloc == 'api.anthropic.com':
+        engine = "claude"
+    else:
+        engine = "gpt"
+
+    url, headers, payload = await get_payload(request, engine, provider)
+
+    # request_info = {
+    #     "url": url,
+    #     "headers": headers,
+    #     "payload": payload
+    # }
+    # print(f"Request details: {json.dumps(request_info, indent=2, ensure_ascii=False)}")
+
     if request.stream:
-        return StreamingResponse(fetch_response_stream(app.state.client, url, headers, payload), media_type="text/event-stream")
+        return StreamingResponse(fetch_response_stream(app.state.client, url, headers, payload, engine, request.model), media_type="text/event-stream")
     else:
        return await fetch_response(app.state.client, url, headers, payload)
@@ -191,13 +112,22 @@ class ModelRequestHandler:
                 response = await process_request(request, provider)
                 return response
             except Exception as e:
+                print('\033[31m')
                 print(f"Error with provider {provider['provider']}: {str(e)}")
+                traceback.print_exc()
+                print('\033[0m')
                 continue
 
         raise HTTPException(status_code=500, detail="All providers failed")
 
 model_handler = ModelRequestHandler()
 
+def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
+    token = credentials.credentials
+    if token not in api_keys_db:
+        raise HTTPException(status_code=403, detail="Invalid or missing API Key")
+    return token
+
 @app.post("/v1/chat/completions")
 async def request_model(request: RequestModel, token: str = Depends(verify_api_key)):
     return await model_handler.request_model(request, token)
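The endpoint itself is unchanged: clients still speak the OpenAI chat-completions format, and the Gemini/Claude/GPT branch is chosen purely from the provider's base_url host. A quick sketch of exercising the proxy (host and port are assumptions; the key is the sample one from api_keys_db above):

# Sketch: call the proxy's OpenAI-compatible endpoint.
import httpx

resp = httpx.post(
    "http://127.0.0.1:8000/v1/chat/completions",  # assumed dev host/port
    headers={"Authorization": "Bearer sk-KjjI60Yf0JFcsvgRmXqFwgGmWUd9GZnmi3KlvowmRWpWpQRo"},
    json={
        "model": "gemini-1.5-pro",  # placeholder model name
        "messages": [{"role": "user", "content": "hi"}],
        "stream": False,
    },
)
print(resp.json())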
models.py ADDED
@@ -0,0 +1,48 @@
+from pydantic import BaseModel
+from typing import List, Dict, Optional, Union
+
+class FunctionParameter(BaseModel):
+    type: str
+    properties: Dict[str, Dict[str, str]]
+    required: List[str]
+
+# Function model
+class Function(BaseModel):
+    name: str
+    description: str
+    parameters: FunctionParameter
+
+# Tool model
+class Tool(BaseModel):
+    type: str
+    function: Function
+
+class ImageUrl(BaseModel):
+    url: str
+
+class ContentItem(BaseModel):
+    type: str
+    text: Optional[str] = None
+    image_url: Optional[ImageUrl] = None
+
+class Message(BaseModel):
+    role: str
+    name: Optional[str] = None
+    content: Union[str, List[ContentItem]]
+
+class RequestModel(BaseModel):
+    model: str
+    messages: List[Message]
+    logprobs: Optional[bool] = None
+    top_logprobs: Optional[int] = None
+    stream: Optional[bool] = None
+    include_usage: Optional[bool] = None
+    temperature: Optional[float] = 0.5
+    top_p: Optional[float] = 1.0
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = 0.0
+    frequency_penalty: Optional[float] = 0.0
+    n: Optional[int] = 1
+    user: Optional[str] = None
+    tool_choice: Optional[str] = None
+    tools: Optional[List[Tool]] = None
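These models mirror the OpenAI chat-completions request schema, so an incoming body validates directly into RequestModel; a small sketch (field values are illustrative only):

# Sketch: validate an OpenAI-style request body against RequestModel.
from models import RequestModel

body = {
    "model": "gpt-4o",  # placeholder model name
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,/9j/..."}},
            ],
        }
    ],
    "stream": True,
}
req = RequestModel(**body)
print(req.messages[0].content[1].image_url.url)  # nested models are parsed too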
request.py ADDED
@@ -0,0 +1,163 @@
+from models import RequestModel
+
+async def get_image_message(base64_image, engine = None):
+    if "gpt" == engine:
+        return {
+            "type": "image_url",
+            "image_url": {
+                "url": base64_image,
+            }
+        }
+    if "claude" == engine:
+        return {
+            "type": "image",
+            "source": {
+                "type": "base64",
+                "media_type": "image/jpeg",
+                "data": base64_image.split(",")[1],
+            }
+        }
+    if "gemini" == engine:
+        return {
+            "inlineData": {
+                "mimeType": "image/jpeg",
+                "data": base64_image.split(",")[1],
+            }
+        }
+    raise ValueError("Unknown engine")
+
+async def get_text_message(role, message, engine = None):
+    if "gpt" == engine or "claude" == engine:
+        return {"type": "text", "text": message}
+    if "gemini" == engine:
+        return {"text": message}
+    raise ValueError("Unknown engine")
+
+async def get_gemini_payload(request, engine, provider):
+    headers = {
+        'Content-Type': 'application/json'
+    }
+    url = provider['base_url']
+    if request.stream:
+        gemini_stream = "streamGenerateContent"
+        url = url.format(model=request.model, stream=gemini_stream, api_key=provider['api'])
+
+    messages = []
+    for msg in request.messages:
+        if isinstance(msg.content, list):
+            content = []
+            for item in msg.content:
+                if item.type == "text":
+                    text_message = await get_text_message(msg.role, item.text, engine)
+                    # print("text_message", text_message)
+                    content.append(text_message)
+                elif item.type == "image_url":
+                    image_message = await get_image_message(item.image_url.url, engine)
+                    content.append(image_message)
+        else:
+            content = msg.content
+        if msg.role != "system":
+            messages.append({"role": msg.role, "parts": content})
+
+
+    payload = {
+        "contents": messages,
+        "safetySettings": [
+            {
+                "category": "HARM_CATEGORY_HARASSMENT",
+                "threshold": "BLOCK_NONE"
+            },
+            {
+                "category": "HARM_CATEGORY_HATE_SPEECH",
+                "threshold": "BLOCK_NONE"
+            },
+            {
+                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                "threshold": "BLOCK_NONE"
+            },
+            {
+                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                "threshold": "BLOCK_NONE"
+            }
+        ]
+    }
+
+    miss_fields = [
+        'model',
+        'messages',
+        'stream',
+        'tools',
+        'tool_choice',
+        'temperature',
+        'top_p',
+        'max_tokens',
+        'presence_penalty',
+        'frequency_penalty',
+        'n',
+        'user',
+        'include_usage',
+        'logprobs',
+        'top_logprobs'
+    ]
+
+    for field, value in request.model_dump(exclude_unset=True).items():
+        if field not in miss_fields and value is not None:
+            payload[field] = value
+
+    return url, headers, payload
+
+async def get_gpt_payload(request, engine, provider):
+    headers = {
+        'Authorization': f"Bearer {provider['api']}",
+        'Content-Type': 'application/json'
+    }
+    url = provider['base_url']
+    url = url.format(model=request.model, stream=request.stream, api_key=provider['api'])
+
+    messages = []
+    for msg in request.messages:
+        if isinstance(msg.content, list):
+            content = []
+            for item in msg.content:
+                if item.type == "text":
+                    text_message = await get_text_message(msg.role, item.text, engine)
+                    content.append(text_message)
+                elif item.type == "image_url":
+                    image_message = await get_image_message(item.image_url.url, engine)
+                    content.append(image_message)
+        else:
+            content = msg.content
+        name = msg.name
+        if name:
+            messages.append({"role": msg.role, "name": name, "content": content})
+        else:
+            messages.append({"role": msg.role, "content": content})
+
+    payload = {
+        "model": request.model,
+        "messages": messages,
+    }
+
+    miss_fields = [
+        'model',
+        'messages'
+    ]
+
+    for field, value in request.model_dump(exclude_unset=True).items():
+        if field not in miss_fields and value is not None:
+            payload[field] = value
+
+    return url, headers, payload
+
+async def get_claude_payload(request, engine, provider):
+    pass
+
+async def get_payload(request: RequestModel, engine, provider):
+    if engine == "gemini":
+        return await get_gemini_payload(request, engine, provider)
+    elif engine == "claude":
+        return await get_claude_payload(request, engine, provider)
+    elif engine == "gpt":
+        return await get_gpt_payload(request, engine, provider)
+    else:
+        raise ValueError("Unknown payload")
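get_gemini_payload fills {model}, {stream} and {api_key} placeholders into the provider's base_url with str.format, so the Gemini base_url in the YAML config presumably carries those placeholders. A sketch of how such a template would expand (the exact template string is an assumption, not taken from this commit):

# Sketch: how a templated base_url would expand in get_gemini_payload.
base_url = ("https://generativelanguage.googleapis.com/v1beta/"
            "models/{model}:{stream}?key={api_key}")  # assumed template
url = base_url.format(
    model="gemini-1.5-pro",          # request.model (placeholder name)
    stream="streamGenerateContent",  # set when request.stream is true
    api_key="AIza...",               # provider['api'] (redacted)
)
print(url)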
response.py ADDED
@@ -0,0 +1,135 @@
+from datetime import datetime
+import json
+import httpx
+
+async def generate_sse_response(timestamp, model, content):
+    sample_data = {
+        "id": "chatcmpl-9ijPeRHa0wtyA2G8wq5z8FC3wGMzc",
+        "object": "chat.completion.chunk",
+        "created": timestamp,
+        "model": model,
+        "system_fingerprint": "fp_d576307f90",
+        "choices": [
+            {
+                "index": 0,
+                "delta": {"content": content},
+                "logprobs": None,
+                "finish_reason": None
+            }
+        ],
+        "usage": None
+    }
+    json_data = json.dumps(sample_data, ensure_ascii=False)
+
+    # Build the SSE response
+    sse_response = f"data: {json_data}\n\n"
+
+    return sse_response
+
+async def fetch_gemini_response_stream(client, url, headers, payload, model):
+    try:
+        timestamp = datetime.timestamp(datetime.now())
+        async with client.stream('POST', url, headers=headers, json=payload) as response:
+            buffer = ""
+            async for chunk in response.aiter_text():
+                buffer += chunk
+                while "\n" in buffer:
+                    line, buffer = buffer.split("\n", 1)
+                    print(line)
+                    if line and '\"text\": \"' in line:
+                        try:
+                            json_data = json.loads( "{" + line + "}")
+                            content = json_data.get('text', '')
+                            content = "\n".join(content.split("\\n"))
+                            sse_string = await generate_sse_response(timestamp, model, content)
+                            yield sse_string
+                        except json.JSONDecodeError:
+                            print(f"Failed to parse JSON: {line}")
+
+            # Handle any content left in the buffer
+            if buffer:
+                # print(buffer)
+                if '\"text\": \"' in buffer:
+                    try:
+                        json_data = json.loads(buffer)
+                        content = json_data.get('text', '')
+                        content = "\n".join(content.split("\\n"))
+                        sse_string = await generate_sse_response(timestamp, model, content)
+                        yield sse_string
+                    except json.JSONDecodeError:
+                        print(f"Failed to parse JSON: {buffer}")
+
+        yield "data: [DONE]\n\n"
+    except httpx.ConnectError as e:
+        print(f"Connection error: {e}")
+
+async def fetch_gpt_response_stream(client, url, headers, payload):
+    try:
+        async with client.stream('POST', url, headers=headers, json=payload) as response:
+            async for chunk in response.aiter_bytes():
+                print(chunk.decode('utf-8'))
+                yield chunk
+    except httpx.ConnectError as e:
+        print(f"Connection error: {e}")
+
+async def fetch_claude_response_stream(client, url, headers, payload, engine, model):
+    try:
+        timestamp = datetime.timestamp(datetime.now())
+        async with client.stream('POST', url, headers=headers, json=payload) as response:
+            buffer = ""
+            async for chunk in response.aiter_text():
+                buffer += chunk
+                while "\n" in buffer:
+                    line, buffer = buffer.split("\n", 1)
+                    # print(line)
+                    if engine == "gemini":
+                        if line and '\"text\": \"' in line:
+                            try:
+                                json_data = json.loads( "{" + line + "}")
+                                content = json_data.get('text', '')
+                                content = "\n".join(content.split("\\n"))
+                                sse_string = await generate_sse_response(timestamp, model, content)
+                                yield sse_string
+                            except json.JSONDecodeError:
+                                print(f"Failed to parse JSON: {line}")
+                    else:
+                        yield line + "\n"
+
+            # Handle any content left in the buffer
+            if buffer:
+                # print(buffer)
+                if engine == "gemini":
+                    if '\"text\": \"' in buffer:
+                        try:
+                            json_data = json.loads(buffer)
+                            content = json_data.get('text', '')
+                            content = "\n".join(content.split("\\n"))
+                            sse_string = await generate_sse_response(timestamp, model, content)
+                            yield sse_string
+                        except json.JSONDecodeError:
+                            print(f"Failed to parse JSON: {buffer}")
+                else:
+                    yield buffer
+
+        if engine == "gemini":
+            yield "data: [DONE]\n\n"
+    except httpx.ConnectError as e:
+        print(f"Connection error: {e}")
+
+async def fetch_response(client, url, headers, payload):
+    response = await client.post(url, headers=headers, json=payload)
+    return response.json()
+
+async def fetch_response_stream(client, url, headers, payload, engine, model):
+    print(f"Engine: {engine}")
+    if engine == "gemini":
+        async for chunk in fetch_gemini_response_stream(client, url, headers, payload, model):
+            yield chunk
+    elif engine == "claude":
+        async for chunk in fetch_claude_response_stream(client, url, headers, payload, engine, model):
+            yield chunk
+    elif engine == "gpt":
+        async for chunk in fetch_gpt_response_stream(client, url, headers, payload):
+            yield chunk
+    else:
+        raise ValueError("Unknown response")
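Each extracted Gemini "text" fragment is re-wrapped by generate_sse_response into an OpenAI-style chat.completion.chunk SSE event, so downstream OpenAI clients can consume the stream unchanged. A sketch of inspecting one emitted frame (model name is a placeholder):

# Sketch: inspect one re-packaged SSE frame.
import asyncio
from datetime import datetime
from response import generate_sse_response

async def demo():
    ts = datetime.timestamp(datetime.now())
    frame = await generate_sse_response(ts, "gemini-1.5-pro", "Hello")
    print(frame)  # data: {"id": "chatcmpl-...", "object": "chat.completion.chunk", ...}

asyncio.run(demo())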