# Streaming response adapters: translate Gemini / Claude / OpenAI-compatible
# upstream HTTP streams into OpenAI-style chat.completion.chunk SSE output.
import json
import httpx
from datetime import datetime
from log_config import logger
async def generate_sse_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, tokens_use=None, total_tokens=None):
    """Build one OpenAI-style ``chat.completion.chunk`` SSE line.

    Exactly one delta shape is chosen, later checks overriding earlier ones:
    plain text content, tool-call argument fragment (``function_call_content``),
    tool-call header (``tools_id`` + ``function_call_name``), or a role
    announcement (``role``).

    Args:
        timestamp: value placed in the chunk's ``created`` field.
        model: model name echoed in the chunk.
        content: text delta for a plain content chunk.
        tools_id: tool-call id, used together with ``function_call_name``.
        function_call_name: tool/function name for a tool-call header chunk.
        function_call_content: partial JSON arguments for a tool-call delta.
        role: when set, emits a role-announcement delta with empty content.
        tokens_use, total_tokens: accepted for interface compatibility;
            currently unused.

    Returns:
        str: ``"data: {...}\\n\\n"`` — a single SSE frame.
    """
    delta = {"content": content}
    if function_call_content:
        delta = {"tool_calls": [{"index": 0, "function": {"arguments": function_call_content}}]}
    if tools_id and function_call_name:
        delta = {"tool_calls": [{"index": 0, "id": tools_id, "type": "function", "function": {"name": function_call_name, "arguments": ""}}]}
    if role:
        delta = {"role": role, "content": ""}
    chunk = {
        "id": "chatcmpl-9ijPeRHa0wtyA2G8wq5z8FC3wGMzc",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "system_fingerprint": "fp_d576307f90",
        "choices": [
            {
                "index": 0,
                "delta": delta,
                "logprobs": None,
                "finish_reason": None,
            }
        ],
        "usage": None,
    }
    # Build the SSE frame around the serialized chunk.
    return f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
async def fetch_gemini_response_stream(client, url, headers, payload, model):
    """Stream a Gemini API response and re-emit it as OpenAI-style SSE chunks.

    Args:
        client: async HTTP client providing ``stream('POST', ...)`` (httpx-style).
        url: Gemini endpoint URL.
        headers: HTTP request headers.
        payload: JSON request body.
        model: model name echoed into each generated SSE chunk.

    Yields:
        str SSE frames (via generate_sse_response), or a single dict
        describing the failure when the upstream returns a non-200 status.
    """
    timestamp = datetime.timestamp(datetime.now())
    async with client.stream('POST', url, headers=headers, json=payload) as response:
        if response.status_code != 200:
            error_message = await response.aread()
            error_str = error_message.decode('utf-8', errors='replace')
            try:
                error_json = json.loads(error_str)
            except json.JSONDecodeError:
                error_json = error_str
            # Fix: label the error with THIS function's name (was mislabeled
            # "fetch_gpt_response_stream") and stop — the body is consumed.
            yield {"error": f"fetch_gemini_response_stream HTTP Error {response.status_code}", "details": error_json}
            return
        buffer = ""
        async for chunk in response.aiter_text():
            buffer += chunk
            # Process only complete lines; keep the partial tail buffered.
            while "\n" in buffer:
                line, buffer = buffer.split("\n", 1)
                if line and '\"text\": \"' in line:
                    try:
                        # Each matching line is a bare `"text": "..."` JSON
                        # member; wrap it in braces so it parses as an object.
                        json_data = json.loads("{" + line + "}")
                        content = json_data.get('text', '')
                        # Turn literal "\n" escape sequences into real newlines.
                        content = "\n".join(content.split("\\n"))
                        sse_string = await generate_sse_response(timestamp, model, content)
                        yield sse_string
                    except json.JSONDecodeError:
                        logger.error(f"无法解析JSON: {line}")
async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects=5):
    """Stream an OpenAI-compatible endpoint, forwarding its raw SSE lines.

    Follows JavaScript-based redirects (HTML pages that assign
    ``window.location.href``) by re-issuing the POST against the new URL,
    up to ``max_redirects`` times.

    Args:
        client: async HTTP client providing ``stream('POST', ...)`` (httpx-style).
        url: endpoint URL (may be rewritten on redirect).
        headers: HTTP request headers.
        payload: JSON request body.
        max_redirects: maximum number of JS redirects to follow.

    Yields:
        str: raw SSE lines (with trailing newline), or
        dict: an error descriptor on HTTP / protocol failure or redirect loop.
    """
    # Fix: import once per call instead of inside the per-chunk hot loop.
    import re

    redirect_count = 0
    while redirect_count < max_redirects:
        async with client.stream('POST', url, headers=headers, json=payload) as response:
            if response.status_code != 200:
                error_message = await response.aread()
                error_str = error_message.decode('utf-8', errors='replace')
                try:
                    error_json = json.loads(error_str)
                except json.JSONDecodeError:
                    error_json = error_str
                yield {"error": f"fetch_gpt_response_stream HTTP Error {response.status_code}", "details": error_json}
                return
            buffer = ""
            try:
                async for chunk in response.aiter_text():
                    buffer += chunk
                    if chunk.startswith("<script"):
                        # Some gateways answer with an HTML page that performs
                        # a JS redirect instead of an HTTP 3xx; extract the URL.
                        redirect_match = re.search(r"window\.location\.href\s*=\s*'([^']+)'", chunk)
                        if redirect_match:
                            new_url = redirect_match.group(1)
                            if not new_url.startswith('http'):
                                # Relative path: prepend scheme://host of the
                                # current URL.
                                base_url = '/'.join(url.split('/')[:3])
                                new_url = base_url + new_url
                            url = new_url
                            redirect_count += 1
                            break  # re-issue the request against new_url
                    # Receiving real data means we are past any redirect page.
                    redirect_count = 0
                    while "\n" in buffer:
                        line, buffer = buffer.split("\n", 1)
                        # Skip empty data frames and SSE comment lines.
                        if line and line != "data: " and line != "data:" and not line.startswith(": "):
                            yield line + "\n"
            except httpx.RemoteProtocolError as e:
                yield {"error": f"fetch_gpt_response_stream RemoteProtocolError {e.__class__.__name__}", "details": str(e)}
                return
        if redirect_count == 0:
            # Stream finished without a pending redirect.
            return
    yield {"error": "Too many redirects", "details": f"Reached maximum of {max_redirects} redirects"}
async def fetch_claude_response_stream(client, url, headers, payload, model):
    """Stream an Anthropic (Claude) response and re-emit OpenAI-style SSE.

    Translates Claude's event stream (``message_start``, ``content_block_start``,
    ``content_block_delta``, ...) into ``chat.completion.chunk`` frames via
    generate_sse_response.

    Args:
        client: async HTTP client providing ``stream('POST', ...)`` (httpx-style).
        url: Anthropic endpoint URL.
        headers: HTTP request headers.
        payload: JSON request body.
        model: model name echoed into each generated SSE chunk.

    Yields:
        str SSE frames, or a single dict describing the failure when the
        upstream returns a non-200 status.
    """
    timestamp = datetime.timestamp(datetime.now())
    async with client.stream('POST', url, headers=headers, json=payload) as response:
        if response.status_code != 200:
            error_message = await response.aread()
            error_str = error_message.decode('utf-8', errors='replace')
            try:
                error_json = json.loads(error_str)
            except json.JSONDecodeError:
                error_json = error_str
            yield {"error": f"fetch_claude_response_stream HTTP Error {response.status_code}", "details": error_json}
            # Fix: stop here (matches fetch_gpt_response_stream) — the body
            # has already been consumed by aread().
            return
        buffer = ""
        async for chunk in response.aiter_text():
            buffer += chunk
            # Process only complete lines; keep the partial tail buffered.
            while "\n" in buffer:
                line, buffer = buffer.split("\n", 1)
                if line.startswith("data:"):
                    line = line[5:]
                    if line.startswith(" "):
                        line = line[1:]
                    try:
                        resp: dict = json.loads(line)
                    except json.JSONDecodeError:
                        # Fix: a malformed or empty data frame previously
                        # crashed the whole stream; skip it instead (same
                        # guard style as fetch_gemini_response_stream).
                        logger.error(f"无法解析JSON: {line}")
                        continue
                    message = resp.get("message")
                    if message:
                        tokens_use = resp.get("usage")
                        role = message.get("role")
                        if role:
                            sse_string = await generate_sse_response(timestamp, model, None, None, None, None, role)
                            yield sse_string
                        if tokens_use:
                            # Input+output token total; computed but not
                            # currently forwarded downstream.
                            total_tokens = tokens_use["input_tokens"] + tokens_use["output_tokens"]
                    tool_use = resp.get("content_block")
                    tools_id = None
                    function_call_name = None
                    if tool_use and "tool_use" == tool_use['type']:
                        tools_id = tool_use["id"]
                        if "name" in tool_use:
                            function_call_name = tool_use["name"]
                            sse_string = await generate_sse_response(timestamp, model, None, tools_id, function_call_name, None)
                            yield sse_string
                    delta = resp.get("delta")
                    if not delta:
                        continue
                    if "text" in delta:
                        # Plain text delta from a content block.
                        content = delta["text"]
                        sse_string = await generate_sse_response(timestamp, model, content, None, None)
                        yield sse_string
                    if "partial_json" in delta:
                        # Tool-call arguments arrive as input_json_delta
                        # fragments, e.g. {"type":"input_json_delta","partial_json":""}.
                        function_call_content = delta["partial_json"]
                        sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
                        yield sse_string
async def fetch_response(client, url, headers, payload):
    """POST the payload once (non-streaming) and return the parsed JSON body.

    Connection and read-timeout failures are reported as error dicts rather
    than raised, so callers can treat all outcomes uniformly.
    """
    try:
        resp = await client.post(url, headers=headers, json=payload)
        return resp.json()
    except httpx.ConnectError:
        return {"error": "500", "details": "fetch_response Connect Error"}
    except httpx.ReadTimeout:
        return {"error": "500", "details": "fetch_response Read Response Timeout"}
async def fetch_response_stream(client, url, headers, payload, engine, model):
    """Dispatch to the engine-specific streaming fetcher and relay its chunks.

    Connection and read-timeout failures are yielded as error dicts rather
    than raised.

    Raises:
        ValueError: if ``engine`` is not gemini / claude / gpt / openrouter
        (note: surfaced as-is, not converted to an error dict).
    """
    try:
        if engine == "gemini":
            stream = fetch_gemini_response_stream(client, url, headers, payload, model)
        elif engine == "claude":
            stream = fetch_claude_response_stream(client, url, headers, payload, model)
        elif engine in ("gpt", "openrouter"):
            # Both speak the OpenAI-compatible SSE protocol.
            stream = fetch_gpt_response_stream(client, url, headers, payload)
        else:
            raise ValueError("Unknown response")
        async for chunk in stream:
            yield chunk
    except httpx.ConnectError:
        yield {"error": "500", "details": "fetch_response_stream Connect Error"}
    except httpx.ReadTimeout:
        yield {"error": "500", "details": "fetch_response_stream Read Response Timeout"}