File size: 6,479 Bytes
3f0a3dd
cff82fa
0b0a2f7
52bcfe4
1ddc959
1af48fa
1ddc959
 
52bcfe4
1ddc959
 
 
1af48fa
 
 
0b0a2f7
1af48fa
 
1ddc959
 
 
 
 
 
 
 
 
 
0b0a2f7
1af48fa
 
 
 
 
 
 
 
 
1ddc959
0b0a2f7
 
1ddc959
 
0b0a2f7
1ddc959
0b0a2f7
1ddc959
 
0b0a2f7
 
 
52bcfe4
 
 
 
 
 
 
 
 
 
0b0a2f7
3f0a3dd
 
 
1af48fa
3e3ea9a
1af48fa
 
 
3e3ea9a
1af48fa
 
 
3e3ea9a
1af48fa
 
 
52bcfe4
 
 
 
 
 
1af48fa
3f0a3dd
52bcfe4
 
3f0a3dd
 
 
 
 
 
 
 
52bcfe4
 
 
 
 
3f0a3dd
 
 
 
e09244d
3f0a3dd
 
 
 
 
 
 
b3ed199
0b0a2f7
b3ed199
3f0a3dd
b3ed199
3f0a3dd
b3ed199
d780861
 
b3ed199
d780861
b3ed199
 
3f0a3dd
 
 
 
 
1af48fa
3f0a3dd
1af48fa
 
3f0a3dd
b3ed199
3f0a3dd
 
 
 
52bcfe4
 
 
 
 
 
 
 
 
 
 
 
1af48fa
 
 
 
 
 
3f0a3dd
 
 
0b0a2f7
3790bd8
 
f27ccad
3790bd8
 
52bcfe4
f27ccad
 
 
 
 
 
 
 
 
3790bd8
 
 
 
 
 
 
 
 
 
0b0a2f7
 
1ddc959
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
import os
import json
import httpx
import logging
import yaml
import traceback
from contextlib import asynccontextmanager

from fastapi import FastAPI, Request, HTTPException, Depends
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

from models import RequestModel
from request import get_payload
from response import fetch_response, fetch_response_stream

from typing import List, Dict
from urllib.parse import urlparse

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create a single shared async HTTP client for all outbound requests.
    app.state.client = httpx.AsyncClient()
    yield
    # Shutdown: close the client and release its connection pool.
    await app.state.client.aclose()

app = FastAPI(lifespan=lifespan)

# In-memory stand-in for an API-key database: maps API key -> user id.
api_keys_db = {
    "sk-KjjI60Yf0JFcsvgRmXqFwgGmWUd9GZnmi3KlvowmRWpWpQRo": "user1",
    # More API keys can be added here.
}

# Bearer-token security dependency used by protected endpoints.
security = HTTPBearer()

# Read the provider configuration from 'api.yaml'.
def load_config():
    """Read and parse the YAML provider configuration.

    Returns:
        The parsed list of provider dicts, or an empty list when the file
        is missing, malformed, or empty.
    """
    try:
        with open('api.yaml', 'r') as f:
            # safe_load returns None for an empty file; normalize to []
            # so the module-level normalization loop doesn't crash.
            return yaml.safe_load(f) or []
    except FileNotFoundError:
        # Bug fix: these messages previously named 'config.yaml' even
        # though the code opens 'api.yaml'.
        print("配置文件 'api.yaml' 未找到。请确保文件存在于正确的位置。")
        return []
    except yaml.YAMLError:
        print("配置文件 'api.yaml' 格式不正确。请检查YAML格式。")
        return []

# Load the provider config and normalize each provider's model list into a
# lookup dict: {exposed_model_name: upstream_model_name}.
config = load_config()
for index, provider in enumerate(config):
    model_dict = {}
    for model in provider['model']:
        if isinstance(model, str):
            # Plain string entry: exposed name equals the upstream name.
            model_dict[model] = model
        elif isinstance(model, dict):
            # Dict entry maps upstream_name -> exposed_name; invert it so
            # lookups are keyed by the exposed name.
            model_dict.update({value: key for key, value in model.items()})
    provider['model'] = model_dict
    config[index] = provider
# print(json.dumps(config, indent=4, ensure_ascii=False))

async def process_request(request: RequestModel, provider: Dict):
    """Dispatch one chat request to a provider's upstream API.

    Picks the wire protocol ("engine") from the provider's base_url, builds
    the upstream request via get_payload, and returns either a streaming or
    a plain JSON response depending on request.stream.
    """
    print("provider: ", provider['provider'])
    url = provider['base_url']
    parsed_url = urlparse(url)
    # Infer the upstream protocol from the host/path.
    if parsed_url.netloc == 'generativelanguage.googleapis.com':
        engine = "gemini"
    # Bug fix: Anthropic's endpoint is /v1/messages (plural), so the old
    # endswith("v1/message") never matched real Claude-compatible URLs;
    # accept both spellings for backward compatibility.
    elif parsed_url.netloc == 'api.anthropic.com' or parsed_url.path.endswith(("v1/message", "v1/messages")):
        engine = "claude"
    else:
        # Default to the OpenAI-compatible protocol.
        engine = "gpt"
    print(engine)

    url, headers, payload = await get_payload(request, engine, provider)

    if request.stream:
        # Map the exposed model name to the provider's upstream model name.
        model = provider['model'][request.model]
        return StreamingResponse(
            fetch_response_stream(app.state.client, url, headers, payload, engine, model),
            media_type="text/event-stream",
        )
    else:
        return await fetch_response(app.state.client, url, headers, payload)

class ModelRequestHandler:
    """Routes a chat request to a provider that serves the requested model.

    Tries each matching provider in turn until one succeeds. When the
    USE_ROUND_ROBIN environment variable is 'true', the starting provider
    rotates across requests to spread load.
    """

    def __init__(self):
        # Index of the provider used by the previous request; -1 before any.
        self.last_provider_index = -1

    def get_matching_providers(self, model_name):
        """Return every configured provider that exposes model_name."""
        return [provider for provider in config if model_name in provider['model'].keys()]

    async def request_model(self, request: RequestModel, token: str):
        """Resolve providers for request.model and attempt the request.

        Raises:
            HTTPException(404): no provider serves the requested model.
            HTTPException(500): every matching provider failed.
        """
        model_name = request.model
        matching_providers = self.get_matching_providers(model_name)

        if not matching_providers:
            raise HTTPException(status_code=404, detail="No matching model found")

        # Round-robin is opt-in via the USE_ROUND_ROBIN environment variable.
        use_round_robin = os.environ.get('USE_ROUND_ROBIN', 'false').lower() == 'true'

        return await self.try_all_providers(request, matching_providers, use_round_robin)

    async def try_all_providers(self, request: RequestModel, providers: List[Dict], use_round_robin: bool):
        """Try providers until one succeeds; raise 500 if all fail.

        Bug fix: previously both modes computed the same index sequence
        (i % len == i for i < len), so the round-robin flag was a no-op.
        Round-robin now starts after the provider used by the last request.
        """
        num_providers = len(providers)
        start = (self.last_provider_index + 1) % num_providers if use_round_robin else 0

        for offset in range(num_providers):
            self.last_provider_index = (start + offset) % num_providers
            provider = providers[self.last_provider_index]
            try:
                return await process_request(request, provider)
            except Exception as e:
                # Best-effort failover: log the error (in red) and move on
                # to the next matching provider.
                print('\033[31m')
                print(f"Error with provider {provider['provider']}: {str(e)}")
                traceback.print_exc()
                print('\033[0m')
                continue

        raise HTTPException(status_code=500, detail="All providers failed")

# Singleton handler shared by all requests.
model_handler = ModelRequestHandler()

@app.middleware("http")
async def log_requests(request: Request, call_next):
    # 打印请求信息
    logging.info(f"Request: {request.method} {request.url}")
    # 打印请求体(如果有)
    if request.method in ["POST", "PUT", "PATCH"]:
        body = await request.body()
        logging.info(f"Request Body: {body.decode('utf-8')}")

    response = await call_next(request)
    return response

def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Validate the bearer token against the known API keys and return it.

    Raises:
        HTTPException(403): the token is not in api_keys_db.
    """
    api_key = credentials.credentials
    if api_key in api_keys_db:
        return api_key
    raise HTTPException(status_code=403, detail="Invalid or missing API Key")

@app.post("/v1/chat/completions")
async def request_model(request: RequestModel, token: str = Depends(verify_api_key)):
    return await model_handler.request_model(request, token)

def get_all_models():
    """Build the /v1/models payload: one entry per unique model name.

    The first provider that lists a model is credited as its owner.
    """
    seen = set()
    all_models = []

    for provider in config:
        for name in provider['model'].keys():
            if name in seen:
                continue
            seen.add(name)
            all_models.append({
                "id": name,
                "object": "model",
                "created": 1720524448858,
                "owned_by": provider['provider'],
            })

    return all_models

@app.get("/v1/models")
async def list_models():
    models = get_all_models()
    return {
        "object": "list",
        "data": models
    }
if __name__ == '__main__':
    # Run the development server with auto-reload when executed directly.
    import uvicorn
    uvicorn.run("__main__:app", host="0.0.0.0", port=8000, reload=True)