import os
import json
import httpx
import yaml
import traceback
from contextlib import asynccontextmanager

from fastapi import FastAPI, HTTPException, Depends
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

from models import RequestModel
from request import get_payload
from response import fetch_response, fetch_response_stream

from typing import List, Dict
from urllib.parse import urlparse

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create a shared async HTTP client for the app
    app.state.client = httpx.AsyncClient()
    yield
    # Shutdown: close the shared client
    await app.state.client.aclose()

app = FastAPI(lifespan=lifespan)

# Stand-in for an API key database
api_keys_db = {
    "sk-KjjI60Yf0JFcsvgRmXqFwgGmWUd9GZnmi3KlvowmRWpWpQRo": "user1",
    # More API keys can be added here
}

# Security dependency (HTTP Bearer authentication)
security = HTTPBearer()

# Load the YAML configuration file
def load_config():
    try:
        with open('api.yaml', 'r') as f:
            return yaml.safe_load(f)
    except FileNotFoundError:
        print("配置文件 'config.yaml' 未找到。请确保文件存在于正确的位置。")
        return []
    except yaml.YAMLError:
        print("配置文件 'config.yaml' 格式不正确。请检查YAML格式。")
        return []

config = load_config()
# print(config)
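
# A minimal sketch of what api.yaml is assumed to look like, based only on the
# keys this file reads (provider, base_url, model). The credential field name
# ("api") and the concrete values are illustrative assumptions, not the
# project's documented schema:
#
#   - provider: openai
#     base_url: https://api.openai.com/v1/chat/completions
#     api: sk-...            # hypothetical key field consumed by get_payload
#     model:
#       - gpt-4o
#       - gpt-3.5-turbo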

async def process_request(request: RequestModel, provider: Dict):
    print("provider: ", provider['provider'])
    url = provider['base_url']
    parsed_url = urlparse(url)
    engine = None
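    # Pick the upstream engine from the provider's host; anything that is not
    # Gemini or Claude is treated as an OpenAI-compatible ("gpt") endpoint.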
    if parsed_url.netloc == 'generativelanguage.googleapis.com':
        engine = "gemini"
    elif parsed_url.netloc == 'api.anthropic.com':
        engine = "claude"
    else:
        engine = "gpt"

    url, headers, payload = await get_payload(request, engine, provider)

    request_info = {
        "url": url,
        "headers": headers,
        "payload": payload
    }
    print(f"Request details: {json.dumps(request_info, indent=4, ensure_ascii=False)}")

    if request.stream:
        return StreamingResponse(fetch_response_stream(app.state.client, url, headers, payload, engine, request.model), media_type="text/event-stream")
    else:
        return await fetch_response(app.state.client, url, headers, payload)

class ModelRequestHandler:
    def __init__(self):
        self.last_provider_index = -1

    def get_matching_providers(self, model_name):
        return [provider for provider in config if model_name in provider['model']]

    async def request_model(self, request: RequestModel, token: str):
        model_name = request.model
        matching_providers = self.get_matching_providers(model_name)
        # print("matching_providers", json.dumps(matching_providers, indent=4, ensure_ascii=False))

        if not matching_providers:
            raise HTTPException(status_code=404, detail="No matching model found")

        # Check whether round-robin provider selection is enabled (USE_ROUND_ROBIN=true)
        use_round_robin = os.environ.get('USE_ROUND_ROBIN', 'false').lower() == 'true'

        return await self.try_all_providers(request, matching_providers, use_round_robin)

    async def try_all_providers(self, request: RequestModel, providers: List[Dict], use_round_robin: bool):
        num_providers = len(providers)

        for i in range(num_providers):
            if use_round_robin:
                # Round-robin mode: always start from the first provider
                self.last_provider_index = i % num_providers
            else:
                # Non-round-robin mode: try providers in order
                self.last_provider_index = i

            provider = providers[self.last_provider_index]
            try:
                response = await process_request(request, provider)
                return response
            except Exception as e:
                print('\033[31m')
                print(f"Error with provider {provider['provider']}: {str(e)}")
                traceback.print_exc()
                print('\033[0m')
                continue

        raise HTTPException(status_code=500, detail="All providers failed")

model_handler = ModelRequestHandler()

def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    token = credentials.credentials
    if token not in api_keys_db:
        raise HTTPException(status_code=403, detail="Invalid or missing API Key")
    return token

@app.post("/v1/chat/completions")
async def request_model(request: RequestModel, token: str = Depends(verify_api_key)):
    return await model_handler.request_model(request, token)
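
# Example call (a sketch; it assumes RequestModel follows the OpenAI chat schema
# with "messages" in addition to the "model" and "stream" fields used above):
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Authorization: Bearer sk-KjjI60Yf0JFcsvgRmXqFwgGmWUd9GZnmi3KlvowmRWpWpQRo" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}], "stream": false}'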

if __name__ == '__main__':
    import uvicorn
    uvicorn.run("__main__:app", host="0.0.0.0", port=8000, reload=True)