集中处理环境变量 (Centralize environment-variable handling)
Browse files
examples/lightrag_api_openai_compatible_demo.py
CHANGED
@@ -18,6 +18,13 @@ app = FastAPI(title="LightRAG API", description="API for RAG operations")
|
|
18 |
# Configure working directory
|
19 |
WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
|
20 |
print(f"WORKING_DIR: {WORKING_DIR}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
if not os.path.exists(WORKING_DIR):
|
22 |
os.mkdir(WORKING_DIR)
|
23 |
|
@@ -29,7 +36,7 @@ async def llm_model_func(
|
|
29 |
prompt, system_prompt=None, history_messages=[], **kwargs
|
30 |
) -> str:
|
31 |
return await openai_complete_if_cache(
|
32 |
-
|
33 |
prompt,
|
34 |
system_prompt=system_prompt,
|
35 |
history_messages=history_messages,
|
@@ -43,7 +50,7 @@ async def llm_model_func(
|
|
43 |
async def embedding_func(texts: list[str]) -> np.ndarray:
|
44 |
return await openai_embedding(
|
45 |
texts,
|
46 |
- model=
|
47 |
)
|
48 |
|
49 |
|
@@ -60,7 +67,7 @@ rag = LightRAG(
|
|
60 |
working_dir=WORKING_DIR,
|
61 |
llm_model_func=llm_model_func,
|
62 |
embedding_func=EmbeddingFunc(embedding_dim=asyncio.run(get_embedding_dim()),
|
63 |
- max_token_size=
|
64 |
func=embedding_func),
|
65 |
)
|
66 |
|
|
|
18 |
# Configure working directory
|
19 |
WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
|
20 |
print(f"WORKING_DIR: {WORKING_DIR}")
|
21 |
+ LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
|
22 |
+ print(f"LLM_MODEL: {LLM_MODEL}")
|
23 |
+ EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
|
24 |
+ print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
|
25 |
+ EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
|
26 |
+ print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
|
27 |
+
|
28 |
if not os.path.exists(WORKING_DIR):
|
29 |
os.mkdir(WORKING_DIR)
|
30 |
|
|
|
36 |
prompt, system_prompt=None, history_messages=[], **kwargs
|
37 |
) -> str:
|
38 |
return await openai_complete_if_cache(
|
39 |
+ LLM_MODEL,
|
40 |
prompt,
|
41 |
system_prompt=system_prompt,
|
42 |
history_messages=history_messages,
|
|
|
50 |
async def embedding_func(texts: list[str]) -> np.ndarray:
|
51 |
return await openai_embedding(
|
52 |
texts,
|
53 |
+ model=EMBEDDING_MODEL,
|
54 |
)
|
55 |
|
56 |
|
|
|
67 |
working_dir=WORKING_DIR,
|
68 |
llm_model_func=llm_model_func,
|
69 |
embedding_func=EmbeddingFunc(embedding_dim=asyncio.run(get_embedding_dim()),
|
70 |
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
|
71 |
func=embedding_func),
|
72 |
)
|
73 |
|