Commit 94cd4d3 by Larfii
Parent(s): 7b6db35

Fix: unexpected keyword argument error.
Changed files:
- README.md (+1 -1)
- examples/lightrag_api_openai_compatible_demo.py (+1 -1)
- examples/lightrag_api_oracle_demo.py (+1 -1)
- examples/lightrag_azure_openai_demo.py (+1 -1)
- examples/lightrag_lmdeploy_demo.py (+1 -1)
- examples/lightrag_openai_compatible_demo.py (+1 -1)
- examples/lightrag_oracle_demo.py (+1 -1)
- examples/lightrag_siliconcloud_demo.py (+1 -1)
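Why the one-line change works: the commit message and the diffs below imply that LightRAG's query pipeline invokes the configured model function with a `keyword_extraction` flag when it extracts query keywords. With the old signature, that flag fell into `**kwargs` and was forwarded verbatim to the backend helper (`openai_complete_if_cache` and friends), whose underlying call rejects unknown keywords. Declaring `keyword_extraction=False` explicitly captures the flag before forwarding. A minimal, self-contained sketch of the failure mode (`backend_complete` below is a stand-in for illustration, not LightRAG code):

```python
import asyncio


async def backend_complete(prompt, system_prompt=None, history_messages=[]):
    # Stand-in for openai_complete_if_cache: like the real client call,
    # it raises TypeError on keyword arguments it does not know about.
    return f"echo: {prompt}"


async def llm_model_func_old(prompt, system_prompt=None, history_messages=[], **kwargs):
    # Old signature: keyword_extraction lands in **kwargs and is forwarded,
    # so the backend raises "unexpected keyword argument 'keyword_extraction'".
    return await backend_complete(
        prompt, system_prompt=system_prompt, history_messages=history_messages, **kwargs
    )


async def llm_model_func_new(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
):
    # Patched signature: the flag is captured here and never reaches the backend.
    return await backend_complete(
        prompt, system_prompt=system_prompt, history_messages=history_messages, **kwargs
    )


async def main():
    try:
        await llm_model_func_old("hi", keyword_extraction=True)
    except TypeError as err:
        print(err)  # ... got an unexpected keyword argument 'keyword_extraction'
    print(await llm_model_func_new("hi", keyword_extraction=True))  # echo: hi


asyncio.run(main())
```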
README.md
@@ -114,7 +114,7 @@ print(rag.query("What are the top themes in this story?", param=QueryParam(mode=
 * LightRAG also supports Open AI-like chat/embeddings APIs:
 ```python
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         "solar-mini",
examples/lightrag_api_openai_compatible_demo.py
@@ -33,7 +33,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         LLM_MODEL,
examples/lightrag_api_oracle_demo.py
@@ -50,7 +50,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         LLM_MODEL,
examples/lightrag_azure_openai_demo.py
@@ -30,7 +30,7 @@ os.mkdir(WORKING_DIR)
 
 
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     client = AzureOpenAI(
         api_key=AZURE_OPENAI_API_KEY,
examples/lightrag_lmdeploy_demo.py
@@ -12,7 +12,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def lmdeploy_model_complete(
-    prompt=None, system_prompt=None, history_messages=[], **kwargs
+    prompt=None, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
     return await lmdeploy_model_if_cache(
examples/lightrag_openai_compatible_demo.py
@@ -12,7 +12,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         "solar-mini",
examples/lightrag_oracle_demo.py
@@ -27,7 +27,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         CHATMODEL,
examples/lightrag_siliconcloud_demo.py
@@ -12,7 +12,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         "Qwen/Qwen2.5-7B-Instruct",
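For completeness, a hedged usage sketch of a patched `llm_model_func` wired into LightRAG, following the pattern in the repository's examples; `embedding_func` and the working directory are assumed to be set up as in those scripts and are not part of this commit:

```python
from lightrag import LightRAG, QueryParam

# Assumes llm_model_func (patched as above) and embedding_func are defined
# as in the examples/ scripts; the working_dir value is illustrative.
rag = LightRAG(
    working_dir="./dickens",
    llm_model_func=llm_model_func,
    embedding_func=embedding_func,
)

# Modes such as "local" and "global" run a query keyword-extraction step,
# which is the call path that previously raised the TypeError.
print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
```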