yangdx committed
Commit 90d5ed5 · Parent: 67da933

fix: Fix potential mutable default parameter issue

Files changed:
- lightrag/api/lightrag_server.py (+6 -2)
- lightrag/llm/openai.py (+15 -5)
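For context on the bug class being fixed: in Python, a mutable default such as `history_messages=[]` is evaluated once at function definition time, so every call that omits the argument shares the same list object. A minimal sketch of the pitfall and of the None-sentinel idiom this commit applies (function names here are illustrative, not from the repo):

# Hypothetical demo of the shared-default pitfall (not LightRAG code).
def broken_complete(prompt, history_messages=[]):
    # The [] above is created once; every default call mutates the same list.
    history_messages.append({"role": "user", "content": prompt})
    return history_messages

broken_complete("first")
print(len(broken_complete("second")))  # 2 -- history leaked from the first call

# The idiom this commit applies: default to None, allocate inside the body.
def fixed_complete(prompt, history_messages=None):
    if history_messages is None:
        history_messages = []  # fresh list on every call
    history_messages.append({"role": "user", "content": prompt})
    return history_messages

fixed_complete("first")
print(len(fixed_complete("second")))  # 1 -- no cross-call state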
lightrag/api/lightrag_server.py CHANGED

@@ -752,10 +752,12 @@ def create_app(args):
     async def openai_alike_model_complete(
         prompt,
         system_prompt=None,
-        history_messages=[],
+        history_messages=None,
         keyword_extraction=False,
         **kwargs,
     ) -> str:
+        if history_messages is None:
+            history_messages = []
         return await openai_complete_if_cache(
             args.llm_model,
             prompt,
@@ -769,10 +771,12 @@ def create_app(args):
     async def azure_openai_model_complete(
         prompt,
         system_prompt=None,
-        history_messages=[],
+        history_messages=None,
         keyword_extraction=False,
         **kwargs,
     ) -> str:
+        if history_messages is None:
+            history_messages = []
         return await azure_openai_complete_if_cache(
             args.llm_model,
             prompt,
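The fix matters especially here: these handlers are defined inside create_app(args) of a long-running server process, so a shared default list would accumulate history across unrelated requests instead of starting empty on each call.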
lightrag/llm/openai.py CHANGED

@@ -89,11 +89,13 @@ async def openai_complete_if_cache(
     model,
     prompt,
     system_prompt=None,
-    history_messages=[],
+    history_messages=None,
     base_url=None,
     api_key=None,
     **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key

@@ -146,8 +148,10 @@ async def openai_complete_if_cache(


 async def openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
 ) -> Union[str, AsyncIterator[str]]:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = "json"
@@ -162,8 +166,10 @@ async def openai_complete(


 async def gpt_4o_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
@@ -177,8 +183,10 @@ async def gpt_4o_complete(


 async def gpt_4o_mini_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
@@ -192,8 +200,10 @@ async def gpt_4o_mini_complete(


 async def nvidia_openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await openai_complete_if_cache(
         "nvidia/llama-3.1-nemotron-70b-instruct",  # context length 128k
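Call sites are unaffected by the signature change. A sketch of the two call shapes after this commit (the model and prompt values are made up; the signature is taken from the diff above):

# Omitting history_messages now yields a fresh [] inside the function each call.
result = await openai_complete_if_cache("gpt-4o-mini", "Hello")

# Passing an explicit history behaves exactly as before.
history = [{"role": "user", "content": "earlier turn"}]
result = await openai_complete_if_cache(
    "gpt-4o-mini", "Hello again", history_messages=history
)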