Merge pull request #722 from danielaskdd/fix-mutable-default-param

Files changed:
- lightrag/api/lightrag_server.py  +6 -2
- lightrag/llm/openai.py  +31 -5
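
Background on the bug being fixed (general Python behavior, not text from the PR): default argument values are evaluated once, at function definition time, so a mutable default such as history_messages=[] produces a single list shared by every call that omits the argument. A minimal sketch of the pitfall and of the None-sentinel idiom this PR adopts; the function names below are illustrative only, not from LightRAG:

    # Illustrative only: why history_messages=[] is risky.
    def remember(item, seen=[]):  # one list, created once at def time
        seen.append(item)         # mutations persist across calls
        return seen

    print(remember("a"))  # ['a']
    print(remember("b"))  # ['a', 'b']  <- state leaked from the first call

    # The sentinel idiom used throughout this PR:
    def remember_fixed(item, seen=None):
        if seen is None:
            seen = []             # fresh list on every call
        seen.append(item)
        return seen

    print(remember_fixed("a"))  # ['a']
    print(remember_fixed("b"))  # ['b']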
lightrag/api/lightrag_server.py CHANGED

@@ -753,13 +753,15 @@ def create_app(args):
     async def openai_alike_model_complete(
         prompt,
         system_prompt=None,
-        history_messages=[],
+        history_messages=None,
         keyword_extraction=False,
         **kwargs,
     ) -> str:
         keyword_extraction = kwargs.pop("keyword_extraction", None)
         if keyword_extraction:
             kwargs["response_format"] = GPTKeywordExtractionFormat
+        if history_messages is None:
+            history_messages = []
         return await openai_complete_if_cache(
             args.llm_model,
             prompt,
@@ -773,13 +775,15 @@ def create_app(args):
     async def azure_openai_model_complete(
         prompt,
         system_prompt=None,
-        history_messages=[],
+        history_messages=None,
         keyword_extraction=False,
         **kwargs,
     ) -> str:
         keyword_extraction = kwargs.pop("keyword_extraction", None)
         if keyword_extraction:
             kwargs["response_format"] = GPTKeywordExtractionFormat
+        if history_messages is None:
+            history_messages = []
         return await azure_openai_complete_if_cache(
             args.llm_model,
             prompt,
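
Both wrappers above now normalize the default inside the function body. A hedged usage sketch (the prompt strings are hypothetical, and real invocations go through the server's request handlers): calls that omit history_messages each get their own fresh list, so conversation state cannot bleed between requests:

    # Hypothetical direct calls, within create_app's scope:
    resp1 = await openai_alike_model_complete("What is LightRAG?")
    resp2 = await azure_openai_model_complete("Summarize the docs.")
    # Each call builds its own history_messages = [] before delegating,
    # instead of sharing one list baked into the signature.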
lightrag/llm/openai.py CHANGED

@@ -89,11 +89,13 @@ async def openai_complete_if_cache(
     model,
     prompt,
     system_prompt=None,
-    history_messages=[],
+    history_messages=None,
     base_url=None,
     api_key=None,
     **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
 
@@ -146,8 +148,14 @@ async def openai_complete_if_cache(
 
 
 async def openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> Union[str, AsyncIterator[str]]:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = "json"
@@ -162,8 +170,14 @@ async def openai_complete(
 
 
 async def gpt_4o_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
@@ -177,8 +191,14 @@ async def gpt_4o_complete(
 
 
 async def gpt_4o_mini_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
@@ -192,8 +212,14 @@ async def gpt_4o_mini_complete(
 
 
 async def nvidia_openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await openai_complete_if_cache(
         "nvidia/llama-3.1-nemotron-70b-instruct",  # context length 128k
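
A possible regression guard, sketched here and not part of the PR: inspect each public signature so a mutable default cannot be reintroduced silently. Assumes pytest and the import path shown in this diff:

    import inspect

    from lightrag.llm.openai import (
        gpt_4o_complete,
        gpt_4o_mini_complete,
        nvidia_openai_complete,
        openai_complete,
        openai_complete_if_cache,
    )

    def test_no_mutable_defaults():
        for fn in (
            openai_complete_if_cache,
            openai_complete,
            gpt_4o_complete,
            gpt_4o_mini_complete,
            nvidia_openai_complete,
        ):
            for param in inspect.signature(fn).parameters.values():
                # list/dict/set defaults are shared across calls;
                # None is the safe sentinel.
                assert not isinstance(param.default, (list, dict, set)), (
                    f"{fn.__name__} has a mutable default for {param.name!r}"
                )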