Merge branch 'main' into ollama-emulation
Browse files- lightrag/llm.py +2 -2
lightrag/llm.py
CHANGED
```diff
@@ -622,11 +622,11 @@ async def nvidia_openai_complete(


 async def azure_openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    model: str = "gpt-4o-mini", prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
-
+        model,
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
```

NOTE(review): as committed, the new signature puts the defaulted parameter `model: str = "gpt-4o-mini"` before the non-default parameter `prompt` — this is a Python `SyntaxError` (non-default argument follows default argument) and, even if reordered, changing the first positional parameter would break existing positional callers of `azure_openai_complete`. Verify against the upstream file.