Larfii committed
Commit ace829c · 1 Parent(s): 8540606
fix: unexpected keyword argument error
lightrag/llm.py (+7 -0)

lightrag/llm.py CHANGED
@@ -478,6 +478,7 @@ class GPTKeywordExtractionFormat(BaseModel):
 async def gpt_4o_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
     return await openai_complete_if_cache(
@@ -492,6 +493,7 @@ async def gpt_4o_complete(
 async def gpt_4o_mini_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
     return await openai_complete_if_cache(
@@ -506,6 +508,7 @@ async def gpt_4o_mini_complete(
 async def nvidia_openai_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await openai_complete_if_cache(
         "nvidia/llama-3.1-nemotron-70b-instruct",  # context length 128k
         prompt,
@@ -522,6 +525,7 @@ async def nvidia_openai_complete(
 async def azure_openai_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
         "conversation-4o-mini",
         prompt,
@@ -537,6 +541,7 @@ async def azure_openai_complete(
 async def bedrock_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await bedrock_complete_if_cache(
         "anthropic.claude-3-haiku-20240307-v1:0",
         prompt,
@@ -552,6 +557,7 @@ async def bedrock_complete(
 async def hf_model_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
     result = await hf_model_if_cache(
         model_name,
@@ -568,6 +574,7 @@ async def hf_model_complete(
 async def ollama_model_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["format"] = "json"
     model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
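The same one-line guard is added to all seven wrappers: keyword_extraction is popped out of **kwargs before they are forwarded, so the downstream *_if_cache helpers are never handed a keyword they do not declare. A minimal, self-contained sketch of the failure mode and the fix follows; complete_if_cache, complete_broken, and complete_fixed are hypothetical stand-ins for illustration, not the real LightRAG call chain.

import asyncio

# Hypothetical stand-in for the *_if_cache helpers: it declares only the
# arguments it understands, so any stray key forwarded via **kwargs raises
# a TypeError.
async def complete_if_cache(model, prompt, system_prompt=None, history_messages=[]):
    return f"{model}: {prompt}"

# Failure mode this commit fixes: a wrapper that forwards **kwargs unchanged
# crashes when a caller passes keyword_extraction=True, with
# "TypeError: complete_if_cache() got an unexpected keyword argument
# 'keyword_extraction'".
async def complete_broken(prompt, **kwargs):
    return await complete_if_cache("demo-model", prompt, **kwargs)

# The fix: consume the flag here, translating it into an argument the backend
# actually accepts only where one exists.
async def complete_fixed(prompt, **kwargs):
    keyword_extraction = kwargs.pop("keyword_extraction", None)
    if keyword_extraction:
        pass  # e.g. kwargs["response_format"] = GPTKeywordExtractionFormat
    return await complete_if_cache("demo-model", prompt, **kwargs)

print(asyncio.run(complete_fixed("hello", keyword_extraction=True)))   # works
# asyncio.run(complete_broken("hello", keyword_extraction=True))       # TypeError

Popping with a None default keeps the wrappers safe to call whether or not the flag is present; per the diff, only the backends with a structured-output knob act on it (the GPT wrappers via response_format, Ollama via format="json"), while the rest simply discard it.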