yangdx
committed on
Commit
·
a6f5190
1
Parent(s):
3cfa955
Fix LLM binding variable name in create_app function
Browse files
Corrected variable name from llm_binding_host
Updated conditional checks for LLM bindings
lightrag/api/lightrag_server.py
CHANGED
@@ -719,14 +719,14 @@ def create_app(args):
|
|
719 |
|
720 |
# Create working directory if it doesn't exist
|
721 |
Path(args.working_dir).mkdir(parents=True, exist_ok=True)
|
722 |
-
if args.llm_binding_host == "lollms" or args.embedding_binding == "lollms":
|
723 |
from lightrag.llm.lollms import lollms_model_complete, lollms_embed
|
724 |
-
if args.llm_binding_host == "ollama" or args.embedding_binding == "ollama":
|
725 |
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
|
726 |
-
if args.llm_binding_host == "openai" or args.embedding_binding == "openai":
|
727 |
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
728 |
if (
|
729 |
-
args.llm_binding_host == "azure_openai"
|
730 |
or args.embedding_binding == "azure_openai"
|
731 |
):
|
732 |
from lightrag.llm.azure_openai import (
|
|
|
719 |
|
720 |
# Create working directory if it doesn't exist
|
721 |
Path(args.working_dir).mkdir(parents=True, exist_ok=True)
|
722 |
+
if args.llm_binding == "lollms" or args.embedding_binding == "lollms":
|
723 |
from lightrag.llm.lollms import lollms_model_complete, lollms_embed
|
724 |
+
if args.llm_binding == "ollama" or args.embedding_binding == "ollama":
|
725 |
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
|
726 |
+
if args.llm_binding == "openai" or args.embedding_binding == "openai":
|
727 |
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
728 |
if (
|
729 |
+
args.llm_binding == "azure_openai"
|
730 |
or args.embedding_binding == "azure_openai"
|
731 |
):
|
732 |
from lightrag.llm.azure_openai import (
|