yangdx committed on
Commit
9c0a381
·
1 Parent(s): 013d2ef

Use LLM_MODEL env var in Azure OpenAI function

Browse files

- Remove model parameter from azure_openai_complete (all LLM complete functions must have the same parameter structure)
- Use LLM_MODEL env var in Azure OpenAI function
- Comment out Lollms example in .env.example (duplication with Ollama example)

Files changed (2) hide show
  1. .env.example +3 -3
  2. lightrag/llm.py +2 -2
.env.example CHANGED
@@ -13,9 +13,9 @@ LLM_BINDING_HOST=http://host.docker.internal:11434
13
  LLM_MODEL=mistral-nemo:latest
14
 
15
  # Lollms example
16
- LLM_BINDING=lollms
17
- LLM_BINDING_HOST=http://host.docker.internal:9600
18
- LLM_MODEL=mistral-nemo:latest
19
 
20
 
21
  # Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
 
13
  LLM_MODEL=mistral-nemo:latest
14
 
15
  # Lollms example
16
+ # LLM_BINDING=lollms
17
+ # LLM_BINDING_HOST=http://host.docker.internal:9600
18
+ # LLM_MODEL=mistral-nemo:latest
19
 
20
 
21
  # Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
lightrag/llm.py CHANGED
@@ -622,11 +622,11 @@ async def nvidia_openai_complete(
622
 
623
 
624
  async def azure_openai_complete(
625
- model: str = "gpt-4o-mini", prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
626
  ) -> str:
627
  keyword_extraction = kwargs.pop("keyword_extraction", None)
628
  result = await azure_openai_complete_if_cache(
629
- model,
630
  prompt,
631
  system_prompt=system_prompt,
632
  history_messages=history_messages,
 
622
 
623
 
624
  async def azure_openai_complete(
625
+ prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
626
  ) -> str:
627
  keyword_extraction = kwargs.pop("keyword_extraction", None)
628
  result = await azure_openai_complete_if_cache(
629
+ os.getenv("LLM_MODEL", "gpt-4o-mini"),
630
  prompt,
631
  system_prompt=system_prompt,
632
  history_messages=history_messages,