yangdx
committed on
Commit
·
492b91c
1
Parent(s):
e911ecd
Allow configuration of LLM parameters through environment variables
Browse files- lightrag/lightrag.py +2 -2
lightrag/lightrag.py
CHANGED
@@ -158,8 +158,8 @@ class LightRAG:
|
|
158 |
# LLM
|
159 |
llm_model_func: callable = None # This must be set (we do want to separate llm from the corte, so no more default initialization)
|
160 |
llm_model_name: str = "meta-llama/Llama-3.2-1B-Instruct" # 'meta-llama/Llama-3.2-1B'#'google/gemma-2-2b-it'
|
161 |
-
llm_model_max_token_size: int = 32768
|
162 |
-
llm_model_max_async: int = 16
|
163 |
llm_model_kwargs: dict = field(default_factory=dict)
|
164 |
|
165 |
# storage
|
|
|
158 |
# LLM
|
159 |
llm_model_func: callable = None # This must be set (we do want to separate llm from the corte, so no more default initialization)
|
160 |
llm_model_name: str = "meta-llama/Llama-3.2-1B-Instruct" # 'meta-llama/Llama-3.2-1B'#'google/gemma-2-2b-it'
|
161 |
+
llm_model_max_token_size: int = int(os.getenv("MAX_TOKENS", "32768"))
|
162 |
+
llm_model_max_async: int = int(os.getenv("MAX_ASYNC", "16"))
|
163 |
llm_model_kwargs: dict = field(default_factory=dict)
|
164 |
|
165 |
# storage
|