YanSte committed
Commit 578aacb · 1 Parent(s): 049ff37

added fields

Files changed (1):
  1. lightrag/lightrag.py +7 -7
lightrag/lightrag.py CHANGED
@@ -307,26 +307,26 @@ class LightRAG:
         - random_seed: Seed value for reproducibility.
     """
 
-    embedding_func: EmbeddingFunc | None = None
+    embedding_func: EmbeddingFunc | None = field(default=None)
     """Function for computing text embeddings. Must be set before use."""
 
-    embedding_batch_num: int = 32
+    embedding_batch_num: int = field(default=32)
     """Batch size for embedding computations."""
 
-    embedding_func_max_async: int = 16
+    embedding_func_max_async: int = field(default=16)
     """Maximum number of concurrent embedding function calls."""
 
     # LLM Configuration
-    llm_model_func: Callable[..., object] | None = None
+    llm_model_func: Callable[..., object] | None = field(default=None)
     """Function for interacting with the large language model (LLM). Must be set before use."""
 
-    llm_model_name: str = "meta-llama/Llama-3.2-1B-Instruct"
+    llm_model_name: str = field(default="gpt-4o-mini")
     """Name of the LLM model used for generating responses."""
 
-    llm_model_max_token_size: int = int(os.getenv("MAX_TOKENS", "32768"))
+    llm_model_max_token_size: int = field(default=int(os.getenv("MAX_TOKENS", 32768)))
     """Maximum number of tokens allowed per LLM response."""
 
-    llm_model_max_async: int = int(os.getenv("MAX_ASYNC", "16"))
+    llm_model_max_async: int = field(default=int(os.getenv("MAX_ASYNC", 16)))
     """Maximum number of concurrent LLM calls."""
 
     llm_model_kwargs: dict[str, Any] = field(default_factory=dict)
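
The commit swaps bare class-attribute defaults for explicit dataclasses.field(default=...) declarations. Below is a minimal runnable sketch of that pattern; the LLMConfig class name and the stubbed EmbeddingFunc alias are illustrative stand-ins for self-containment, not LightRAG's actual definitions (in the diff these fields sit on the LightRAG dataclass).

import os
from dataclasses import dataclass, field
from typing import Any, Callable

# Stand-in for lightrag's EmbeddingFunc type (assumption, for self-containment).
EmbeddingFunc = Callable[..., object]


@dataclass
class LLMConfig:  # hypothetical name; the real attributes live on LightRAG
    # For immutable values, field(default=...) behaves identically to a bare default.
    embedding_func: EmbeddingFunc | None = field(default=None)
    embedding_batch_num: int = field(default=32)
    llm_model_name: str = field(default="gpt-4o-mini")
    # Environment-driven default, read once when the class body executes.
    llm_model_max_token_size: int = field(default=int(os.getenv("MAX_TOKENS", 32768)))
    # Mutable defaults need default_factory; field(default={}) raises ValueError.
    llm_model_kwargs: dict[str, Any] = field(default_factory=dict)


cfg = LLMConfig(llm_model_name="my-local-model")
print(cfg.llm_model_max_token_size)  # 32768 unless MAX_TOKENS is set in the env

For immutable defaults the change is behavior-preserving; the payoff is that every attribute is declared the same way and per-field options (metadata, repr, compare, ...) can be added later without restructuring. Mutable defaults such as the llm_model_kwargs dict must keep default_factory, since dataclasses rejects an unhashable plain default outright.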