Commit 107389b by zrguo
Parent: cd8677f

Update QueryParam
env.example CHANGED
@@ -61,9 +61,10 @@ OLLAMA_EMULATING_MODEL_TAG=latest
 ### Number of text chunks to retrieve initially from vector search and keep after reranking
 # CHUNK_TOP_K=5
 
+### Enable reranking for retrieved text chunks (default: true)
+# ENABLE_RERANK=true
+
 ### Rerank Configuration
-### Note: Reranking is now controlled per query via the 'enable_rerank' parameter (default: true)
-### The following configuration is only needed when you want to use reranking
 ### Rerank model configuration (required when enable_rerank=true in query parameters)
 # RERANK_MODEL=BAAI/bge-reranker-v2-m3
 # RERANK_BINDING_HOST=https://api.your-rerank-provider.com/v1/rerank
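Because the matching QueryParam defaults in lightrag/base.py (next diff) read these variables with os.getenv at class-definition time, ENABLE_RERANK must already be in the environment before lightrag is imported. A minimal sketch of that ordering, assuming a python-dotenv setup (the load_dotenv call and the .env file are assumptions, not part of this commit):

    # Assumption: env.example has been copied to .env and python-dotenv is installed;
    # this commit itself does not add any of this.
    from dotenv import load_dotenv

    load_dotenv()  # must run before the lightrag import below, because the
                   # QueryParam defaults are evaluated when the module loads

    from lightrag.base import QueryParam

    # False only when ENABLE_RERANK is set to something other than "true"
    # (the comparison is case-insensitive via .lower()).
    print(QueryParam().enable_rerank)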
lightrag/base.py CHANGED
@@ -36,7 +36,7 @@ T = TypeVar("T")
 class QueryParam:
     """Configuration parameters for query execution in LightRAG."""
 
-    mode: Literal["local", "global", "hybrid", "naive", "mix", "bypass"] = "global"
+    mode: Literal["local", "global", "hybrid", "naive", "mix", "bypass"] = "mix"
     """Specifies the retrieval mode:
     - "local": Focuses on context-dependent information.
     - "global": Utilizes global knowledge.
@@ -85,7 +85,7 @@ class QueryParam:
     Format: [{"role": "user/assistant", "content": "message"}].
     """
 
-    history_turns: int = 3
+    history_turns: int = int(os.getenv("HISTORY_TURNS", "3"))
     """Number of complete conversation turns (user-assistant pairs) to consider in the response context."""
 
     ids: list[str] | None = None
@@ -102,7 +102,7 @@ class QueryParam:
     If proivded, this will be use instead of the default vaulue from prompt template.
     """
 
-    enable_rerank: bool = True
+    enable_rerank: bool = os.getenv("ENABLE_RERANK", "true").lower() == "true"
     """Enable reranking for retrieved text chunks. If True but no rerank model is configured, a warning will be issued.
     Default is True to enable reranking when rerank model is available.
     """
lightrag/kg/shared_storage.py CHANGED
@@ -22,7 +22,7 @@ def direct_log(message, enable_output: bool = False, level: str = "DEBUG"):
     """
     if not enable_output:
         return
-
+
     # Get the current logger level from the lightrag logger
     try:
         from lightrag.utils import logger