Fix linting
- lightrag/base.py +12 -4
- lightrag/operate.py +12 -4
lightrag/base.py
CHANGED
@@ -74,13 +74,19 @@ class QueryParam:
         If None, defaults to top_k value.
     """

-    max_entity_tokens: int = int(os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS)))
+    max_entity_tokens: int = int(
+        os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS))
+    )
     """Maximum number of tokens allocated for entity context in unified token control system."""

-    max_relation_tokens: int = int(os.getenv("MAX_RELATION_TOKENS", str(DEFAULT_MAX_RELATION_TOKENS)))
+    max_relation_tokens: int = int(
+        os.getenv("MAX_RELATION_TOKENS", str(DEFAULT_MAX_RELATION_TOKENS))
+    )
     """Maximum number of tokens allocated for relationship context in unified token control system."""

-    max_total_tokens: int = int(os.getenv("MAX_TOTAL_TOKENS", str(DEFAULT_MAX_TOTAL_TOKENS)))
+    max_total_tokens: int = int(
+        os.getenv("MAX_TOTAL_TOKENS", str(DEFAULT_MAX_TOTAL_TOKENS))
+    )
     """Maximum total tokens budget for the entire query context (entities + relations + chunks + system prompt)."""

     hl_keywords: list[str] = field(default_factory=list)

@@ -111,7 +117,9 @@ class QueryParam:
         If proivded, this will be use instead of the default vaulue from prompt template.
     """

-    enable_rerank: bool = os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
+    enable_rerank: bool = (
+        os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
+    )
     """Enable reranking for retrieved text chunks. If True but no rerank model is configured, a warning will be issued.
     Default is True to enable reranking when rerank model is available.
     """
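The three token fields and enable_rerank all read their defaults from the environment at class-definition time. A minimal, self-contained sketch of that behavior follows; the DEFAULT_* values below are placeholders for illustration, not lightrag's actual constants:

import os
from dataclasses import dataclass

DEFAULT_MAX_ENTITY_TOKENS = 4000  # placeholder, not lightrag's real default
DEFAULT_ENABLE_RERANK = True      # placeholder, not lightrag's real default

@dataclass
class QueryParamSketch:
    # Env vars are strings, so the int default is stringified for os.getenv
    # and the whole expression is converted back with int().
    max_entity_tokens: int = int(
        os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS))
    )
    # Booleans are compared as lowercase strings because bool("False")
    # would be truthy; only the literal "true" enables the flag.
    enable_rerank: bool = (
        os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
    )

print(QueryParamSketch())  # QueryParamSketch(max_entity_tokens=4000, enable_rerank=True)

Note that os.getenv runs once, when the class body is evaluated at import time, so setting these variables after import does not change the defaults of new instances.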
lightrag/operate.py
CHANGED
@@ -1964,17 +1964,23 @@ async def _build_query_context(
     max_entity_tokens = getattr(
         query_param,
         "max_entity_tokens",
-        text_chunks_db.global_config.get("MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS),
+        text_chunks_db.global_config.get(
+            "MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS
+        ),
     )
     max_relation_tokens = getattr(
         query_param,
         "max_relation_tokens",
-        text_chunks_db.global_config.get("MAX_RELATION_TOKENS", DEFAULT_MAX_RELATION_TOKENS),
+        text_chunks_db.global_config.get(
+            "MAX_RELATION_TOKENS", DEFAULT_MAX_RELATION_TOKENS
+        ),
     )
     max_total_tokens = getattr(
         query_param,
         "max_total_tokens",
-        text_chunks_db.global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
+        text_chunks_db.global_config.get(
+            "MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS
+        ),
     )

     # Truncate entities based on complete JSON serialization
@@ -2692,7 +2698,9 @@ async def naive_query(
     # Calculate dynamic token limit for chunks
     # Get token limits from query_param (with fallback to global_config)
     max_total_tokens = getattr(
-        query_param, "max_total_tokens", global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS)
+        query_param,
+        "max_total_tokens",
+        global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
     )

     # Calculate conversation history tokens
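Both operate.py hunks keep the same three-level precedence: an explicit value on query_param wins, then the storage's global_config, then a library default. A small sketch of that lookup chain, using stand-in objects (SimpleNamespace and a plain dict) rather than lightrag's real query_param and global_config:

from types import SimpleNamespace

DEFAULT_MAX_TOTAL_TOKENS = 32000  # placeholder, not lightrag's real default

global_config = {"MAX_TOTAL_TOKENS": 30000}

def resolve_max_total_tokens(query_param):
    # getattr returns query_param.max_total_tokens when the attribute exists;
    # otherwise the third argument (the global_config lookup) is returned,
    # and dict.get supplies the hardcoded default when the key is absent too.
    return getattr(
        query_param,
        "max_total_tokens",
        global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
    )

print(resolve_max_total_tokens(SimpleNamespace(max_total_tokens=8000)))  # 8000
print(resolve_max_total_tokens(SimpleNamespace()))                       # 30000

One consequence worth noting: because QueryParam now defines these fields with env-driven defaults, the getattr fallback only fires for objects that lack the attribute entirely; for a normal QueryParam instance the dataclass default always wins.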