gzdaniel committed on
Commit
f70ff5d
·
1 Parent(s): a13a257

Fix linting

Browse files
Files changed (1) hide show
  1. lightrag/operate.py +11 -11
lightrag/operate.py CHANGED
@@ -1198,7 +1198,6 @@ async def mix_kg_vector_query(
1198
  traceback.print_exc()
1199
  return None
1200
 
1201
-
1202
  # 3. Execute both retrievals in parallel
1203
  kg_context, vector_context = await asyncio.gather(
1204
  get_kg_context(), _get_vector_context(query, chunks_vdb, query_param, tokenizer)
@@ -1984,7 +1983,7 @@ async def naive_query(
1984
 
1985
  tokenizer: Tokenizer = global_config["tokenizer"]
1986
  section = await _get_vector_context(query, chunks_vdb, query_param, tokenizer)
1987
-
1988
  if section is None:
1989
  return PROMPTS["fail_response"]
1990
 
@@ -2207,26 +2206,28 @@ async def _get_vector_context(
2207
  ) -> str | None:
2208
  """
2209
  Retrieve vector context from the vector database.
2210
-
2211
  This function performs vector search to find relevant text chunks for a query,
2212
  formats them with file path and creation time information, and truncates
2213
  the results to fit within token limits.
2214
-
2215
  Args:
2216
  query: The query string to search for
2217
  chunks_vdb: Vector database containing document chunks
2218
  query_param: Query parameters including top_k and ids
2219
  tokenizer: Tokenizer for counting tokens
2220
-
2221
  Returns:
2222
  Formatted string containing relevant text chunks, or None if no results found
2223
  """
2224
  try:
2225
  # Reduce top_k for vector search in hybrid mode since we have structured information from KG
2226
- mix_topk = min(10, query_param.top_k) if hasattr(query_param, 'mode') and query_param.mode == 'mix' else query_param.top_k
2227
- results = await chunks_vdb.query(
2228
- query, top_k=mix_topk, ids=query_param.ids
 
2229
  )
 
2230
  if not results:
2231
  return None
2232
 
@@ -2254,9 +2255,7 @@ async def _get_vector_context(
2254
  logger.debug(
2255
  f"Truncate chunks from {len(valid_chunks)} to {len(maybe_trun_chunks)} (max tokens:{query_param.max_token_for_text_unit})"
2256
  )
2257
- logger.info(
2258
- f"Vector query: {len(maybe_trun_chunks)} chunks, top_k: {mix_topk}"
2259
- )
2260
 
2261
  if not maybe_trun_chunks:
2262
  return None
@@ -2277,6 +2276,7 @@ async def _get_vector_context(
2277
  logger.error(f"Error in _get_vector_context: {e}")
2278
  return None
2279
 
 
2280
  async def query_with_keywords(
2281
  query: str,
2282
  prompt: str,
 
1198
  traceback.print_exc()
1199
  return None
1200
 
 
1201
  # 3. Execute both retrievals in parallel
1202
  kg_context, vector_context = await asyncio.gather(
1203
  get_kg_context(), _get_vector_context(query, chunks_vdb, query_param, tokenizer)
 
1983
 
1984
  tokenizer: Tokenizer = global_config["tokenizer"]
1985
  section = await _get_vector_context(query, chunks_vdb, query_param, tokenizer)
1986
+
1987
  if section is None:
1988
  return PROMPTS["fail_response"]
1989
 
 
2206
  ) -> str | None:
2207
  """
2208
  Retrieve vector context from the vector database.
2209
+
2210
  This function performs vector search to find relevant text chunks for a query,
2211
  formats them with file path and creation time information, and truncates
2212
  the results to fit within token limits.
2213
+
2214
  Args:
2215
  query: The query string to search for
2216
  chunks_vdb: Vector database containing document chunks
2217
  query_param: Query parameters including top_k and ids
2218
  tokenizer: Tokenizer for counting tokens
2219
+
2220
  Returns:
2221
  Formatted string containing relevant text chunks, or None if no results found
2222
  """
2223
  try:
2224
  # Reduce top_k for vector search in hybrid mode since we have structured information from KG
2225
+ mix_topk = (
2226
+ min(10, query_param.top_k)
2227
+ if hasattr(query_param, "mode") and query_param.mode == "mix"
2228
+ else query_param.top_k
2229
  )
2230
+ results = await chunks_vdb.query(query, top_k=mix_topk, ids=query_param.ids)
2231
  if not results:
2232
  return None
2233
 
 
2255
  logger.debug(
2256
  f"Truncate chunks from {len(valid_chunks)} to {len(maybe_trun_chunks)} (max tokens:{query_param.max_token_for_text_unit})"
2257
  )
2258
+ logger.info(f"Vector query: {len(maybe_trun_chunks)} chunks, top_k: {mix_topk}")
 
 
2259
 
2260
  if not maybe_trun_chunks:
2261
  return None
 
2276
  logger.error(f"Error in _get_vector_context: {e}")
2277
  return None
2278
 
2279
+
2280
  async def query_with_keywords(
2281
  query: str,
2282
  prompt: str,