jin commited on
Commit
18a40e5
·
1 Parent(s): 82d36b3

support pipeline mode

Browse files
examples/lightrag_oracle_demo.py CHANGED
@@ -87,12 +87,14 @@ async def main():
87
  # We use Oracle DB as the KV/vector/graph storage
88
  # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
89
  rag = LightRAG(
90
- working_dir=WORKING_DIR,
 
91
  entity_extract_max_gleaning = 1,
92
 
93
- enable_llm_cache=False,
94
- embedding_cache_config= None, # {"enabled": True,"similarity_threshold": 0.90},
95
  enable_llm_cache_for_entity_extract = True,
 
 
96
 
97
  chunk_token_size=CHUNK_TOKEN_SIZE,
98
  llm_model_max_token_size = MAX_TOKENS,
@@ -106,34 +108,30 @@ async def main():
106
  graph_storage = "OracleGraphStorage",
107
  kv_storage = "OracleKVStorage",
108
  vector_storage="OracleVectorDBStorage",
109
- doc_status_storage="OracleDocStatusStorage",
110
 
111
- addon_params = {"example_number":1, "language":"Simplfied Chinese"},
 
 
 
 
112
  )
113
 
114
  # Set the KV/vector/graph storage's `db` property, so all operations will use the same connection pool
115
- rag.key_string_value_json_storage_cls.db = oracle_db
116
- rag.vector_db_storage_cls.db = oracle_db
117
- rag.graph_storage_cls.db = oracle_db
118
- rag.doc_status_storage_cls.db = oracle_db
119
- rag.doc_status.db = oracle_db
120
- rag.full_docs.db = oracle_db
121
- rag.text_chunks.db = oracle_db
122
- rag.llm_response_cache.db = oracle_db
123
- rag.key_string_value_json_storage_cls.db = oracle_db
124
- rag.chunks_vdb.db = oracle_db
125
- rag.relationships_vdb.db = oracle_db
126
- rag.entities_vdb.db = oracle_db
127
- rag.graph_storage_cls.db = oracle_db
128
- rag.chunk_entity_relation_graph.db = oracle_db
129
- rag.llm_response_cache.db = oracle_db
130
-
131
- rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
132
-
133
  # Extract and Insert into LightRAG storage
134
- with open("./dickens/demo.txt", "r", encoding="utf-8") as f:
135
- await rag.ainsert(f.read())
 
 
 
 
 
 
136
 
 
 
 
137
  # Perform search in different modes
138
  modes = ["naive", "local", "global", "hybrid"]
139
  for mode in modes:
 
87
  # We use Oracle DB as the KV/vector/graph storage
88
  # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
89
  rag = LightRAG(
90
+ # log_level="DEBUG",
91
+ working_dir=WORKING_DIR,
92
  entity_extract_max_gleaning = 1,
93
 
94
+ enable_llm_cache=True,
 
95
  enable_llm_cache_for_entity_extract = True,
96
+ embedding_cache_config= None, # {"enabled": True,"similarity_threshold": 0.90},
97
+
98
 
99
  chunk_token_size=CHUNK_TOKEN_SIZE,
100
  llm_model_max_token_size = MAX_TOKENS,
 
108
  graph_storage = "OracleGraphStorage",
109
  kv_storage = "OracleKVStorage",
110
  vector_storage="OracleVectorDBStorage",
 
111
 
112
+ addon_params = {"example_number":1,
113
+ "language":"Simplfied Chinese",
114
+ "entity_types": ["organization", "person", "geo", "event"],
115
+ "insert_batch_size":2,
116
+ }
117
  )
118
 
119
  # Set the KV/vector/graph storage's `db` property, so all operations will use the same connection pool
120
+ rag.set_storage_client(db_client = oracle_db)
121
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  # Extract and Insert into LightRAG storage
123
+ with open(WORKING_DIR+"/docs.txt", "r", encoding="utf-8") as f:
124
+ all_text = f.read()
125
+ texts = [x for x in all_text.split("\n") if x]
126
+
127
+ # New mode use pipeline
128
+ await rag.apipeline_process_documents(texts)
129
+ await rag.apipeline_process_chunks()
130
+ await rag.apipeline_process_extract_graph()
131
 
132
+ # Old method use ainsert
133
+ #await rag.ainsert(texts)
134
+
135
  # Perform search in different modes
136
  modes = ["naive", "local", "global", "hybrid"]
137
  for mode in modes:
lightrag/kg/oracle_impl.py CHANGED
@@ -12,9 +12,6 @@ from ..base import (
12
  BaseGraphStorage,
13
  BaseKVStorage,
14
  BaseVectorStorage,
15
- DocStatusStorage,
16
- DocStatus,
17
- DocProcessingStatus,
18
  )
19
 
20
  import oracledb
@@ -156,8 +153,6 @@ class OracleDB:
156
  if data is None:
157
  await cursor.execute(sql)
158
  else:
159
- # print(data)
160
- # print(sql)
161
  await cursor.execute(sql, data)
162
  await connection.commit()
163
  except Exception as e:
@@ -175,7 +170,7 @@ class OracleKVStorage(BaseKVStorage):
175
 
176
  def __post_init__(self):
177
  self._data = {}
178
- self._max_batch_size = self.global_config["embedding_batch_num"]
179
 
180
  ################ QUERY METHODS ################
181
 
@@ -204,12 +199,11 @@ class OracleKVStorage(BaseKVStorage):
204
  array_res = await self.db.query(SQL, params, multirows=True)
205
  res = {}
206
  for row in array_res:
207
- res[row["id"]] = row
208
  return res
209
  else:
210
  return None
211
-
212
- # Query by id
213
  async def get_by_ids(self, ids: list[str], fields=None) -> Union[list[dict], None]:
214
  """get doc_chunks data based on id"""
215
  SQL = SQL_TEMPLATES["get_by_ids_" + self.namespace].format(
@@ -228,8 +222,7 @@ class OracleKVStorage(BaseKVStorage):
228
  dict_res[mode] = {}
229
  for row in res:
230
  dict_res[row["mode"]][row["id"]] = row
231
- res = [{k: v} for k, v in dict_res.items()]
232
-
233
  if res:
234
  data = res # [{"data":i} for i in res]
235
  # print(data)
@@ -237,38 +230,42 @@ class OracleKVStorage(BaseKVStorage):
237
  else:
238
  return None
239
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240
  async def filter_keys(self, keys: list[str]) -> set[str]:
241
- """remove duplicated"""
242
  SQL = SQL_TEMPLATES["filter_keys"].format(
243
  table_name=N_T[self.namespace], ids=",".join([f"'{id}'" for id in keys])
244
  )
245
  params = {"workspace": self.db.workspace}
246
- try:
247
- await self.db.query(SQL, params)
248
- except Exception as e:
249
- logger.error(f"Oracle database error: {e}")
250
- print(SQL)
251
- print(params)
252
  res = await self.db.query(SQL, params, multirows=True)
253
- data = None
254
  if res:
255
  exist_keys = [key["id"] for key in res]
256
  data = set([s for s in keys if s not in exist_keys])
 
257
  else:
258
- exist_keys = []
259
- data = set([s for s in keys if s not in exist_keys])
260
- return data
261
 
262
  ################ INSERT METHODS ################
263
  async def upsert(self, data: dict[str, dict]):
264
- left_data = {k: v for k, v in data.items() if k not in self._data}
265
- self._data.update(left_data)
266
- # print(self._data)
267
- # values = []
268
  if self.namespace == "text_chunks":
269
  list_data = [
270
  {
271
- "__id__": k,
272
  **{k1: v1 for k1, v1 in v.items()},
273
  }
274
  for k, v in data.items()
@@ -284,33 +281,30 @@ class OracleKVStorage(BaseKVStorage):
284
  embeddings = np.concatenate(embeddings_list)
285
  for i, d in enumerate(list_data):
286
  d["__vector__"] = embeddings[i]
287
- # print(list_data)
 
288
  for item in list_data:
289
- merge_sql = SQL_TEMPLATES["merge_chunk"]
290
- data = {
291
- "check_id": item["__id__"],
292
- "id": item["__id__"],
293
  "content": item["content"],
294
  "workspace": self.db.workspace,
295
  "tokens": item["tokens"],
296
  "chunk_order_index": item["chunk_order_index"],
297
  "full_doc_id": item["full_doc_id"],
298
  "content_vector": item["__vector__"],
 
299
  }
300
- # print(merge_sql)
301
- await self.db.execute(merge_sql, data)
302
-
303
  if self.namespace == "full_docs":
304
- for k, v in self._data.items():
305
  # values.clear()
306
  merge_sql = SQL_TEMPLATES["merge_doc_full"]
307
- data = {
308
  "id": k,
309
  "content": v["content"],
310
  "workspace": self.db.workspace,
311
  }
312
- # print(merge_sql)
313
- await self.db.execute(merge_sql, data)
314
 
315
  if self.namespace == "llm_response_cache":
316
  for mode, items in data.items():
@@ -325,102 +319,20 @@ class OracleKVStorage(BaseKVStorage):
325
  }
326
 
327
  await self.db.execute(upsert_sql, _data)
328
- return left_data
 
 
 
 
 
 
 
329
 
330
  async def index_done_callback(self):
331
  if self.namespace in ["full_docs", "text_chunks"]:
332
  logger.info("full doc and chunk data had been saved into oracle db!")
333
 
334
 
335
- @dataclass
336
- class OracleDocStatusStorage(DocStatusStorage):
337
- """Oracle implementation of document status storage"""
338
- # should pass db object to self.db
339
- db: OracleDB = None
340
- meta_fields = None
341
-
342
- def __post_init__(self):
343
- pass
344
-
345
- async def filter_keys(self, ids: list[str]) -> set[str]:
346
- """Return keys that don't exist in storage"""
347
- SQL = SQL_TEMPLATES["get_by_id_" + self.namespace].format(
348
- ids = ",".join([f"'{id}'" for id in ids])
349
- )
350
- params = {"workspace": self.db.workspace}
351
- res = await self.db.query(SQL, params, True)
352
- # The result is like [{'id': 'id1'}, {'id': 'id2'}, ...].
353
- if res:
354
- existed = set([element["id"] for element in res])
355
- return set(ids) - existed
356
- else:
357
- return set(ids)
358
-
359
- async def get_status_counts(self) -> Dict[str, int]:
360
- """Get counts of documents in each status"""
361
- SQL = SQL_TEMPLATES["get_status_counts"]
362
- params = {"workspace": self.db.workspace}
363
- res = await self.db.query(SQL, params, True)
364
- # Result is like [{'status': 'PENDING', 'count': 1}, {'status': 'PROCESSING', 'count': 2}, ...]
365
- counts = {}
366
- for doc in res:
367
- counts[doc["status"]] = doc["count"]
368
- return counts
369
-
370
- async def get_docs_by_status(self, status: DocStatus) -> Dict[str, DocProcessingStatus]:
371
- """Get all documents by status"""
372
- SQL = SQL_TEMPLATES["get_docs_by_status"]
373
- params = {"workspace": self.db.workspace, "status": status}
374
- res = await self.db.query(SQL, params, True)
375
- # Result is like [{'id': 'id1', 'status': 'PENDING', 'updated_at': '2023-07-01 00:00:00'}, {'id': 'id2', 'status': 'PENDING', 'updated_at': '2023-07-01 00:00:00'}, ...]
376
- # Converting to be a dict
377
- return {
378
- element["id"]: DocProcessingStatus(
379
- #content_summary=element["content_summary"],
380
- content_summary = "",
381
- content_length=element["CONTENT_LENGTH"],
382
- status=element["STATUS"],
383
- created_at=element["CREATETIME"],
384
- updated_at=element["UPDATETIME"],
385
- chunks_count=-1,
386
- #chunks_count=element["chunks_count"],
387
- )
388
- for element in res
389
- }
390
-
391
- async def get_failed_docs(self) -> Dict[str, DocProcessingStatus]:
392
- """Get all failed documents"""
393
- return await self.get_docs_by_status(DocStatus.FAILED)
394
-
395
- async def get_pending_docs(self) -> Dict[str, DocProcessingStatus]:
396
- """Get all pending documents"""
397
- return await self.get_docs_by_status(DocStatus.PENDING)
398
-
399
- async def index_done_callback(self):
400
- """Save data after indexing, but for ORACLE, we already saved them during the upsert stage, so no action to take here"""
401
- logger.info("Doc status had been saved into ORACLE db!")
402
-
403
- async def upsert(self, data: dict[str, dict]):
404
- """Update or insert document status
405
-
406
- Args:
407
- data: Dictionary of document IDs and their status data
408
- """
409
- SQL = SQL_TEMPLATES["merge_doc_status"]
410
- for k, v in data.items():
411
- # chunks_count is optional
412
- params = {
413
- "workspace": self.db.workspace,
414
- "id": k,
415
- "content_summary": v["content_summary"],
416
- "content_length": v["content_length"],
417
- "chunks_count": v["chunks_count"] if "chunks_count" in v else -1,
418
- "status": v["status"],
419
- }
420
- await self.db.execute(SQL, params)
421
- return data
422
-
423
-
424
  @dataclass
425
  class OracleVectorDBStorage(BaseVectorStorage):
426
  # should pass db object to self.db
@@ -466,7 +378,7 @@ class OracleGraphStorage(BaseGraphStorage):
466
 
467
  def __post_init__(self):
468
  """从graphml文件加载图"""
469
- self._max_batch_size = self.global_config["embedding_batch_num"]
470
 
471
  #################### insert method ################
472
 
@@ -500,7 +412,6 @@ class OracleGraphStorage(BaseGraphStorage):
500
  "content": content,
501
  "content_vector": content_vector,
502
  }
503
- # print(merge_sql)
504
  await self.db.execute(merge_sql, data)
505
  # self._graph.add_node(node_id, **node_data)
506
 
@@ -718,9 +629,10 @@ TABLES = {
718
  },
719
  "LIGHTRAG_DOC_CHUNKS": {
720
  "ddl": """CREATE TABLE LIGHTRAG_DOC_CHUNKS (
721
- id varchar(256) PRIMARY KEY,
722
  workspace varchar(1024),
723
  full_doc_id varchar(256),
 
724
  chunk_order_index NUMBER,
725
  tokens NUMBER,
726
  content CLOB,
@@ -795,9 +707,9 @@ TABLES = {
795
 
796
  SQL_TEMPLATES = {
797
  # SQL for KVStorage
798
- "get_by_id_full_docs": "select ID,NVL(content,'') as content from LIGHTRAG_DOC_FULL where workspace=:workspace and ID=:id",
799
 
800
- "get_by_id_text_chunks": "select ID,TOKENS,NVL(content,'') as content,CHUNK_ORDER_INDEX,FULL_DOC_ID from LIGHTRAG_DOC_CHUNKS where workspace=:workspace and ID=:id",
801
 
802
  "get_by_id_llm_response_cache": """SELECT id, original_prompt, NVL(return_value, '') as "return", cache_mode as "mode"
803
  FROM LIGHTRAG_LLM_CACHE WHERE workspace=:workspace AND id=:id""",
@@ -808,24 +720,34 @@ SQL_TEMPLATES = {
808
  "get_by_ids_llm_response_cache": """SELECT id, original_prompt, NVL(return_value, '') as "return", cache_mode as "mode"
809
  FROM LIGHTRAG_LLM_CACHE WHERE workspace=:workspace AND id IN ({ids})""",
810
 
811
- "get_by_ids_full_docs": "select ID,NVL(content,'') as content from LIGHTRAG_DOC_FULL where workspace=:workspace and ID in ({ids})",
812
 
813
- "get_by_ids_text_chunks": "select ID,TOKENS,NVL(content,'') as content,CHUNK_ORDER_INDEX,FULL_DOC_ID from LIGHTRAG_DOC_CHUNKS where workspace=:workspace and ID in ({ids})",
 
 
814
 
 
 
 
 
 
 
815
  "filter_keys": "select id from {table_name} where workspace=:workspace and id in ({ids})",
816
 
 
 
817
  "merge_doc_full": """MERGE INTO LIGHTRAG_DOC_FULL a
818
  USING DUAL
819
  ON (a.id = :id and a.workspace = :workspace)
820
  WHEN NOT MATCHED THEN
821
  INSERT(id,content,workspace) values(:id,:content,:workspace)""",
822
 
823
- "merge_chunk": """MERGE INTO LIGHTRAG_DOC_CHUNKS a
824
  USING DUAL
825
- ON (a.id = :check_id)
826
- WHEN NOT MATCHED THEN
827
- INSERT(id,content,workspace,tokens,chunk_order_index,full_doc_id,content_vector)
828
- values (:id,:content,:workspace,:tokens,:chunk_order_index,:full_doc_id,:content_vector) """,
829
 
830
  "upsert_llm_response_cache": """MERGE INTO LIGHTRAG_LLM_CACHE a
831
  USING DUAL
@@ -838,27 +760,7 @@ SQL_TEMPLATES = {
838
  return_value = :return_value,
839
  cache_mode = :cache_mode,
840
  updatetime = SYSDATE""",
841
-
842
- "get_by_id_doc_status": "SELECT id FROM LIGHTRAG_DOC_FULL WHERE workspace=:workspace AND id IN ({ids})",
843
-
844
- "get_status_counts": """SELECT status as "status", COUNT(1) as "count"
845
- FROM LIGHTRAG_DOC_FULL WHERE workspace=:workspace GROUP BY STATUS""",
846
-
847
- "get_docs_by_status": """select content_length,status,
848
- TO_CHAR(created_at,'YYYY-MM-DD HH24:MI:SS') as created_at,TO_CHAR(updatetime,'YYYY-MM-DD HH24:MI:SS') as updatetime
849
- from LIGHTRAG_DOC_STATUS where workspace=:workspace and status=:status""",
850
 
851
- "merge_doc_status":"""MERGE INTO LIGHTRAG_DOC_FULL a
852
- USING DUAL
853
- ON (a.id = :id and a.workspace = :workspace)
854
- WHEN NOT MATCHED THEN
855
- INSERT (id,content_summary,content_length,chunks_count,status) values(:id,:content_summary,:content_length,:chunks_count,:status)
856
- WHEN MATCHED THEN UPDATE
857
- SET content_summary = :content_summary,
858
- content_length = :content_length,
859
- chunks_count = :chunks_count,
860
- status = :status,
861
- updatetime = SYSDATE""",
862
 
863
  # SQL for VectorStorage
864
  "entities": """SELECT name as entity_name FROM
 
12
  BaseGraphStorage,
13
  BaseKVStorage,
14
  BaseVectorStorage,
 
 
 
15
  )
16
 
17
  import oracledb
 
153
  if data is None:
154
  await cursor.execute(sql)
155
  else:
 
 
156
  await cursor.execute(sql, data)
157
  await connection.commit()
158
  except Exception as e:
 
170
 
171
  def __post_init__(self):
172
  self._data = {}
173
+ self._max_batch_size = self.global_config.get("embedding_batch_num",10)
174
 
175
  ################ QUERY METHODS ################
176
 
 
199
  array_res = await self.db.query(SQL, params, multirows=True)
200
  res = {}
201
  for row in array_res:
202
+ res[row["id"]] = row
203
  return res
204
  else:
205
  return None
206
+
 
207
  async def get_by_ids(self, ids: list[str], fields=None) -> Union[list[dict], None]:
208
  """get doc_chunks data based on id"""
209
  SQL = SQL_TEMPLATES["get_by_ids_" + self.namespace].format(
 
222
  dict_res[mode] = {}
223
  for row in res:
224
  dict_res[row["mode"]][row["id"]] = row
225
+ res = [{k: v} for k, v in dict_res.items()]
 
226
  if res:
227
  data = res # [{"data":i} for i in res]
228
  # print(data)
 
230
  else:
231
  return None
232
 
233
+ async def get_by_status_and_ids(self, status: str, ids: list[str]) -> Union[list[dict], None]:
234
+ """Fetch rows matching *status*, optionally restricted to *ids*; backed by the get_by_status[_ids]_{namespace} templates (full_docs and text_chunks only)."""
235
+ if ids is not None:
236
+ SQL = SQL_TEMPLATES["get_by_status_ids_" + self.namespace].format(
237
+ ids=",".join([f"'{id}'" for id in ids])
238
+ )
239
+ else:
240
+ SQL = SQL_TEMPLATES["get_by_status_" + self.namespace]
241
+ params = {"workspace": self.db.workspace, "status": status}
242
+ res = await self.db.query(SQL, params, multirows=True)
243
+ if res:
244
+ return res
245
+ else:
246
+ return None
247
+
248
  async def filter_keys(self, keys: list[str]) -> set[str]:
249
+ """Return keys that don't exist in storage"""
250
  SQL = SQL_TEMPLATES["filter_keys"].format(
251
  table_name=N_T[self.namespace], ids=",".join([f"'{id}'" for id in keys])
252
  )
253
  params = {"workspace": self.db.workspace}
 
 
 
 
 
 
254
  res = await self.db.query(SQL, params, multirows=True)
 
255
  if res:
256
  exist_keys = [key["id"] for key in res]
257
  data = set([s for s in keys if s not in exist_keys])
258
+ return data
259
  else:
260
+ return set(keys)
261
+
 
262
 
263
  ################ INSERT METHODS ################
264
  async def upsert(self, data: dict[str, dict]):
 
 
 
 
265
  if self.namespace == "text_chunks":
266
  list_data = [
267
  {
268
+ "id": k,
269
  **{k1: v1 for k1, v1 in v.items()},
270
  }
271
  for k, v in data.items()
 
281
  embeddings = np.concatenate(embeddings_list)
282
  for i, d in enumerate(list_data):
283
  d["__vector__"] = embeddings[i]
284
+
285
+ merge_sql = SQL_TEMPLATES["merge_chunk"]
286
  for item in list_data:
287
+ _data = {
288
+ "id": item["id"],
 
 
289
  "content": item["content"],
290
  "workspace": self.db.workspace,
291
  "tokens": item["tokens"],
292
  "chunk_order_index": item["chunk_order_index"],
293
  "full_doc_id": item["full_doc_id"],
294
  "content_vector": item["__vector__"],
295
+ "status": item["status"],
296
  }
297
+ await self.db.execute(merge_sql, _data)
 
 
298
  if self.namespace == "full_docs":
299
+ for k, v in data.items():
300
  # values.clear()
301
  merge_sql = SQL_TEMPLATES["merge_doc_full"]
302
+ _data = {
303
  "id": k,
304
  "content": v["content"],
305
  "workspace": self.db.workspace,
306
  }
307
+ await self.db.execute(merge_sql, _data)
 
308
 
309
  if self.namespace == "llm_response_cache":
310
  for mode, items in data.items():
 
319
  }
320
 
321
  await self.db.execute(upsert_sql, _data)
322
+ return None
323
+
324
+ async def change_status(self, id: str, status: str):
325
+ SQL = SQL_TEMPLATES["change_status"].format(
326
+ table_name=N_T[self.namespace]
327
+ )
328
+ params = {"workspace": self.db.workspace, "id": id, "status": status}
329
+ await self.db.execute(SQL, params)
330
 
331
  async def index_done_callback(self):
332
  if self.namespace in ["full_docs", "text_chunks"]:
333
  logger.info("full doc and chunk data had been saved into oracle db!")
334
 
335
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
336
  @dataclass
337
  class OracleVectorDBStorage(BaseVectorStorage):
338
  # should pass db object to self.db
 
378
 
379
  def __post_init__(self):
380
  """从graphml文件加载图"""
381
+ self._max_batch_size = self.global_config.get("embedding_batch_num", 10)
382
 
383
  #################### insert method ################
384
 
 
412
  "content": content,
413
  "content_vector": content_vector,
414
  }
 
415
  await self.db.execute(merge_sql, data)
416
  # self._graph.add_node(node_id, **node_data)
417
 
 
629
  },
630
  "LIGHTRAG_DOC_CHUNKS": {
631
  "ddl": """CREATE TABLE LIGHTRAG_DOC_CHUNKS (
632
+ id varchar(256),
633
  workspace varchar(1024),
634
  full_doc_id varchar(256),
635
+ status varchar(256),
636
  chunk_order_index NUMBER,
637
  tokens NUMBER,
638
  content CLOB,
 
707
 
708
  SQL_TEMPLATES = {
709
  # SQL for KVStorage
710
+ "get_by_id_full_docs": "select ID,content,status from LIGHTRAG_DOC_FULL where workspace=:workspace and ID=:id",
711
 
712
+ "get_by_id_text_chunks": "select ID,TOKENS,content,CHUNK_ORDER_INDEX,FULL_DOC_ID,status from LIGHTRAG_DOC_CHUNKS where workspace=:workspace and ID=:id",
713
 
714
  "get_by_id_llm_response_cache": """SELECT id, original_prompt, NVL(return_value, '') as "return", cache_mode as "mode"
715
  FROM LIGHTRAG_LLM_CACHE WHERE workspace=:workspace AND id=:id""",
 
720
  "get_by_ids_llm_response_cache": """SELECT id, original_prompt, NVL(return_value, '') as "return", cache_mode as "mode"
721
  FROM LIGHTRAG_LLM_CACHE WHERE workspace=:workspace AND id IN ({ids})""",
722
 
723
+ "get_by_ids_full_docs": "select t.*,createtime as created_at from LIGHTRAG_DOC_FULL t where workspace=:workspace and ID in ({ids})",
724
 
725
+ "get_by_ids_text_chunks": "select ID,TOKENS,content,CHUNK_ORDER_INDEX,FULL_DOC_ID from LIGHTRAG_DOC_CHUNKS where workspace=:workspace and ID in ({ids})",
726
+
727
+ "get_by_status_ids_full_docs": "select id,status from LIGHTRAG_DOC_FULL t where workspace=:workspace AND status=:status and ID in ({ids})",
728
 
729
+ "get_by_status_ids_text_chunks": "select id,status from LIGHTRAG_DOC_CHUNKS where workspace=:workspace and status=:status and ID in ({ids})",
730
+
731
+ "get_by_status_full_docs": "select id,status from LIGHTRAG_DOC_FULL t where workspace=:workspace AND status=:status",
732
+
733
+ "get_by_status_text_chunks": "select id,status from LIGHTRAG_DOC_CHUNKS where workspace=:workspace and status=:status",
734
+
735
  "filter_keys": "select id from {table_name} where workspace=:workspace and id in ({ids})",
736
 
737
+ "change_status": "update {table_name} set status=:status,updatetime=SYSDATE where workspace=:workspace and id=:id",
738
+
739
  "merge_doc_full": """MERGE INTO LIGHTRAG_DOC_FULL a
740
  USING DUAL
741
  ON (a.id = :id and a.workspace = :workspace)
742
  WHEN NOT MATCHED THEN
743
  INSERT(id,content,workspace) values(:id,:content,:workspace)""",
744
 
745
+ "merge_chunk": """MERGE INTO LIGHTRAG_DOC_CHUNKS
746
  USING DUAL
747
+ ON (id = :id and workspace = :workspace)
748
+ WHEN NOT MATCHED THEN INSERT
749
+ (id,content,workspace,tokens,chunk_order_index,full_doc_id,content_vector,status)
750
+ values (:id,:content,:workspace,:tokens,:chunk_order_index,:full_doc_id,:content_vector,:status) """,
751
 
752
  "upsert_llm_response_cache": """MERGE INTO LIGHTRAG_LLM_CACHE a
753
  USING DUAL
 
760
  return_value = :return_value,
761
  cache_mode = :cache_mode,
762
  updatetime = SYSDATE""",
 
 
 
 
 
 
 
 
 
763
 
 
 
 
 
 
 
 
 
 
 
 
764
 
765
  # SQL for VectorStorage
766
  "entities": """SELECT name as entity_name FROM
lightrag/lightrag.py CHANGED
@@ -26,6 +26,7 @@ from .utils import (
26
  convert_response_to_json,
27
  logger,
28
  set_logger,
 
29
  )
30
  from .base import (
31
  BaseGraphStorage,
@@ -36,22 +37,31 @@ from .base import (
36
  DocStatus,
37
  )
38
 
39
- from .storage import (
40
- JsonKVStorage,
41
- NanoVectorDBStorage,
42
- NetworkXStorage,
43
- JsonDocStatusStorage,
44
- )
45
-
46
  from .prompt import GRAPH_FIELD_SEP
47
 
48
-
49
- # future KG integrations
50
-
51
- # from .kg.ArangoDB_impl import (
52
- # GraphStorage as ArangoDBStorage
53
- # )
54
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  def lazy_external_import(module_name: str, class_name: str):
57
  """Lazily import a class from an external module based on the package of the caller."""
@@ -65,36 +75,13 @@ def lazy_external_import(module_name: str, class_name: str):
65
 
66
  def import_class(*args, **kwargs):
67
  import importlib
68
-
69
- # Import the module using importlib
70
  module = importlib.import_module(module_name, package=package)
71
-
72
- # Get the class from the module and instantiate it
73
  cls = getattr(module, class_name)
74
  return cls(*args, **kwargs)
75
 
76
  return import_class
77
 
78
 
79
- Neo4JStorage = lazy_external_import(".kg.neo4j_impl", "Neo4JStorage")
80
- OracleKVStorage = lazy_external_import(".kg.oracle_impl", "OracleKVStorage")
81
- OracleGraphStorage = lazy_external_import(".kg.oracle_impl", "OracleGraphStorage")
82
- OracleVectorDBStorage = lazy_external_import(".kg.oracle_impl", "OracleVectorDBStorage")
83
- OracleDocStatusStorage = lazy_external_import(".kg.oracle_impl", "OracleDocStatusStorage")
84
- MilvusVectorDBStorge = lazy_external_import(".kg.milvus_impl", "MilvusVectorDBStorge")
85
- MongoKVStorage = lazy_external_import(".kg.mongo_impl", "MongoKVStorage")
86
- ChromaVectorDBStorage = lazy_external_import(".kg.chroma_impl", "ChromaVectorDBStorage")
87
- TiDBKVStorage = lazy_external_import(".kg.tidb_impl", "TiDBKVStorage")
88
- TiDBVectorDBStorage = lazy_external_import(".kg.tidb_impl", "TiDBVectorDBStorage")
89
- TiDBGraphStorage = lazy_external_import(".kg.tidb_impl", "TiDBGraphStorage")
90
- PGKVStorage = lazy_external_import(".kg.postgres_impl", "PGKVStorage")
91
- PGVectorStorage = lazy_external_import(".kg.postgres_impl", "PGVectorStorage")
92
- AGEStorage = lazy_external_import(".kg.age_impl", "AGEStorage")
93
- PGGraphStorage = lazy_external_import(".kg.postgres_impl", "PGGraphStorage")
94
- GremlinStorage = lazy_external_import(".kg.gremlin_impl", "GremlinStorage")
95
- PGDocStatusStorage = lazy_external_import(".kg.postgres_impl", "PGDocStatusStorage")
96
-
97
-
98
  def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
99
  """
100
  Ensure that there is always an event loop available.
@@ -198,52 +185,64 @@ class LightRAG:
198
  logger.setLevel(self.log_level)
199
 
200
  logger.info(f"Logger initialized for working directory: {self.working_dir}")
 
 
 
201
 
202
- _print_config = ",\n ".join([f"{k} = {v}" for k, v in asdict(self).items()])
 
 
203
  logger.debug(f"LightRAG init with param:\n {_print_config}\n")
204
 
205
- # @TODO: should move all storage setup here to leverage initial start params attached to self.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206
 
207
- self.key_string_value_json_storage_cls: Type[BaseKVStorage] = (
208
- self._get_storage_class()[self.kv_storage]
 
209
  )
210
- self.vector_db_storage_cls: Type[BaseVectorStorage] = self._get_storage_class()[
211
- self.vector_storage
212
- ]
213
- self.graph_storage_cls: Type[BaseGraphStorage] = self._get_storage_class()[
214
- self.graph_storage
215
- ]
216
 
217
- if not os.path.exists(self.working_dir):
218
- logger.info(f"Creating working directory {self.working_dir}")
219
- os.makedirs(self.working_dir)
 
 
 
 
 
 
220
 
221
  self.llm_response_cache = self.key_string_value_json_storage_cls(
222
  namespace="llm_response_cache",
223
- global_config=asdict(self),
224
  embedding_func=None,
225
  )
226
 
227
- self.embedding_func = limit_async_func_call(self.embedding_func_max_async)(
228
- self.embedding_func
229
- )
230
-
231
  ####
232
  # add embedding func by walter
233
  ####
234
  self.full_docs = self.key_string_value_json_storage_cls(
235
  namespace="full_docs",
236
- global_config=asdict(self),
237
  embedding_func=self.embedding_func,
238
  )
239
  self.text_chunks = self.key_string_value_json_storage_cls(
240
  namespace="text_chunks",
241
- global_config=asdict(self),
242
  embedding_func=self.embedding_func,
243
  )
244
  self.chunk_entity_relation_graph = self.graph_storage_cls(
245
  namespace="chunk_entity_relation",
246
- global_config=asdict(self),
247
  embedding_func=self.embedding_func,
248
  )
249
  ####
@@ -252,73 +251,64 @@ class LightRAG:
252
 
253
  self.entities_vdb = self.vector_db_storage_cls(
254
  namespace="entities",
255
- global_config=asdict(self),
256
  embedding_func=self.embedding_func,
257
  meta_fields={"entity_name"},
258
  )
259
  self.relationships_vdb = self.vector_db_storage_cls(
260
  namespace="relationships",
261
- global_config=asdict(self),
262
  embedding_func=self.embedding_func,
263
  meta_fields={"src_id", "tgt_id"},
264
  )
265
  self.chunks_vdb = self.vector_db_storage_cls(
266
  namespace="chunks",
267
- global_config=asdict(self),
268
  embedding_func=self.embedding_func,
269
  )
270
 
 
 
 
 
 
 
 
 
271
  self.llm_model_func = limit_async_func_call(self.llm_model_max_async)(
272
  partial(
273
  self.llm_model_func,
274
- hashing_kv=self.llm_response_cache
275
- if self.llm_response_cache
276
- and hasattr(self.llm_response_cache, "global_config")
277
- else self.key_string_value_json_storage_cls(
278
- namespace="llm_response_cache",
279
- global_config=asdict(self),
280
- embedding_func=None,
281
- ),
282
  **self.llm_model_kwargs,
283
  )
284
  )
285
 
286
  # Initialize document status storage
287
- self.doc_status_storage_cls = self._get_storage_class()[self.doc_status_storage]
288
  self.doc_status = self.doc_status_storage_cls(
289
  namespace="doc_status",
290
- global_config=asdict(self),
291
  embedding_func=None,
292
  )
293
 
294
- def _get_storage_class(self) -> dict:
295
- return {
296
- # kv storage
297
- "JsonKVStorage": JsonKVStorage,
298
- "OracleKVStorage": OracleKVStorage,
299
- "OracleDocStatusStorage":OracleDocStatusStorage,
300
- "MongoKVStorage": MongoKVStorage,
301
- "TiDBKVStorage": TiDBKVStorage,
302
- # vector storage
303
- "NanoVectorDBStorage": NanoVectorDBStorage,
304
- "OracleVectorDBStorage": OracleVectorDBStorage,
305
- "MilvusVectorDBStorge": MilvusVectorDBStorge,
306
- "ChromaVectorDBStorage": ChromaVectorDBStorage,
307
- "TiDBVectorDBStorage": TiDBVectorDBStorage,
308
- # graph storage
309
- "NetworkXStorage": NetworkXStorage,
310
- "Neo4JStorage": Neo4JStorage,
311
- "OracleGraphStorage": OracleGraphStorage,
312
- "AGEStorage": AGEStorage,
313
- "PGGraphStorage": PGGraphStorage,
314
- "PGKVStorage": PGKVStorage,
315
- "PGDocStatusStorage": PGDocStatusStorage,
316
- "PGVectorStorage": PGVectorStorage,
317
- "TiDBGraphStorage": TiDBGraphStorage,
318
- "GremlinStorage": GremlinStorage,
319
- # "ArangoDBStorage": ArangoDBStorage
320
- "JsonDocStatusStorage": JsonDocStatusStorage,
321
- }
322
 
323
  def insert(
324
  self, string_or_strings, split_by_character=None, split_by_character_only=False
@@ -358,6 +348,11 @@ class LightRAG:
358
  }
359
  for content in unique_contents
360
  }
 
 
 
 
 
361
 
362
  # 3. Filter out already processed documents
363
  _add_doc_keys = await self.doc_status.filter_keys(list(new_docs.keys()))
@@ -406,12 +401,7 @@ class LightRAG:
406
  }
407
 
408
  # Update status with chunks information
409
- doc_status.update(
410
- {
411
- "chunks_count": len(chunks),
412
- "updated_at": datetime.now().isoformat(),
413
- }
414
- )
415
  await self.doc_status.upsert({doc_id: doc_status})
416
 
417
  try:
@@ -435,30 +425,16 @@ class LightRAG:
435
 
436
  self.chunk_entity_relation_graph = maybe_new_kg
437
 
438
- # Store original document and chunks
439
- await self.full_docs.upsert(
440
- {doc_id: {"content": doc["content"]}}
441
- )
442
  await self.text_chunks.upsert(chunks)
443
 
444
  # Update status to processed
445
- doc_status.update(
446
- {
447
- "status": DocStatus.PROCESSED,
448
- "updated_at": datetime.now().isoformat(),
449
- }
450
- )
451
  await self.doc_status.upsert({doc_id: doc_status})
452
 
453
  except Exception as e:
454
  # Mark as failed if any step fails
455
- doc_status.update(
456
- {
457
- "status": DocStatus.FAILED,
458
- "error": str(e),
459
- "updated_at": datetime.now().isoformat(),
460
- }
461
- )
462
  await self.doc_status.upsert({doc_id: doc_status})
463
  raise e
464
 
@@ -540,6 +516,174 @@ class LightRAG:
540
  if update_storage:
541
  await self._insert_done()
542
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
543
  async def _insert_done(self):
544
  tasks = []
545
  for storage_inst in [
 
26
  convert_response_to_json,
27
  logger,
28
  set_logger,
29
+ statistic_data
30
  )
31
  from .base import (
32
  BaseGraphStorage,
 
37
  DocStatus,
38
  )
39
 
 
 
 
 
 
 
 
40
  from .prompt import GRAPH_FIELD_SEP
41
 
42
STORAGES = {
    # Maps a storage class name to the module (relative import path) that
    # defines it; resolved lazily via _get_storage_class/lazy_external_import
    # so optional backend dependencies are only imported when actually used.

    # Built-in, dependency-light implementations.
    "JsonKVStorage": '.storage',
    "NanoVectorDBStorage": '.storage',
    "NetworkXStorage": '.storage',
    "JsonDocStatusStorage": '.storage',

    # External database backends.
    "Neo4JStorage":".kg.neo4j_impl",
    "OracleKVStorage":".kg.oracle_impl",
    "OracleGraphStorage":".kg.oracle_impl",
    "OracleVectorDBStorage":".kg.oracle_impl",
    # NOTE(review): "Storge" is misspelled, but the key must match the class
    # name exported by .kg.milvus_impl — confirm before renaming either side.
    "MilvusVectorDBStorge":".kg.milvus_impl",
    "MongoKVStorage":".kg.mongo_impl",
    "ChromaVectorDBStorage":".kg.chroma_impl",
    "TiDBKVStorage":".kg.tidb_impl",
    "TiDBVectorDBStorage":".kg.tidb_impl",
    "TiDBGraphStorage":".kg.tidb_impl",
    "PGKVStorage":".kg.postgres_impl",
    "PGVectorStorage":".kg.postgres_impl",
    "AGEStorage":".kg.age_impl",
    "PGGraphStorage":".kg.postgres_impl",
    "GremlinStorage":".kg.gremlin_impl",
    "PGDocStatusStorage":".kg.postgres_impl",
}
65
 
66
  def lazy_external_import(module_name: str, class_name: str):
67
  """Lazily import a class from an external module based on the package of the caller."""
 
75
 
76
  def import_class(*args, **kwargs):
77
  import importlib
 
 
78
  module = importlib.import_module(module_name, package=package)
 
 
79
  cls = getattr(module, class_name)
80
  return cls(*args, **kwargs)
81
 
82
  return import_class
83
 
84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
86
  """
87
  Ensure that there is always an event loop available.
 
185
  logger.setLevel(self.log_level)
186
 
187
  logger.info(f"Logger initialized for working directory: {self.working_dir}")
188
+ if not os.path.exists(self.working_dir):
189
+ logger.info(f"Creating working directory {self.working_dir}")
190
+ os.makedirs(self.working_dir)
191
 
192
+ # show config
193
+ global_config=asdict(self)
194
+ _print_config = ",\n ".join([f"{k} = {v}" for k, v in global_config.items()])
195
  logger.debug(f"LightRAG init with param:\n {_print_config}\n")
196
 
197
+ # Init LLM
198
+ self.embedding_func = limit_async_func_call(self.embedding_func_max_async)(
199
+ self.embedding_func
200
+ )
201
+
202
+
203
+ # Initialize all storages
204
+ self.key_string_value_json_storage_cls: Type[BaseKVStorage] = self._get_storage_class(self.kv_storage)
205
+ self.vector_db_storage_cls: Type[BaseVectorStorage] = self._get_storage_class(self.vector_storage)
206
+ self.graph_storage_cls: Type[BaseGraphStorage] = self._get_storage_class(self.graph_storage)
207
+
208
+ self.key_string_value_json_storage_cls = partial(
209
+ self.key_string_value_json_storage_cls,
210
+ global_config=global_config
211
+ )
212
 
213
+ self.vector_db_storage_cls = partial(
214
+ self.vector_db_storage_cls,
215
+ global_config=global_config
216
  )
 
 
 
 
 
 
217
 
218
+ self.graph_storage_cls = partial(
219
+ self.graph_storage_cls,
220
+ global_config=global_config
221
+ )
222
+
223
+ self.json_doc_status_storage = self.key_string_value_json_storage_cls(
224
+ namespace="json_doc_status_storage",
225
+ embedding_func=None,
226
+ )
227
 
228
  self.llm_response_cache = self.key_string_value_json_storage_cls(
229
  namespace="llm_response_cache",
 
230
  embedding_func=None,
231
  )
232
 
 
 
 
 
233
  ####
234
  # add embedding func by walter
235
  ####
236
  self.full_docs = self.key_string_value_json_storage_cls(
237
  namespace="full_docs",
 
238
  embedding_func=self.embedding_func,
239
  )
240
  self.text_chunks = self.key_string_value_json_storage_cls(
241
  namespace="text_chunks",
 
242
  embedding_func=self.embedding_func,
243
  )
244
  self.chunk_entity_relation_graph = self.graph_storage_cls(
245
  namespace="chunk_entity_relation",
 
246
  embedding_func=self.embedding_func,
247
  )
248
  ####
 
251
 
252
  self.entities_vdb = self.vector_db_storage_cls(
253
  namespace="entities",
 
254
  embedding_func=self.embedding_func,
255
  meta_fields={"entity_name"},
256
  )
257
  self.relationships_vdb = self.vector_db_storage_cls(
258
  namespace="relationships",
 
259
  embedding_func=self.embedding_func,
260
  meta_fields={"src_id", "tgt_id"},
261
  )
262
  self.chunks_vdb = self.vector_db_storage_cls(
263
  namespace="chunks",
 
264
  embedding_func=self.embedding_func,
265
  )
266
 
267
+ if self.llm_response_cache and hasattr(self.llm_response_cache, "global_config"):
268
+ hashing_kv = self.llm_response_cache
269
+ else:
270
+ hashing_kv = self.key_string_value_json_storage_cls(
271
+ namespace="llm_response_cache",
272
+ embedding_func=None,
273
+ )
274
+
275
  self.llm_model_func = limit_async_func_call(self.llm_model_max_async)(
276
  partial(
277
  self.llm_model_func,
278
+ hashing_kv=hashing_kv,
 
 
 
 
 
 
 
279
  **self.llm_model_kwargs,
280
  )
281
  )
282
 
283
  # Initialize document status storage
284
+ self.doc_status_storage_cls = self._get_storage_class(self.doc_status_storage)
285
  self.doc_status = self.doc_status_storage_cls(
286
  namespace="doc_status",
287
+ global_config=global_config,
288
  embedding_func=None,
289
  )
290
 
291
+ def _get_storage_class(self, storage_name: str) -> dict:
292
+ import_path = STORAGES[storage_name]
293
+ storage_class = lazy_external_import(import_path, storage_name)
294
+ return storage_class
295
+
296
+ def set_storage_client(self,db_client):
297
+ # Now only tested on Oracle Database
298
+ for storage in [self.vector_db_storage_cls,
299
+ self.graph_storage_cls,
300
+ self.doc_status, self.full_docs,
301
+ self.text_chunks,
302
+ self.llm_response_cache,
303
+ self.key_string_value_json_storage_cls,
304
+ self.chunks_vdb,
305
+ self.relationships_vdb,
306
+ self.entities_vdb,
307
+ self.graph_storage_cls,
308
+ self.chunk_entity_relation_graph,
309
+ self.llm_response_cache]:
310
+ # set client
311
+ storage.db = db_client
 
 
 
 
 
 
 
312
 
313
  def insert(
314
  self, string_or_strings, split_by_character=None, split_by_character_only=False
 
348
  }
349
  for content in unique_contents
350
  }
351
+
352
+ # 3. Store original document and chunks
353
+ await self.full_docs.upsert(
354
+ {doc_id: {"content": doc["content"]}}
355
+ )
356
 
357
  # 3. Filter out already processed documents
358
  _add_doc_keys = await self.doc_status.filter_keys(list(new_docs.keys()))
 
401
  }
402
 
403
  # Update status with chunks information
404
+ doc_status.update({"chunks_count": len(chunks),"updated_at": datetime.now().isoformat()})
 
 
 
 
 
405
  await self.doc_status.upsert({doc_id: doc_status})
406
 
407
  try:
 
425
 
426
  self.chunk_entity_relation_graph = maybe_new_kg
427
 
428
+
 
 
 
429
  await self.text_chunks.upsert(chunks)
430
 
431
  # Update status to processed
432
+ doc_status.update({"status": DocStatus.PROCESSED,"updated_at": datetime.now().isoformat()})
 
 
 
 
 
433
  await self.doc_status.upsert({doc_id: doc_status})
434
 
435
  except Exception as e:
436
  # Mark as failed if any step fails
437
+ doc_status.update({"status": DocStatus.FAILED,"error": str(e),"updated_at": datetime.now().isoformat()})
 
 
 
 
 
 
438
  await self.doc_status.upsert({doc_id: doc_status})
439
  raise e
440
 
 
516
  if update_storage:
517
  await self._insert_done()
518
 
519
+ async def apipeline_process_documents(self, string_or_strings):
520
+ """Input list remove duplicates, generate document IDs and initial pendding status, filter out already stored documents, store docs
521
+ Args:
522
+ string_or_strings: Single document string or list of document strings
523
+ """
524
+ if isinstance(string_or_strings, str):
525
+ string_or_strings = [string_or_strings]
526
+
527
+ # 1. Remove duplicate contents from the list
528
+ unique_contents = list(set(doc.strip() for doc in string_or_strings))
529
+
530
+ logger.info(f"Received {len(string_or_strings)} docs, contains {len(unique_contents)} new unique documents")
531
+
532
+ # 2. Generate document IDs and initial status
533
+ new_docs = {
534
+ compute_mdhash_id(content, prefix="doc-"): {
535
+ "content": content,
536
+ "content_summary": self._get_content_summary(content),
537
+ "content_length": len(content),
538
+ "status": DocStatus.PENDING,
539
+ "created_at": datetime.now().isoformat(),
540
+ "updated_at": None,
541
+ }
542
+ for content in unique_contents
543
+ }
544
+
545
+ # 3. Filter out already processed documents
546
+ _not_stored_doc_keys = await self.full_docs.filter_keys(list(new_docs.keys()))
547
+ if len(_not_stored_doc_keys) < len(new_docs):
548
+ logger.info(f"Skipping {len(new_docs)-len(_not_stored_doc_keys)} already existing documents")
549
+ new_docs = {k: v for k, v in new_docs.items() if k in _not_stored_doc_keys}
550
+
551
+ if not new_docs:
552
+ logger.info(f"All documents have been processed or are duplicates")
553
+ return None
554
+
555
+ # 4. Store original document
556
+ for doc_id, doc in new_docs.items():
557
+ await self.full_docs.upsert({doc_id: {"content": doc["content"]}})
558
+ await self.full_docs.change_status(doc_id, DocStatus.PENDING)
559
+ logger.info(f"Stored {len(new_docs)} new unique documents")
560
+
561
    async def apipeline_process_chunks(self):
        """Split stored documents into chunks and insert them.

        Pipeline stage 2: picks up documents left PENDING or FAILED by
        ``apipeline_process_documents``, splits them by token size, stores
        the chunks (KV store + vector DB) and updates document status.
        """
        # 1. Gather all pending and failed documents to (re)process.
        _todo_doc_keys = []
        _failed_doc = await self.full_docs.get_by_status_and_ids(status = DocStatus.FAILED,ids = None)
        _pendding_doc = await self.full_docs.get_by_status_and_ids(status = DocStatus.PENDING,ids = None)
        if _failed_doc:
            _todo_doc_keys.extend([doc["id"] for doc in _failed_doc])
        if _pendding_doc:
            _todo_doc_keys.extend([doc["id"] for doc in _pendding_doc])
        if not _todo_doc_keys:
            logger.info("All documents have been processed or are duplicates")
            return None
        else:
            logger.info(f"Filtered out {len(_todo_doc_keys)} not processed documents")

        # Re-fetch the full document records for the ids selected above.
        new_docs = {
            doc["id"]: doc
            for doc in await self.full_docs.get_by_ids(_todo_doc_keys)
        }

        # 2. Split docs into chunks, insert chunks, update doc status.
        chunk_cnt = 0
        # insert_batch_size only bounds how many docs share one progress bar;
        # processing within the batch is still sequential.
        batch_size = self.addon_params.get("insert_batch_size", 10)
        for i in range(0, len(new_docs), batch_size):
            batch_docs = dict(list(new_docs.items())[i : i + batch_size])
            for doc_id, doc in tqdm_async(
                batch_docs.items(), desc=f"Level 1 - Spliting doc in batch {i//batch_size + 1}"
            ):
                try:
                    # Generate chunks from the document; each chunk starts
                    # PENDING so stage 3 (graph extraction) can find it.
                    chunks = {
                        compute_mdhash_id(dp["content"], prefix="chunk-"): {
                            **dp,
                            "full_doc_id": doc_id,
                            "status": DocStatus.PENDING,
                        }
                        for dp in chunking_by_token_size(
                            doc["content"],
                            overlap_token_size=self.chunk_overlap_token_size,
                            max_token_size=self.chunk_token_size,
                            tiktoken_model=self.tiktoken_model_name,
                        )
                    }
                    chunk_cnt += len(chunks)
                    await self.text_chunks.upsert(chunks)
                    # NOTE(review): passes the *document* id, but text_chunks
                    # is keyed by chunk ids — confirm change_status handles
                    # this (it may be a bug or an Oracle-impl convention).
                    await self.text_chunks.change_status(doc_id, DocStatus.PROCESSED)

                    try:
                        # Store chunks in vector database
                        await self.chunks_vdb.upsert(chunks)
                        # Update doc status
                        await self.full_docs.change_status(doc_id, DocStatus.PROCESSED)
                    except Exception as e:
                        # Mark as failed if any step fails
                        await self.full_docs.change_status(doc_id, DocStatus.FAILED)
                        # Re-raised here, but immediately caught by the outer
                        # handler below, which logs and moves on to the next doc.
                        raise e
                except Exception as e:
                    import traceback
                    error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
                    logger.error(error_msg)
                    # Best-effort: a failed document does not abort the batch.
                    continue
        logger.info(f"Stored {chunk_cnt} chunks from {len(new_docs)} documents")
624
+
625
+ async def apipeline_process_extract_graph(self):
626
+ """Get pendding or failed chunks, extract entities and relationships from each chunk"""
627
+ # 1. get all pending and failed chunks
628
+ _todo_chunk_keys = []
629
+ _failed_chunks = await self.text_chunks.get_by_status_and_ids(status = DocStatus.FAILED,ids = None)
630
+ _pendding_chunks = await self.text_chunks.get_by_status_and_ids(status = DocStatus.PENDING,ids = None)
631
+ if _failed_chunks:
632
+ _todo_chunk_keys.extend([doc["id"] for doc in _failed_chunks])
633
+ if _pendding_chunks:
634
+ _todo_chunk_keys.extend([doc["id"] for doc in _pendding_chunks])
635
+ if not _todo_chunk_keys:
636
+ logger.info("All chunks have been processed or are duplicates")
637
+ return None
638
+
639
+ # Process documents in batches
640
+ batch_size = self.addon_params.get("insert_batch_size", 10)
641
+
642
+ semaphore = asyncio.Semaphore(batch_size) # Control the number of tasks that are processed simultaneously
643
+
644
+ async def process_chunk(chunk_id):
645
+ async with semaphore:
646
+ chunks = {i["id"]: i for i in await self.text_chunks.get_by_ids([chunk_id])}
647
+ # Extract and store entities and relationships
648
+ try:
649
+ maybe_new_kg = await extract_entities(
650
+ chunks,
651
+ knowledge_graph_inst=self.chunk_entity_relation_graph,
652
+ entity_vdb=self.entities_vdb,
653
+ relationships_vdb=self.relationships_vdb,
654
+ llm_response_cache=self.llm_response_cache,
655
+ global_config=asdict(self),
656
+ )
657
+ if maybe_new_kg is None:
658
+ logger.info("No entities or relationships extracted!")
659
+ # Update status to processed
660
+ await self.text_chunks.change_status(chunk_id, DocStatus.PROCESSED)
661
+ except Exception as e:
662
+ logger.error("Failed to extract entities and relationships")
663
+ # Mark as failed if any step fails
664
+ await self.text_chunks.change_status(chunk_id, DocStatus.FAILED)
665
+ raise e
666
+
667
+ with tqdm_async(total=len(_todo_chunk_keys),
668
+ desc="\nLevel 1 - Processing chunks",
669
+ unit="chunk",
670
+ position=0) as progress:
671
+ tasks = []
672
+ for chunk_id in _todo_chunk_keys:
673
+ task = asyncio.create_task(process_chunk(chunk_id))
674
+ tasks.append(task)
675
+
676
+ for future in asyncio.as_completed(tasks):
677
+ await future
678
+ progress.update(1)
679
+ progress.set_postfix({
680
+ 'LLM call': statistic_data["llm_call"],
681
+ 'LLM cache': statistic_data["llm_cache"],
682
+ })
683
+
684
+ # Ensure all indexes are updated after each document
685
+ await self._insert_done()
686
+
687
  async def _insert_done(self):
688
  tasks = []
689
  for storage_inst in [
lightrag/operate.py CHANGED
@@ -20,6 +20,7 @@ from .utils import (
20
  handle_cache,
21
  save_to_cache,
22
  CacheData,
 
23
  )
24
  from .base import (
25
  BaseGraphStorage,
@@ -371,14 +372,16 @@ async def extract_entities(
371
  if need_to_restore:
372
  llm_response_cache.global_config = global_config
373
  if cached_return:
 
 
374
  return cached_return
375
-
376
  if history_messages:
377
  res: str = await use_llm_func(
378
  input_text, history_messages=history_messages
379
  )
380
  else:
381
- res: str = await use_llm_func(input_text)
382
  await save_to_cache(
383
  llm_response_cache,
384
  CacheData(args_hash=arg_hash, content=res, prompt=_prompt),
@@ -459,10 +462,8 @@ async def extract_entities(
459
  now_ticks = PROMPTS["process_tickers"][
460
  already_processed % len(PROMPTS["process_tickers"])
461
  ]
462
- print(
463
  f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
464
- end="",
465
- flush=True,
466
  )
467
  return dict(maybe_nodes), dict(maybe_edges)
468
 
@@ -470,8 +471,8 @@ async def extract_entities(
470
  for result in tqdm_async(
471
  asyncio.as_completed([_process_single_content(c) for c in ordered_chunks]),
472
  total=len(ordered_chunks),
473
- desc="Extracting entities from chunks",
474
- unit="chunk",
475
  ):
476
  results.append(await result)
477
 
@@ -482,7 +483,7 @@ async def extract_entities(
482
  maybe_nodes[k].extend(v)
483
  for k, v in m_edges.items():
484
  maybe_edges[tuple(sorted(k))].extend(v)
485
- logger.info("Inserting entities into storage...")
486
  all_entities_data = []
487
  for result in tqdm_async(
488
  asyncio.as_completed(
@@ -492,12 +493,12 @@ async def extract_entities(
492
  ]
493
  ),
494
  total=len(maybe_nodes),
495
- desc="Inserting entities",
496
- unit="entity",
497
  ):
498
  all_entities_data.append(await result)
499
 
500
- logger.info("Inserting relationships into storage...")
501
  all_relationships_data = []
502
  for result in tqdm_async(
503
  asyncio.as_completed(
@@ -509,8 +510,8 @@ async def extract_entities(
509
  ]
510
  ),
511
  total=len(maybe_edges),
512
- desc="Inserting relationships",
513
- unit="relationship",
514
  ):
515
  all_relationships_data.append(await result)
516
 
 
20
  handle_cache,
21
  save_to_cache,
22
  CacheData,
23
+ statistic_data
24
  )
25
  from .base import (
26
  BaseGraphStorage,
 
372
  if need_to_restore:
373
  llm_response_cache.global_config = global_config
374
  if cached_return:
375
+ logger.debug(f"Found cache for {arg_hash}")
376
+ statistic_data["llm_cache"] += 1
377
  return cached_return
378
+ statistic_data["llm_call"] += 1
379
  if history_messages:
380
  res: str = await use_llm_func(
381
  input_text, history_messages=history_messages
382
  )
383
  else:
384
+ res: str = await use_llm_func(input_text)
385
  await save_to_cache(
386
  llm_response_cache,
387
  CacheData(args_hash=arg_hash, content=res, prompt=_prompt),
 
462
  now_ticks = PROMPTS["process_tickers"][
463
  already_processed % len(PROMPTS["process_tickers"])
464
  ]
465
+ logger.debug(
466
  f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
 
 
467
  )
468
  return dict(maybe_nodes), dict(maybe_edges)
469
 
 
471
  for result in tqdm_async(
472
  asyncio.as_completed([_process_single_content(c) for c in ordered_chunks]),
473
  total=len(ordered_chunks),
474
+ desc="Level 2 - Extracting entities and relationships",
475
+ unit="chunk", position=1,leave=False
476
  ):
477
  results.append(await result)
478
 
 
483
  maybe_nodes[k].extend(v)
484
  for k, v in m_edges.items():
485
  maybe_edges[tuple(sorted(k))].extend(v)
486
+ logger.debug("Inserting entities into storage...")
487
  all_entities_data = []
488
  for result in tqdm_async(
489
  asyncio.as_completed(
 
493
  ]
494
  ),
495
  total=len(maybe_nodes),
496
+ desc="Level 3 - Inserting entities",
497
+ unit="entity", position=2,leave=False
498
  ):
499
  all_entities_data.append(await result)
500
 
501
+ logger.debug("Inserting relationships into storage...")
502
  all_relationships_data = []
503
  for result in tqdm_async(
504
  asyncio.as_completed(
 
510
  ]
511
  ),
512
  total=len(maybe_edges),
513
+ desc="Level 3 - Inserting relationships",
514
+ unit="relationship", position=3,leave=False
515
  ):
516
  all_relationships_data.append(await result)
517
 
lightrag/utils.py CHANGED
@@ -30,8 +30,13 @@ class UnlimitedSemaphore:
30
 
31
  ENCODER = None
32
 
 
 
33
  logger = logging.getLogger("lightrag")
34
 
 
 
 
35
 
36
  def set_logger(log_file: str):
37
  logger.setLevel(logging.DEBUG)
@@ -453,7 +458,8 @@ async def handle_cache(hashing_kv, args_hash, prompt, mode="default"):
453
  return None, None, None, None
454
 
455
  # For naive mode, only use simple cache matching
456
- if mode == "naive":
 
457
  if exists_func(hashing_kv, "get_by_mode_and_id"):
458
  mode_cache = await hashing_kv.get_by_mode_and_id(mode, args_hash) or {}
459
  else:
 
30
 
31
  ENCODER = None
32
 
33
# Global usage counters: incremented in operate.py (LLM calls vs. cache hits)
# and displayed by the pipeline progress bars in lightrag.py.
statistic_data = {"llm_call": 0, "llm_cache": 0, "embed_call": 0}

logger = logging.getLogger("lightrag")

# Silence httpx request logs (one INFO line per LLM HTTP call is too noisy).
logging.getLogger("httpx").setLevel(logging.WARNING)
39
+
40
 
41
  def set_logger(log_file: str):
42
  logger.setLevel(logging.DEBUG)
 
458
  return None, None, None, None
459
 
460
  # For naive mode, only use simple cache matching
461
+ #if mode == "naive":
462
+ if mode == "default":
463
  if exists_func(hashing_kv, "get_by_mode_and_id"):
464
  mode_cache = await hashing_kv.get_by_mode_and_id(mode, args_hash) or {}
465
  else: