LarFii committed commit 1971f8d · 1 parent: d09c742

update reproduce

Files changed (5)
  1. README.md +113 -1
  2. reproduce/Step_0.py +63 -0
  3. reproduce/Step_1.py +32 -0
  4. reproduce/Step_2.py +76 -0
  5. reproduce/Step_3.py +62 -0
README.md CHANGED
@@ -149,7 +149,6 @@ Output your evaluation in the following JSON format:
 }}
 ```
 ### Overall Performance Table
-### Overall Performance Table
 | | **Agriculture** | | **CS** | | **Legal** | | **Mix** | |
 |----------------------|-------------------------|-----------------------|-----------------------|-----------------------|-----------------------|-----------------------|-----------------------|-----------------------|
 | | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** |
@@ -173,6 +172,114 @@ Output your evaluation in the following JSON format:
 | **Empowerment** | 36.69% | **63.31%** | 45.09% | **54.91%** | 42.81% | **57.19%** | **52.94%** | 47.06% |
 | **Overall** | 43.62% | **56.38%** | 45.98% | **54.02%** | 45.70% | **54.30%** | **51.86%** | 48.14% |
 
+## Reproduce
+All the code can be found in the `./reproduce` directory.
+### Step-0 Extract Unique Contexts
+First, we need to extract unique contexts from the datasets.
+```python
+def extract_unique_contexts(input_directory, output_directory):
+
+    os.makedirs(output_directory, exist_ok=True)
+
+    jsonl_files = glob.glob(os.path.join(input_directory, '*.jsonl'))
+    print(f"Found {len(jsonl_files)} JSONL files.")
+
+    for file_path in jsonl_files:
+        filename = os.path.basename(file_path)
+        name, ext = os.path.splitext(filename)
+        output_filename = f"{name}_unique_contexts.json"
+        output_path = os.path.join(output_directory, output_filename)
+
+        unique_contexts_dict = {}
+
+        print(f"Processing file: {filename}")
+
+        try:
+            with open(file_path, 'r', encoding='utf-8') as infile:
+                for line_number, line in enumerate(infile, start=1):
+                    line = line.strip()
+                    if not line:
+                        continue
+                    try:
+                        json_obj = json.loads(line)
+                        context = json_obj.get('context')
+                        if context and context not in unique_contexts_dict:
+                            unique_contexts_dict[context] = None
+                    except json.JSONDecodeError as e:
+                        print(f"JSON decoding error in file {filename} at line {line_number}: {e}")
+        except FileNotFoundError:
+            print(f"File not found: {filename}")
+            continue
+        except Exception as e:
+            print(f"An error occurred while processing file {filename}: {e}")
+            continue
+
+        unique_contexts_list = list(unique_contexts_dict.keys())
+        print(f"There are {len(unique_contexts_list)} unique `context` entries in the file {filename}.")
+
+        try:
+            with open(output_path, 'w', encoding='utf-8') as outfile:
+                json.dump(unique_contexts_list, outfile, ensure_ascii=False, indent=4)
+            print(f"Unique `context` entries have been saved to: {output_filename}")
+        except Exception as e:
+            print(f"An error occurred while saving to the file {output_filename}: {e}")
+
+    print("All files have been processed.")
+
+```
+### Step-1 Insert Contexts
+Next, we insert the extracted contexts into the LightRAG system.
+
+```python
+def insert_text(rag, file_path):
+    with open(file_path, mode='r') as f:
+        unique_contexts = json.load(f)
+
+    retries = 0
+    max_retries = 3
+    while retries < max_retries:
+        try:
+            rag.insert(unique_contexts)
+            break
+        except Exception as e:
+            retries += 1
+            print(f"Insertion failed, retrying ({retries}/{max_retries}), error: {e}")
+            time.sleep(10)
+    if retries == max_retries:
+        print("Insertion failed after exceeding the maximum number of retries")
+```
+### Step-2 Generate Queries
+
+We extract tokens from the first half and the second half of each context in the dataset, combine them into a dataset description, and use that description to generate queries.
+```python
+tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+
+def get_summary(context, tot_tokens=2000):
+    tokens = tokenizer.tokenize(context)
+    half_tokens = tot_tokens // 2
+
+    start_tokens = tokens[1000:1000 + half_tokens]
+    end_tokens = tokens[-(1000 + half_tokens):1000]
+
+    summary_tokens = start_tokens + end_tokens
+    summary = tokenizer.convert_tokens_to_string(summary_tokens)
+
+    return summary
+```
+
+### Step-3 Query
+We extract the queries generated in Step-2 and use them to query LightRAG.
+```python
+def extract_queries(file_path):
+    with open(file_path, 'r') as f:
+        data = f.read()
+
+    data = data.replace('**', '')
+
+    queries = re.findall(r'- Question \d+: (.+)', data)
+
+    return queries
+```
 ## Code Structure
 
 ```python
@@ -191,6 +298,11 @@ Output your evaluation in the following JSON format:
 │   ├── prompt.py
 │   ├── storage.py
 │   └── utils.jpeg
+├── reproduce
+│   ├── Step_0.py
+│   ├── Step_1.py
+│   ├── Step_2.py
+│   └── Step_3.py
 ├── LICENSE
 ├── README.md
 ├── requirements.txt
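
The four scripts added below are meant to be run in order from inside `reproduce/`, since they locate the corpora through relative paths such as `../datasets`. A minimal driver sketch (hypothetical, not part of this commit) that assumes the default paths baked into the scripts:

```python
# Hypothetical driver: run the four reproduce steps in order.
# Assumes it is launched from the repository root and that the *.jsonl
# corpora already sit in datasets/ (i.e. ../datasets relative to reproduce/).
import subprocess

for script in ["Step_0.py", "Step_1.py", "Step_2.py", "Step_3.py"]:
    subprocess.run(["python", script], cwd="reproduce", check=True)
```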
reproduce/Step_0.py ADDED
@@ -0,0 +1,63 @@
+import os
+import json
+import glob
+import argparse
+
+def extract_unique_contexts(input_directory, output_directory):
+
+    os.makedirs(output_directory, exist_ok=True)
+
+    jsonl_files = glob.glob(os.path.join(input_directory, '*.jsonl'))
+    print(f"Found {len(jsonl_files)} JSONL files.")
+
+    for file_path in jsonl_files:
+        filename = os.path.basename(file_path)
+        name, ext = os.path.splitext(filename)
+        output_filename = f"{name}_unique_contexts.json"
+        output_path = os.path.join(output_directory, output_filename)
+
+        unique_contexts_dict = {}
+
+        print(f"Processing file: {filename}")
+
+        try:
+            with open(file_path, 'r', encoding='utf-8') as infile:
+                for line_number, line in enumerate(infile, start=1):
+                    line = line.strip()
+                    if not line:
+                        continue
+                    try:
+                        json_obj = json.loads(line)
+                        context = json_obj.get('context')
+                        if context and context not in unique_contexts_dict:
+                            unique_contexts_dict[context] = None
+                    except json.JSONDecodeError as e:
+                        print(f"JSON decoding error in file {filename} at line {line_number}: {e}")
+        except FileNotFoundError:
+            print(f"File not found: {filename}")
+            continue
+        except Exception as e:
+            print(f"An error occurred while processing file {filename}: {e}")
+            continue
+
+        unique_contexts_list = list(unique_contexts_dict.keys())
+        print(f"There are {len(unique_contexts_list)} unique `context` entries in the file {filename}.")
+
+        try:
+            with open(output_path, 'w', encoding='utf-8') as outfile:
+                json.dump(unique_contexts_list, outfile, ensure_ascii=False, indent=4)
+            print(f"Unique `context` entries have been saved to: {output_filename}")
+        except Exception as e:
+            print(f"An error occurred while saving to the file {output_filename}: {e}")
+
+    print("All files have been processed.")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-i', '--input_dir', type=str, default='../datasets')
+    parser.add_argument('-o', '--output_dir', type=str, default='../datasets/unique_contexts')
+
+    args = parser.parse_args()
+
+    extract_unique_contexts(args.input_dir, args.output_dir)
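
Step_0.py expects every `*.jsonl` file to hold one JSON object per line with a `context` field; duplicates are dropped by keying a dict on the context string. A small illustrative run (the toy file names and paths below are invented for this sketch):

```python
# Illustrative only: build a tiny .jsonl in the shape Step_0.py expects
# -- one JSON object per line with a "context" field -- and run the extractor.
import json
import os
from Step_0 import extract_unique_contexts  # assumes the sketch runs inside reproduce/

os.makedirs("toy_input", exist_ok=True)
with open("toy_input/sample.jsonl", "w", encoding="utf-8") as f:
    for ctx in ["first passage", "second passage", "first passage"]:  # one duplicate
        f.write(json.dumps({"context": ctx}) + "\n")

extract_unique_contexts("toy_input", "toy_output")
# toy_output/sample_unique_contexts.json now holds ["first passage", "second passage"]
```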
reproduce/Step_1.py ADDED
@@ -0,0 +1,32 @@
+import os
+import json
+import time
+
+from lightrag import LightRAG
+
+def insert_text(rag, file_path):
+    with open(file_path, mode='r') as f:
+        unique_contexts = json.load(f)
+
+    retries = 0
+    max_retries = 3
+    while retries < max_retries:
+        try:
+            rag.insert(unique_contexts)
+            break
+        except Exception as e:
+            retries += 1
+            print(f"Insertion failed, retrying ({retries}/{max_retries}), error: {e}")
+            time.sleep(10)
+    if retries == max_retries:
+        print("Insertion failed after exceeding the maximum number of retries")
+
+cls = "agriculture"
+WORKING_DIR = f"../{cls}"
+
+if not os.path.exists(WORKING_DIR):
+    os.mkdir(WORKING_DIR)
+
+rag = LightRAG(working_dir=WORKING_DIR)
+
+insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
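
Step_1.py only ingests the agriculture split. A hedged sketch of covering all four classes from the performance table, assuming the other splits follow the same lowercase file naming as `agriculture` and that each class gets its own working directory:

```python
# Hypothetical extension, not part of this commit: insert every dataset class,
# assuming one <cls>_unique_contexts.json file and one working dir per class.
import json
import os

from lightrag import LightRAG

for cls in ["agriculture", "cs", "legal", "mix"]:
    working_dir = f"../{cls}"
    os.makedirs(working_dir, exist_ok=True)

    with open(f"../datasets/unique_contexts/{cls}_unique_contexts.json", encoding="utf-8") as f:
        unique_contexts = json.load(f)

    rag = LightRAG(working_dir=working_dir)
    rag.insert(unique_contexts)  # same call Step_1.py wraps with retries
```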
reproduce/Step_2.py ADDED
@@ -0,0 +1,76 @@
+import os
+import json
+from openai import OpenAI
+from transformers import GPT2Tokenizer
+
+def openai_complete_if_cache(
+    model="gpt-4o", prompt=None, system_prompt=None, history_messages=[], **kwargs
+) -> str:
+    openai_client = OpenAI()
+
+    messages = []
+    if system_prompt:
+        messages.append({"role": "system", "content": system_prompt})
+    messages.extend(history_messages)
+    messages.append({"role": "user", "content": prompt})
+
+    response = openai_client.chat.completions.create(
+        model=model, messages=messages, **kwargs
+    )
+    return response.choices[0].message.content
+
+tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+
+def get_summary(context, tot_tokens=2000):
+    tokens = tokenizer.tokenize(context)
+    half_tokens = tot_tokens // 2
+
+    start_tokens = tokens[1000:1000 + half_tokens]
+    end_tokens = tokens[-(1000 + half_tokens):1000]
+
+    summary_tokens = start_tokens + end_tokens
+    summary = tokenizer.convert_tokens_to_string(summary_tokens)
+
+    return summary
+
+
+clses = ['agriculture']
+for cls in clses:
+    with open(f'../datasets/unique_contexts/{cls}_unique_contexts.json', mode='r') as f:
+        unique_contexts = json.load(f)
+
+    summaries = [get_summary(context) for context in unique_contexts]
+
+    total_description = "\n\n".join(summaries)
+
+    prompt = f"""
+    Given the following description of a dataset:
+
+    {total_description}
+
+    Please identify 5 potential users who would engage with this dataset. For each user, list 5 tasks they would perform with this dataset. Then, for each (user, task) combination, generate 5 questions that require a high-level understanding of the entire dataset.
+
+    Output the results in the following structure:
+    - User 1: [user description]
+        - Task 1: [task description]
+            - Question 1:
+            - Question 2:
+            - Question 3:
+            - Question 4:
+            - Question 5:
+        - Task 2: [task description]
+        ...
+        - Task 5: [task description]
+    - User 2: [user description]
+        ...
+    - User 5: [user description]
+        ...
+    """
+
+    result = openai_complete_if_cache(model='gpt-4o', prompt=prompt)
+
+    file_path = f"../datasets/questions/{cls}_questions.txt"
+    with open(file_path, "w") as file:
+        file.write(result)
+
+    print(f"{cls}_questions written to {file_path}")
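
The question files written by Step_2.py are plain text in the `- Question N: ...` layout requested by the prompt, which is exactly what Step_3.py's regex recovers. A toy example of that handoff (the sample text below is invented):

```python
# Illustrative only: a fragment shaped like the Step_2.py output, parsed with
# the same regex Step_3.py uses (after stripping the '**' bold markers).
import re

sample = """\
- User 1: Agronomist
    - Task 1: Compare crop rotation practices
        - Question 1: **How do the documents characterize soil health trade-offs?**
        - Question 2: Which irrigation strategies recur across regions?
"""

queries = re.findall(r'- Question \d+: (.+)', sample.replace('**', ''))
print(queries)
# ['How do the documents characterize soil health trade-offs?',
#  'Which irrigation strategies recur across regions?']
```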
reproduce/Step_3.py ADDED
@@ -0,0 +1,62 @@
+import re
+import json
+import asyncio
+from lightrag import LightRAG, QueryParam
+from tqdm import tqdm
+
+def extract_queries(file_path):
+    with open(file_path, 'r') as f:
+        data = f.read()
+
+    data = data.replace('**', '')
+
+    queries = re.findall(r'- Question \d+: (.+)', data)
+
+    return queries
+
+async def process_query(query_text, rag_instance, query_param):
+    try:
+        result, context = await rag_instance.aquery(query_text, param=query_param)
+        return {"query": query_text, "result": result, "context": context}, None
+    except Exception as e:
+        return None, {"query": query_text, "error": str(e)}
+
+def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
+    try:
+        loop = asyncio.get_event_loop()
+    except RuntimeError:
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+    return loop
+
+def run_queries_and_save_to_json(queries, rag_instance, query_param, output_file, error_file):
+    loop = always_get_an_event_loop()
+
+    with open(output_file, 'a', encoding='utf-8') as result_file, open(error_file, 'a', encoding='utf-8') as err_file:
+        result_file.write("[\n")
+        first_entry = True
+
+        for query_text in tqdm(queries, desc="Processing queries", unit="query"):
+            result, error = loop.run_until_complete(process_query(query_text, rag_instance, query_param))
+
+            if result:
+                if not first_entry:
+                    result_file.write(",\n")
+                json.dump(result, result_file, ensure_ascii=False, indent=4)
+                first_entry = False
+            elif error:
+                json.dump(error, err_file, ensure_ascii=False, indent=4)
+                err_file.write("\n")
+
+        result_file.write("\n]")
+
+if __name__ == "__main__":
+    cls = "agriculture"
+    mode = "hybrid"
+    WORKING_DIR = f"../{cls}"
+
+    rag = LightRAG(working_dir=WORKING_DIR)
+    query_param = QueryParam(mode=mode)
+
+    queries = extract_queries(f"../datasets/questions/{cls}_questions.txt")
+    run_queries_and_save_to_json(queries, rag, query_param, "result.json", "errors.json")
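
After a single run, `result.json` holds a JSON array of `{"query", "result", "context"}` objects (the script opens the file in append mode, so a single run is assumed here). A minimal sketch of reading the answers back for later comparison under the evaluation prompt shown earlier in the README:

```python
# Illustrative only: load the result.json written by Step_3.py (single run assumed).
import json

with open("result.json", encoding="utf-8") as f:
    results = json.load(f)

for entry in results[:3]:
    print(entry["query"])
    print(entry["result"][:200], "...")
```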