zrguo committed on
Commit 99c8d80 · 1 Parent(s): beb4bc2

Move batch_eval.py to ./reproduce/

Files changed (2):
  1. README.md +1 -1
  2. reproduce/batch_eval.py +122 -0
README.md CHANGED
@@ -1243,7 +1243,7 @@ Output the results in the following structure:
 
 ### Batch Eval
 
-To evaluate the performance of two RAG systems on high-level queries, LightRAG uses the following prompt, with the specific code available in `example/batch_eval.py`.
+To evaluate the performance of two RAG systems on high-level queries, LightRAG uses the following prompt, with the specific code available in `reproduce/batch_eval.py`.
 
 <details>
 <summary> Prompt </summary>
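
For reference, the script added below parses the query file with the regex `- Question \d+: (.+)` and expects each result file to be a JSON array of objects carrying a `result` field. A minimal sketch of the two input shapes (file names and contents are illustrative, not part of the commit):

```text
- Question 1: How do the two RAG systems handle multi-hop questions?
- Question 2: Which themes recur across the corpus?
```

```json
[
  {"result": "Answer text produced by one RAG system for Question 1..."},
  {"result": "Answer text produced for Question 2..."}
]
```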
reproduce/batch_eval.py ADDED
@@ -0,0 +1,122 @@
+import re
+import json
+import jsonlines
+
+from openai import OpenAI
+
+
+def batch_eval(query_file, result1_file, result2_file, output_file_path):
+    client = OpenAI()
+
+    with open(query_file, "r") as f:
+        data = f.read()
+
+    # Extract the questions, one per line of the form "- Question N: ...".
+    queries = re.findall(r"- Question \d+: (.+)", data)
+
+    with open(result1_file, "r") as f:
+        answers1 = json.load(f)
+    answers1 = [i["result"] for i in answers1]
+
+    with open(result2_file, "r") as f:
+        answers2 = json.load(f)
+    answers2 = [i["result"] for i in answers2]
+
+    # Build one Batch API request per (query, answer1, answer2) triple.
+    requests = []
+    for i, (query, answer1, answer2) in enumerate(zip(queries, answers1, answers2)):
+        sys_prompt = """
+        ---Role---
+        You are an expert tasked with evaluating two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
+        """
+
+        prompt = f"""
+        You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
+
+        - **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
+        - **Diversity**: How varied and rich is the answer in providing different perspectives and insights on the question?
+        - **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic?
+
+        For each criterion, choose the better answer (either Answer 1 or Answer 2) and explain why. Then, select an overall winner based on these three categories.
+
+        Here is the question:
+        {query}
+
+        Here are the two answers:
+
+        **Answer 1:**
+        {answer1}
+
+        **Answer 2:**
+        {answer2}
+
+        Evaluate both answers using the three criteria listed above and provide detailed explanations for each criterion.
+
+        Output your evaluation in the following JSON format:
+
+        {{
+            "Comprehensiveness": {{
+                "Winner": "[Answer 1 or Answer 2]",
+                "Explanation": "[Provide explanation here]"
+            }},
+            "Diversity": {{
+                "Winner": "[Answer 1 or Answer 2]",
+                "Explanation": "[Provide explanation here]"
+            }},
+            "Empowerment": {{
+                "Winner": "[Answer 1 or Answer 2]",
+                "Explanation": "[Provide explanation here]"
+            }},
+            "Overall Winner": {{
+                "Winner": "[Answer 1 or Answer 2]",
+                "Explanation": "[Summarize why this answer is the overall winner based on the three criteria]"
+            }}
+        }}
+        """
+
+        request_data = {
+            "custom_id": f"request-{i+1}",
+            "method": "POST",
+            "url": "/v1/chat/completions",
+            "body": {
+                "model": "gpt-4o-mini",
+                "messages": [
+                    {"role": "system", "content": sys_prompt},
+                    {"role": "user", "content": prompt},
+                ],
+            },
+        }
+
+        requests.append(request_data)
+
+    # Write the requests as JSONL, the input format the Batch API expects.
+    with jsonlines.open(output_file_path, mode="w") as writer:
+        for request in requests:
+            writer.write(request)
+
+    print(f"Batch API requests written to {output_file_path}")
+
+    # Upload the JSONL file and start the batch job.
+    batch_input_file = client.files.create(
+        file=open(output_file_path, "rb"), purpose="batch"
+    )
+    batch_input_file_id = batch_input_file.id
+
+    batch = client.batches.create(
+        input_file_id=batch_input_file_id,
+        endpoint="/v1/chat/completions",
+        completion_window="24h",
+        metadata={"description": "nightly eval job"},
+    )
+
+    print(f"Batch {batch.id} has been created.")
+
+
+if __name__ == "__main__":
+    # Placeholder paths for illustration; substitute the actual files.
+    batch_eval(
+        "queries.txt",
+        "result1.json",
+        "result2.json",
+        "batch_requests.jsonl",
+    )
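
The script submits the evaluation job but does not collect its output. A minimal sketch for doing so, assuming the standard OpenAI Batch API flow (`fetch_batch_results` is a hypothetical helper, not part of this commit):

```python
import json
import time

from openai import OpenAI

client = OpenAI()


def fetch_batch_results(batch_id: str) -> list[dict]:
    """Poll a Batch API job until it finishes, then parse its JSONL output."""
    while True:
        batch = client.batches.retrieve(batch_id)
        if batch.status in ("completed", "failed", "expired", "cancelled"):
            break
        time.sleep(60)  # jobs may take up to the 24h completion window

    if batch.status != "completed":
        raise RuntimeError(f"Batch ended with status: {batch.status}")

    # One JSONL line per request, keyed by its "request-N" custom_id.
    raw = client.files.content(batch.output_file_id).text
    return [json.loads(line) for line in raw.splitlines() if line]
```

Each parsed line should carry the judge's verdict under `response.body.choices[0].message.content`, in the JSON format requested by the prompt above.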