Larfii
committed on
Commit
·
ddb02a2
1
Parent(s):
90bbd37
update
Browse files- examples/batch_eval.py +112 -0
- examples/insert.py +19 -0
- examples/query.py +17 -0
- lightrag/__pycache__/lightrag.cpython-310.pyc +0 -0
- lightrag/__pycache__/llm.cpython-310.pyc +0 -0
- lightrag/llm.py +4 -5
examples/batch_eval.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import re
|
3 |
+
import json
|
4 |
+
import jsonlines
|
5 |
+
|
6 |
+
from openai import OpenAI
|
7 |
+
|
8 |
+
|
9 |
+
def batch_eval(query_file, result1_file, result2_file, output_file_path, api_key):
    """Create an OpenAI Batch API job that evaluates two answer sets head-to-head.

    Reads questions from *query_file* (lines shaped like ``- Question 1: ...``),
    pairs them with the ``result`` fields of the JSON arrays stored in
    *result1_file* and *result2_file*, writes one chat-completion request per
    (question, answer1, answer2) triple to *output_file_path* as JSONL, uploads
    that file, and submits a batch with a 24-hour completion window.

    Args:
        query_file: Text file containing the questions to judge.
        result1_file: JSON file with a list of objects carrying a ``result`` key.
        result2_file: Same format as *result1_file*, for the competing system.
        output_file_path: Destination path for the generated JSONL request file.
        api_key: OpenAI API key used for both the upload and the batch creation.

    Note:
        ``zip`` silently drops any queries/answers beyond the shortest of the
        three inputs.
    """
    client = OpenAI(api_key=api_key)

    with open(query_file, 'r') as f:
        data = f.read()

    # Extract the question text that follows each "- Question N:" marker.
    queries = re.findall(r'- Question \d+: (.+)', data)

    with open(result1_file, 'r') as f:
        answers1 = json.load(f)
    answers1 = [i['result'] for i in answers1]

    with open(result2_file, 'r') as f:
        answers2 = json.load(f)
    answers2 = [i['result'] for i in answers2]

    requests = []
    for i, (query, answer1, answer2) in enumerate(zip(queries, answers1, answers2)):
        sys_prompt = f"""
---Role---
You are an expert tasked with evaluating two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
"""

        # The prompt asks for a verdict on all three criteria; the JSON template
        # below must therefore contain a section for each of them (including
        # "Diversity", which an earlier revision omitted).
        prompt = f"""
You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.

- **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
- **Diversity**: How varied and rich is the answer in providing different perspectives and insights on the question?
- **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic?

For each criterion, choose the better answer (either Answer 1 or Answer 2) and explain why. Then, select an overall winner based on these three categories.

Here is the question:
{query}

Here are the two answers:

**Answer 1:**
{answer1}

**Answer 2:**
{answer2}

Evaluate both answers using the three criteria listed above and provide detailed explanations for each criterion.

Output your evaluation in the following JSON format:

{{
    "Comprehensiveness": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Diversity": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Empowerment": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Overall Winner": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Summarize why this answer is the overall winner based on the three criteria]"
    }}
}}
"""

        # One request per comparison, in the Batch API's JSONL request schema.
        request_data = {
            "custom_id": f"request-{i+1}",
            "method": "POST",
            "url": "/v1/chat/completions",
            "body": {
                "model": "gpt-4o-mini",
                "messages": [
                    {"role": "system", "content": sys_prompt},
                    {"role": "user", "content": prompt}
                ],
            }
        }

        requests.append(request_data)

    with jsonlines.open(output_file_path, mode='w') as writer:
        for request in requests:
            writer.write(request)

    print(f"Batch API requests written to {output_file_path}")

    # Upload the JSONL file, then submit it as a batch job.
    batch_input_file = client.files.create(
        file=open(output_file_path, "rb"),
        purpose="batch"
    )
    batch_input_file_id = batch_input_file.id

    batch = client.batches.create(
        input_file_id=batch_input_file_id,
        endpoint="/v1/chat/completions",
        completion_window="24h",
        metadata={
            "description": "nightly eval job"
        }
    )

    print(f'Batch {batch.id} has been created.')
110 |
+
|
111 |
+
if __name__ == "__main__":
    # batch_eval() takes five required arguments; calling it bare raises a
    # TypeError. Collect them from the command line instead.
    import argparse

    parser = argparse.ArgumentParser(
        description="Submit an OpenAI Batch API job comparing two answer sets."
    )
    parser.add_argument("query_file", help="text file with '- Question N: ...' lines")
    parser.add_argument("result1_file", help="JSON list of objects with a 'result' key")
    parser.add_argument("result2_file", help="JSON list of objects with a 'result' key")
    parser.add_argument("output_file_path", help="where to write the JSONL batch requests")
    parser.add_argument("api_key", help="OpenAI API key")
    args = parser.parse_args()

    batch_eval(
        args.query_file,
        args.result1_file,
        args.result2_file,
        args.output_file_path,
        args.api_key,
    )
|
examples/insert.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
# Make the local LightRAG checkout importable; replace the placeholder with the
# real path to the repository before running.
sys.path.append('xxx/xxx/LightRAG')

from lightrag import LightRAG

# NOTE(review): fill in a real key before running — an empty string will fail
# authentication when LightRAG calls the OpenAI API.
os.environ["OPENAI_API_KEY"] = ""

# Directory where LightRAG persists its storage; must be set to a real path.
WORKING_DIR = ""

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(working_dir=WORKING_DIR)

# Read the document to index and insert it into the RAG store.
with open('./text.txt', 'r') as f:
    text = f.read()

rag.insert(text)
|
examples/query.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
# Make the local LightRAG checkout importable; replace the placeholder with the
# real path to the repository before running.
sys.path.append('xxx/xxx/LightRAG')

from lightrag import LightRAG, QueryParam

# NOTE(review): fill in a real key before running — an empty string will fail
# authentication when LightRAG calls the OpenAI API.
os.environ["OPENAI_API_KEY"] = ""

# Directory with a previously-built LightRAG store; must be set to a real path.
WORKING_DIR = ""

rag = LightRAG(working_dir=WORKING_DIR)

# Retrieval mode passed to QueryParam; 'global' is used here — presumably other
# modes (e.g. 'local'/'naive') exist, TODO confirm against QueryParam.
mode = 'global'
query_param = QueryParam(mode=mode)

# NOTE(review): this unpacking assumes rag.query returns a 2-tuple — verify
# against LightRAG.query's actual return value.
result, _ = rag.query("", param=query_param)
print(result)
|
lightrag/__pycache__/lightrag.cpython-310.pyc
CHANGED
Binary files a/lightrag/__pycache__/lightrag.cpython-310.pyc and b/lightrag/__pycache__/lightrag.cpython-310.pyc differ
|
|
lightrag/__pycache__/llm.cpython-310.pyc
CHANGED
Binary files a/lightrag/__pycache__/llm.cpython-310.pyc and b/lightrag/__pycache__/llm.cpython-310.pyc differ
|
|
lightrag/llm.py
CHANGED
@@ -17,10 +17,9 @@ from .utils import compute_args_hash, wrap_embedding_func_with_attrs
|
|
17 |
retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
|
18 |
)
|
19 |
async def openai_complete_if_cache(
|
20 |
-
model, prompt,
|
21 |
-
, system_prompt=None, history_messages=[], **kwargs
|
22 |
) -> str:
|
23 |
-
openai_async_client = AsyncOpenAI(
|
24 |
hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
|
25 |
messages = []
|
26 |
if system_prompt:
|
@@ -72,8 +71,8 @@ async def gpt_4o_mini_complete(
|
|
72 |
wait=wait_exponential(multiplier=1, min=4, max=10),
|
73 |
retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
|
74 |
)
|
75 |
-
async def openai_embedding(texts: list[str]
|
76 |
-
openai_async_client = AsyncOpenAI(
|
77 |
response = await openai_async_client.embeddings.create(
|
78 |
model="text-embedding-3-small", input=texts, encoding_format="float"
|
79 |
)
|
|
|
17 |
retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
|
18 |
)
|
19 |
async def openai_complete_if_cache(
|
20 |
+
model, prompt, system_prompt=None, history_messages=[], **kwargs
|
|
|
21 |
) -> str:
|
22 |
+
openai_async_client = AsyncOpenAI()
|
23 |
hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
|
24 |
messages = []
|
25 |
if system_prompt:
|
|
|
71 |
wait=wait_exponential(multiplier=1, min=4, max=10),
|
72 |
retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
|
73 |
)
|
74 |
+
async def openai_embedding(texts: list[str]) -> np.ndarray:
|
75 |
+
openai_async_client = AsyncOpenAI()
|
76 |
response = await openai_async_client.embeddings.create(
|
77 |
model="text-embedding-3-small", input=texts, encoding_format="float"
|
78 |
)
|