zrguo committed
Commit e708e45 · unverified · 2 Parent(s): 96e7f29 5b5f653

Merge pull request #750 from gurjot-05/main

Files changed (1):
  1. examples/lightrag_gemini_demo.py +84 -0
examples/lightrag_gemini_demo.py ADDED
@@ -0,0 +1,84 @@
# pip install -q -U google-genai to use Gemini as a client
# (the remaining imports come from numpy, python-dotenv, sentence-transformers, and lightrag)

import os
import shutil

import numpy as np
from google import genai
from google.genai import types
from dotenv import load_dotenv
from sentence_transformers import SentenceTransformer

from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc

load_dotenv()
gemini_api_key = os.getenv("GEMINI_API_KEY")

WORKING_DIR = "./dickens"

# Start from a clean working directory so each run rebuilds the index
if os.path.exists(WORKING_DIR):
    shutil.rmtree(WORKING_DIR)
os.mkdir(WORKING_DIR)


async def llm_model_func(
    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
) -> str:
    # 1. Initialize the GenAI client with your Gemini API key
    client = genai.Client(api_key=gemini_api_key)

    # 2. Combine prompts: system prompt, history, and the new user prompt
    if history_messages is None:
        history_messages = []

    combined_prompt = ""
    if system_prompt:
        combined_prompt += f"{system_prompt}\n"

    for msg in history_messages:
        # Each msg is expected to be a dict: {"role": "...", "content": "..."}
        combined_prompt += f"{msg['role']}: {msg['content']}\n"

    # Finally, add the new user prompt
    combined_prompt += f"user: {prompt}"

    # 3. Call the Gemini model
    response = client.models.generate_content(
        model="gemini-1.5-flash",
        contents=[combined_prompt],
        config=types.GenerateContentConfig(max_output_tokens=500, temperature=0.1),
    )

    # 4. Return the response text
    return response.text


# Load the embedding model once instead of re-initializing it on every call
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")


async def embedding_func(texts: list[str]) -> np.ndarray:
    return embedding_model.encode(texts, convert_to_numpy=True)


rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=384,  # all-MiniLM-L6-v2 produces 384-dimensional vectors
        max_token_size=8192,
        func=embedding_func,
    ),
)

file_path = "story.txt"
with open(file_path, "r", encoding="utf-8") as file:
    text = file.read()

rag.insert(text)

response = rag.query(
    query="What is the main theme of the story?",
    param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
)

print(response)
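
The demo queries only in hybrid mode, but LightRAG's QueryParam also accepts naive, local, and global retrieval. A minimal sketch for comparing the modes on the same index, reusing the rag instance built above (results depend on the indexed text):

# Query the same question under each of LightRAG's retrieval modes.
# Assumes the `rag` instance and imports from the demo above.
for mode in ["naive", "local", "global", "hybrid"]:
    answer = rag.query(
        query="What is the main theme of the story?",
        param=QueryParam(mode=mode, top_k=5),
    )
    print(f"[{mode}] {answer}")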