Commit f1449cf by Pankaj Kaushal
Parent(s): b7cf5a4
Linting and formatting
examples/lightrag_api_llamaindex_direct_demo_simplified.py
CHANGED
@@ -1,6 +1,9 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
+from lightrag.wrapper.llama_index_impl import (
+    llama_index_complete_if_cache,
+    llama_index_embed,
+)
 from lightrag.utils import EmbeddingFunc
 from llama_index.llms.openai import OpenAI
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -25,20 +28,21 @@ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 # Initialize LLM function
 async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
     try:
         # Initialize OpenAI if not in kwargs
-        if 'llm_instance' not in kwargs:
+        if "llm_instance" not in kwargs:
             llm_instance = OpenAI(
                 model=LLM_MODEL,
                 api_key=OPENAI_API_KEY,
                 temperature=0.7,
             )
-            kwargs['llm_instance'] = llm_instance
+            kwargs["llm_instance"] = llm_instance
 
         response = await llama_index_complete_if_cache(
-            kwargs['llm_instance'],
+            kwargs["llm_instance"],
             prompt,
             system_prompt=system_prompt,
             history_messages=history_messages,
@@ -49,6 +53,7 @@ async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwar
         print(f"LLM request failed: {str(e)}")
         raise
 
+
 # Initialize embedding function
 async def embedding_func(texts):
     try:
@@ -61,6 +66,7 @@ async def embedding_func(texts):
         print(f"Embedding failed: {str(e)}")
         raise
 
+
 # Get embedding dimension
 async def get_embedding_dim():
     test_text = ["This is a test sentence."]
@@ -69,6 +75,7 @@ async def get_embedding_dim():
     print(f"embedding_dim={embedding_dim}")
     return embedding_dim
 
+
 # Initialize RAG instance
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -86,13 +93,21 @@ with open("./book.txt", "r", encoding="utf-8") as f:
 
 # Test different query modes
 print("\nNaive Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 
 print("\nLocal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 
 print("\nGlobal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 
 print("\nHybrid Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)
examples/lightrag_api_llamaindex_litellm_demo_simplified.py
CHANGED
@@ -1,6 +1,9 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
+from lightrag.wrapper.llama_index_impl import (
+    llama_index_complete_if_cache,
+    llama_index_embed,
+)
 from lightrag.utils import EmbeddingFunc
 from llama_index.llms.litellm import LiteLLM
 from llama_index.embeddings.litellm import LiteLLMEmbedding
@@ -27,21 +30,22 @@ LITELLM_KEY = os.environ.get("LITELLM_KEY", "sk-1234")
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 # Initialize LLM function
 async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
     try:
         # Initialize LiteLLM if not in kwargs
-        if 'llm_instance' not in kwargs:
+        if "llm_instance" not in kwargs:
             llm_instance = LiteLLM(
                 model=f"openai/{LLM_MODEL}",  # Format: "provider/model_name"
                 api_base=LITELLM_URL,
                 api_key=LITELLM_KEY,
                 temperature=0.7,
             )
-            kwargs['llm_instance'] = llm_instance
+            kwargs["llm_instance"] = llm_instance
 
         response = await llama_index_complete_if_cache(
-            kwargs['llm_instance'],
+            kwargs["llm_instance"],
             prompt,
             system_prompt=system_prompt,
             history_messages=history_messages,
@@ -52,6 +56,7 @@ async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwar
         print(f"LLM request failed: {str(e)}")
         raise
 
+
 # Initialize embedding function
 async def embedding_func(texts):
     try:
@@ -65,6 +70,7 @@ async def embedding_func(texts):
         print(f"Embedding failed: {str(e)}")
         raise
 
+
 # Get embedding dimension
 async def get_embedding_dim():
     test_text = ["This is a test sentence."]
@@ -73,6 +79,7 @@ async def get_embedding_dim():
     print(f"embedding_dim={embedding_dim}")
     return embedding_dim
 
+
 # Initialize RAG instance
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -90,13 +97,21 @@ with open("./book.txt", "r", encoding="utf-8") as f:
 
 # Test different query modes
 print("\nNaive Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 
 print("\nLocal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 
 print("\nGlobal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 
 print("\nHybrid Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)