Remove deprecated demo code
examples/lightrag_ollama_gremlin_demo.py
DELETED
@@ -1,122 +0,0 @@
-##############################################
-# Gremlin storage implementation is deprecated
-##############################################
-
-import asyncio
-import inspect
-import os
-
-# Uncomment these lines below to filter out somewhat verbose INFO level
-# logging prints (the default loglevel is INFO).
-# This has to go before the lightrag imports to work,
-# which triggers linting errors, so we keep it commented out:
-# import logging
-# logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARN)
-
-from lightrag import LightRAG, QueryParam
-from lightrag.llm.ollama import ollama_embed, ollama_model_complete
-from lightrag.utils import EmbeddingFunc
-from lightrag.kg.shared_storage import initialize_pipeline_status
-
-WORKING_DIR = "./dickens_gremlin"
-
-if not os.path.exists(WORKING_DIR):
-    os.mkdir(WORKING_DIR)
-
-# Gremlin
-os.environ["GREMLIN_HOST"] = "localhost"
-os.environ["GREMLIN_PORT"] = "8182"
-os.environ["GREMLIN_GRAPH"] = "dickens"
-
-# Creating a non-default source requires manual
-# configuration and a restart on the server: use the default "g"
-os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
-
-# No authorization by default on docker tinkerpop/gremlin-server
-os.environ["GREMLIN_USER"] = ""
-os.environ["GREMLIN_PASSWORD"] = ""
-
-
-async def initialize_rag():
-    rag = LightRAG(
-        working_dir=WORKING_DIR,
-        llm_model_func=ollama_model_complete,
-        llm_model_name="llama3.1:8b",
-        llm_model_max_async=4,
-        llm_model_max_token_size=32768,
-        llm_model_kwargs={
-            "host": "http://localhost:11434",
-            "options": {"num_ctx": 32768},
-        },
-        embedding_func=EmbeddingFunc(
-            embedding_dim=768,
-            max_token_size=8192,
-            func=lambda texts: ollama_embed(
-                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
-            ),
-        ),
-        graph_storage="GremlinStorage",
-    )
-
-    await rag.initialize_storages()
-    await initialize_pipeline_status()
-
-    return rag
-
-
-async def print_stream(stream):
-    async for chunk in stream:
-        print(chunk, end="", flush=True)
-
-
-def main():
-    # Initialize RAG instance
-    rag = asyncio.run(initialize_rag())
-
-    # Insert example text
-    with open("./book.txt", "r", encoding="utf-8") as f:
-        rag.insert(f.read())
-
-    # Test different query modes
-    print("\nNaive Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="naive")
-        )
-    )
-
-    print("\nLocal Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="local")
-        )
-    )
-
-    print("\nGlobal Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="global")
-        )
-    )
-
-    print("\nHybrid Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
-        )
-    )
-
-    # stream response
-    resp = rag.query(
-        "What are the top themes in this story?",
-        param=QueryParam(mode="hybrid", stream=True),
-    )
-
-    if inspect.isasyncgen(resp):
-        asyncio.run(print_stream(resp))
-    else:
-        print(resp)
-
-
-if __name__ == "__main__":
-    main()
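Note for anyone migrating off this demo: `GremlinStorage` was the only deprecated piece; the rest of the setup carries over to a supported graph backend by changing `graph_storage`. A minimal sketch, assuming `Neo4JStorage` and the `NEO4J_*` environment variables used in the demo below remain supported (connection values are placeholders):

```python
import asyncio
import os

from lightrag import LightRAG
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

# Placeholder credentials for a local Neo4j instance (assumption).
os.environ["NEO4J_URI"] = "bolt://localhost:7687"
os.environ["NEO4J_USERNAME"] = "neo4j"
os.environ["NEO4J_PASSWORD"] = "neo4j"


async def initialize_rag() -> LightRAG:
    # Identical to the deleted demo except for graph_storage.
    rag = LightRAG(
        working_dir="./dickens",
        llm_model_func=ollama_model_complete,
        llm_model_name="llama3.1:8b",
        embedding_func=EmbeddingFunc(
            embedding_dim=768,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
            ),
        ),
        graph_storage="Neo4JStorage",  # was "GremlinStorage"
    )
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag


if __name__ == "__main__":
    asyncio.run(initialize_rag())
```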
examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
DELETED
@@ -1,104 +0,0 @@
-import os
-from lightrag import LightRAG, QueryParam
-from lightrag.llm.ollama import ollama_model_complete, ollama_embed
-from lightrag.utils import EmbeddingFunc
-import asyncio
-import nest_asyncio
-
-nest_asyncio.apply()
-from lightrag.kg.shared_storage import initialize_pipeline_status
-
-# WorkingDir
-ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-WORKING_DIR = os.path.join(ROOT_DIR, "myKG")
-if not os.path.exists(WORKING_DIR):
-    os.mkdir(WORKING_DIR)
-print(f"WorkingDir: {WORKING_DIR}")
-
-# mongo
-os.environ["MONGO_URI"] = "mongodb://root:root@localhost:27017/"
-os.environ["MONGO_DATABASE"] = "LightRAG"
-
-# neo4j
-BATCH_SIZE_NODES = 500
-BATCH_SIZE_EDGES = 100
-os.environ["NEO4J_URI"] = "bolt://localhost:7687"
-os.environ["NEO4J_USERNAME"] = "neo4j"
-os.environ["NEO4J_PASSWORD"] = "neo4j"
-
-# milvus
-os.environ["MILVUS_URI"] = "http://localhost:19530"
-os.environ["MILVUS_USER"] = "root"
-os.environ["MILVUS_PASSWORD"] = "root"
-os.environ["MILVUS_DB_NAME"] = "lightrag"
-
-
-async def initialize_rag():
-    rag = LightRAG(
-        working_dir=WORKING_DIR,
-        llm_model_func=ollama_model_complete,
-        llm_model_name="qwen2.5:14b",
-        llm_model_max_async=4,
-        llm_model_max_token_size=32768,
-        llm_model_kwargs={
-            "host": "http://127.0.0.1:11434",
-            "options": {"num_ctx": 32768},
-        },
-        embedding_func=EmbeddingFunc(
-            embedding_dim=1024,
-            max_token_size=8192,
-            func=lambda texts: ollama_embed(
-                texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
-            ),
-        ),
-        kv_storage="MongoKVStorage",
-        graph_storage="Neo4JStorage",
-        vector_storage="MilvusVectorDBStorage",
-    )
-
-    await rag.initialize_storages()
-    await initialize_pipeline_status()
-
-    return rag
-
-
-def main():
-    # Initialize RAG instance
-    rag = asyncio.run(initialize_rag())
-
-    # Insert example text
-    with open("./book.txt", "r", encoding="utf-8") as f:
-        rag.insert(f.read())
-
-    # Test different query modes
-    print("\nNaive Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="naive")
-        )
-    )
-
-    print("\nLocal Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="local")
-        )
-    )
-
-    print("\nGlobal Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="global")
-        )
-    )
-
-    print("\nHybrid Search:")
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
-        )
-    )
-
-
-if __name__ == "__main__":
-    main()
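This demo needed `nest_asyncio` only because it mixed the synchronous wrappers (`insert`/`query`) with code that may already be running inside an event loop. The same three-backend configuration works without that patch when the async API is driven from a single `asyncio.run(...)`, as the embedding-cache demo below does. A minimal sketch under that assumption, using the `ainsert`/`aquery` methods shown in these files:

```python
import asyncio

from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status


async def main() -> None:
    # Same backends as the deleted demo; the MONGO_*, NEO4J_* and MILVUS_*
    # environment variables must be set as shown above.
    rag = LightRAG(
        working_dir="./myKG",
        llm_model_func=ollama_model_complete,
        llm_model_name="qwen2.5:14b",
        embedding_func=EmbeddingFunc(
            embedding_dim=1024,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
            ),
        ),
        kv_storage="MongoKVStorage",
        graph_storage="Neo4JStorage",
        vector_storage="MilvusVectorDBStorage",
    )
    await rag.initialize_storages()
    await initialize_pipeline_status()

    with open("./book.txt", "r", encoding="utf-8") as f:
        await rag.ainsert(f.read())  # async insert: no nested-loop workaround

    print(
        await rag.aquery(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )


if __name__ == "__main__":
    asyncio.run(main())
```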
examples/lightrag_openai_compatible_demo_embedding_cache.py
DELETED
@@ -1,123 +0,0 @@
-import os
-import asyncio
-from lightrag import LightRAG, QueryParam
-from lightrag.llm.openai import openai_complete_if_cache, openai_embed
-from lightrag.utils import EmbeddingFunc
-import numpy as np
-from lightrag.kg.shared_storage import initialize_pipeline_status
-
-WORKING_DIR = "./dickens"
-
-if not os.path.exists(WORKING_DIR):
-    os.mkdir(WORKING_DIR)
-
-
-async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
-) -> str:
-    return await openai_complete_if_cache(
-        "solar-mini",
-        prompt,
-        system_prompt=system_prompt,
-        history_messages=history_messages,
-        api_key=os.getenv("UPSTAGE_API_KEY"),
-        base_url="https://api.upstage.ai/v1/solar",
-        **kwargs,
-    )
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await openai_embed(
-        texts,
-        model="solar-embedding-1-large-query",
-        api_key=os.getenv("UPSTAGE_API_KEY"),
-        base_url="https://api.upstage.ai/v1/solar",
-    )
-
-
-async def get_embedding_dim():
-    test_text = ["This is a test sentence."]
-    embedding = await embedding_func(test_text)
-    embedding_dim = embedding.shape[1]
-    return embedding_dim
-
-
-# function test
-async def test_funcs():
-    result = await llm_model_func("How are you?")
-    print("llm_model_func: ", result)
-
-    result = await embedding_func(["How are you?"])
-    print("embedding_func: ", result)
-
-
-# asyncio.run(test_funcs())
-
-
-async def initialize_rag():
-    embedding_dimension = await get_embedding_dim()
-    print(f"Detected embedding dimension: {embedding_dimension}")
-
-    rag = LightRAG(
-        working_dir=WORKING_DIR,
-        embedding_cache_config={
-            "enabled": True,
-            "similarity_threshold": 0.90,
-        },
-        llm_model_func=llm_model_func,
-        embedding_func=EmbeddingFunc(
-            embedding_dim=embedding_dimension,
-            max_token_size=8192,
-            func=embedding_func,
-        ),
-    )
-
-    await rag.initialize_storages()
-    await initialize_pipeline_status()
-
-    return rag
-
-
-async def main():
-    try:
-        # Initialize RAG instance
-        rag = await initialize_rag()
-
-        with open("./book.txt", "r", encoding="utf-8") as f:
-            await rag.ainsert(f.read())
-
-        # Perform naive search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?", param=QueryParam(mode="naive")
-            )
-        )
-
-        # Perform local search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?", param=QueryParam(mode="local")
-            )
-        )
-
-        # Perform global search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?",
-                param=QueryParam(mode="global"),
-            )
-        )
-
-        # Perform hybrid search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?",
-                param=QueryParam(mode="hybrid"),
-            )
-        )
-    except Exception as e:
-        print(f"An error occurred: {e}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
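The embedding cache this demo exercised is not tied to the Upstage endpoints; `embedding_cache_config` attaches to any LightRAG instance and, as the key names suggest, reuses cached results when a new query's embedding is similar enough to an earlier one. A minimal sketch, assuming the config keys shown above are unchanged, paired here with the Ollama setup from the first deleted demo (the 0.90 threshold is illustrative):

```python
import asyncio

from lightrag import LightRAG
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status


async def initialize_rag() -> LightRAG:
    rag = LightRAG(
        working_dir="./dickens",
        # Reuse cached results for queries embedding at least 0.90 similar
        # to an earlier query (illustrative threshold).
        embedding_cache_config={
            "enabled": True,
            "similarity_threshold": 0.90,
        },
        llm_model_func=ollama_model_complete,
        llm_model_name="llama3.1:8b",
        embedding_func=EmbeddingFunc(
            embedding_dim=768,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
            ),
        ),
    )
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag


if __name__ == "__main__":
    asyncio.run(initialize_rag())
```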