##############################################
# Gremlin storage implementation is deprecated
##############################################

import asyncio
import inspect
import os

# Uncomment the two lines below to filter out the fairly verbose INFO-level
# log output (INFO is the default log level). The logging configuration has
# to run before the lightrag imports to take effect, which triggers an
# import-order lint warning, so it is left commented out here:
# import logging
# logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARN)

from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

WORKING_DIR = "./dickens_gremlin"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

# Gremlin server connection settings
os.environ["GREMLIN_HOST"] = "localhost"
os.environ["GREMLIN_PORT"] = "8182"
os.environ["GREMLIN_GRAPH"] = "dickens"

# Creating a non-default traversal source requires manual configuration
# and a restart of the server, so we use the default "g"
os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"

# No authorization by default on docker tinkerpop/gremlin-server
os.environ["GREMLIN_USER"] = ""
os.environ["GREMLIN_PASSWORD"] = ""


async def initialize_rag():
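    # The LLM and embedding functions below call a local Ollama server (assumed
    # to be running at http://localhost:11434). The referenced models have to
    # be pulled beforehand, for example:
    #   ollama pull llama3.1:8b
    #   ollama pull nomic-embed-text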
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=ollama_model_complete,
        llm_model_name="llama3.1:8b",
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        llm_model_kwargs={
            "host": "http://localhost:11434",
            "options": {"num_ctx": 32768},
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=768,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
            ),
        ),
        graph_storage="GremlinStorage",
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


async def print_stream(stream):
    async for chunk in stream:
        print(chunk, end="", flush=True)


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    # Insert example text (assumes ./book.txt exists; any UTF-8 plain-text file works)
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    # Test different query modes
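    # (Roughly: "naive" does plain chunk retrieval, "local" is entity-centric,
    # "global" is relationship-centric, and "hybrid" combines local and global;
    # see the LightRAG documentation for the exact semantics.)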
    print("\nNaive Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    print("\nLocal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    print("\nGlobal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    print("\nHybrid Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )

    # Streaming response: with stream=True the query may return an async
    # generator, which is printed chunk by chunk; otherwise it is a plain string
    resp = rag.query(
        "What are the top themes in this story?",
        param=QueryParam(mode="hybrid", stream=True),
    )

    if inspect.isasyncgen(resp):
        asyncio.run(print_stream(resp))
    else:
        print(resp)


if __name__ == "__main__":
    main()