alazarchuk committed on
Commit
cc565e9
·
1 Parent(s): cc51c55

Fine-tune example so the Ollama example can run without needing to tweak the context size in the Modelfile

Browse files
Files changed (1) hide show
  1. examples/lightrag_ollama_demo.py +4 -4
examples/lightrag_ollama_demo.py CHANGED
@@ -15,9 +15,10 @@ if not os.path.exists(WORKING_DIR):
15
  rag = LightRAG(
16
  working_dir=WORKING_DIR,
17
  llm_model_func=ollama_model_complete,
18
- llm_model_name="mistral:7b",
19
- llm_model_max_async=2,
20
- llm_model_kwargs={"host": "http://localhost:11434"},
 
21
  embedding_func=EmbeddingFunc(
22
  embedding_dim=768,
23
  max_token_size=8192,
@@ -27,7 +28,6 @@ rag = LightRAG(
27
  ),
28
  )
29
 
30
-
31
  with open("./book.txt") as f:
32
  rag.insert(f.read())
33
 
 
15
  rag = LightRAG(
16
  working_dir=WORKING_DIR,
17
  llm_model_func=ollama_model_complete,
18
+ llm_model_name="gemma2:2b",
19
+ llm_model_max_async=4,
20
+ llm_model_max_token_size=32768,
21
+ llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
22
  embedding_func=EmbeddingFunc(
23
  embedding_dim=768,
24
  max_token_size=8192,
 
28
  ),
29
  )
30
 
 
31
  with open("./book.txt") as f:
32
  rag.insert(f.read())
33