LarFii committed on
Commit
9ce8151
·
1 Parent(s): 998e00d
examples/lightrag_openai_demo.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
 
3
  from lightrag import LightRAG, QueryParam
4
- from lightrag.llm.openai import gpt_4o_mini_complete
5
 
6
  WORKING_DIR = "./dickens"
7
 
@@ -10,6 +10,7 @@ if not os.path.exists(WORKING_DIR):
10
 
11
  rag = LightRAG(
12
  working_dir=WORKING_DIR,
 
13
  llm_model_func=gpt_4o_mini_complete,
14
  # llm_model_func=gpt_4o_complete
15
  )
 
1
  import os
2
 
3
  from lightrag import LightRAG, QueryParam
4
+ from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
5
 
6
  WORKING_DIR = "./dickens"
7
 
 
10
 
11
  rag = LightRAG(
12
  working_dir=WORKING_DIR,
13
+ embedding_func=openai_embed,
14
  llm_model_func=gpt_4o_mini_complete,
15
  # llm_model_func=gpt_4o_complete
16
  )
lightrag/operate.py CHANGED
@@ -1504,7 +1504,7 @@ async def naive_query(
1504
  use_model_func = global_config["llm_model_func"]
1505
  args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
1506
  cached_response, quantized, min_val, max_val = await handle_cache(
1507
- hashing_kv, args_hash, query, "default", cache_type="query"
1508
  )
1509
  if cached_response is not None:
1510
  return cached_response
 
1504
  use_model_func = global_config["llm_model_func"]
1505
  args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
1506
  cached_response, quantized, min_val, max_val = await handle_cache(
1507
+ hashing_kv, args_hash, query, query_param.mode, cache_type="query"
1508
  )
1509
  if cached_response is not None:
1510
  return cached_response