gzdaniel committed
Commit cb84ce8 · 1 Parent(s): 3511697

Add user_prompt to QueryParam

Files changed (3)
  1. lightrag/base.py +5 -0
  2. lightrag/operate.py +8 -0
  3. lightrag/prompt.py +9 -2
lightrag/base.py CHANGED
@@ -93,6 +93,11 @@ class QueryParam:
     This allows using different models for different query modes.
     """
 
+    user_prompt: str | None = None
+    """User-provided prompt for the query.
+    If provided, this will be used instead of the default value from the prompt template.
+    """
+
 
 @dataclass
 class StorageNameSpace(ABC):
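
With this field in place, callers can attach per-query instructions without editing the prompt templates. A minimal usage sketch (the example values are invented, and the surrounding LightRAG initialization is assumed and omitted):

    from lightrag import LightRAG, QueryParam

    # `rag` is assumed to be an already-initialized LightRAG instance.
    param = QueryParam(
        mode="hybrid",
        user_prompt="Answer as a bullet list and cite file paths.",  # example value; defaults to "n/a" if omitted
    )
    # response = rag.query("How are the main entities related?", param=param)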
lightrag/operate.py CHANGED
@@ -925,11 +925,14 @@ async def kg_query(
         query_param.conversation_history, query_param.history_turns
     )
 
+    # Build system prompt
+    user_prompt = query_param.user_prompt if query_param.user_prompt else PROMPTS["DEFAULT_USER_PROMPT"]
     sys_prompt_temp = system_prompt if system_prompt else PROMPTS["rag_response"]
     sys_prompt = sys_prompt_temp.format(
         context_data=context,
         response_type=query_param.response_type,
         history=history_context,
+        user_prompt=user_prompt,
     )
 
     if query_param.only_need_prompt:
@@ -1907,11 +1910,14 @@ async def naive_query(
         query_param.conversation_history, query_param.history_turns
     )
 
+    # Build system prompt
+    user_prompt = query_param.user_prompt if query_param.user_prompt else PROMPTS["DEFAULT_USER_PROMPT"]
     sys_prompt_temp = system_prompt if system_prompt else PROMPTS["naive_rag_response"]
     sys_prompt = sys_prompt_temp.format(
         content_data=text_units_str,
         response_type=query_param.response_type,
         history=history_context,
+        user_prompt=user_prompt,
     )
 
     if query_param.only_need_prompt:
@@ -1957,6 +1963,7 @@ async def naive_query(
     return response
 
 
+# TODO: Deprecated, use user_prompt in QueryParam instead
 async def kg_query_with_keywords(
     query: str,
     knowledge_graph_inst: BaseGraphStorage,
@@ -2079,6 +2086,7 @@ async def kg_query_with_keywords(
     return response
 
 
+# TODO: Deprecated, use user_prompt in QueryParam instead
 async def query_with_keywords(
     query: str,
     prompt: str,
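
The new lines are plain keyword templating: query_param.user_prompt, or the "n/a" default, is handed to str.format() along with the existing fields. A self-contained sketch of that fallback (the template text below is a simplified stand-in, not the real rag_response prompt):

    # Stand-in for PROMPTS in lightrag/prompt.py; the template is shortened for illustration.
    PROMPTS = {
        "DEFAULT_USER_PROMPT": "n/a",
        "rag_response": (
            "Context:\n{context_data}\n"
            "History:\n{history}\n"
            "Target format: {response_type}\n"
            "- Additional user prompt: {user_prompt}\n\nResponse:"
        ),
    }

    def build_sys_prompt(context, response_type, history, user_prompt=None):
        # Same fallback as kg_query/naive_query: use the default placeholder when none is given.
        user_prompt = user_prompt if user_prompt else PROMPTS["DEFAULT_USER_PROMPT"]
        return PROMPTS["rag_response"].format(
            context_data=context,
            response_type=response_type,
            history=history,
            user_prompt=user_prompt,
        )

    print(build_sys_prompt("(context here)", "short answer", "(no history)", "Reply in French."))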
lightrag/prompt.py CHANGED
@@ -12,6 +12,8 @@ PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>"
 
 PROMPTS["DEFAULT_ENTITY_TYPES"] = ["organization", "person", "geo", "event", "category"]
 
+PROMPTS["DEFAULT_USER_PROMPT"] = "n/a"
+
 PROMPTS["entity_extraction"] = """---Goal---
 Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
 Use {language} as output language.
@@ -224,7 +226,10 @@ When handling relationships with timestamps:
 - Ensure the response maintains continuity with the conversation history.
 - List up to 5 most important reference sources at the end under "References" section. Clearly indicating whether each source is from Knowledge Graph (KG) or Vector Data (DC), and include the file path if available, in the following format: [KG/DC] file_path
 - If you don't know the answer, just say so.
-- Do not make anything up. Do not include information not provided by the Knowledge Base."""
+- Do not make anything up. Do not include information not provided by the Knowledge Base.
+- Additional user prompt: {user_prompt}
+
+Response:"""
 
 PROMPTS["keywords_extraction"] = """---Role---
 
@@ -322,8 +327,10 @@ When handling content with timestamps:
 - Ensure the response maintains continuity with the conversation history.
 - List up to 5 most important reference sources at the end under "References" section. Clearly indicating each source from Document Chunks(DC), and include the file path if available, in the following format: [DC] file_path
 - If you don't know the answer, just say so.
-- Do not include information not provided by the Document Chunks."""
+- Do not include information not provided by the Document Chunks.
+- Additional user prompt: {user_prompt}
 
+Response:"""
 
 PROMPTS[
     "similarity_check"