Fix linting
lightrag/llm/llama_index_impl.py
CHANGED
@@ -95,7 +95,7 @@ async def llama_index_complete_if_cache(
     prompt: str,
     system_prompt: Optional[str] = None,
     history_messages: List[dict] = [],
-    chat_kwargs
+    chat_kwargs={},
 ) -> str:
     """Complete the prompt using LlamaIndex."""
     try:
@@ -122,7 +122,9 @@ async def llama_index_complete_if_cache(
         # Add current prompt
         formatted_messages.append(ChatMessage(role=MessageRole.USER, content=prompt))

-        response: ChatResponse = await model.achat(
+        response: ChatResponse = await model.achat(
+            messages=formatted_messages, **chat_kwargs
+        )

         # In newer versions, the response is in message.content
         content = response.message.content
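For context, a minimal usage sketch of the patched helper. Only prompt, system_prompt, history_messages, and chat_kwargs appear in the diff above; the leading model argument, the OpenAI setup, and the prompt text are assumptions for illustration, not taken from the commit.

# Minimal sketch (assumed setup): passing extra chat parameters via
# chat_kwargs, which the patched code unpacks into model.achat(**chat_kwargs).
import asyncio

from llama_index.llms.openai import OpenAI  # illustrative model backend
from lightrag.llm.llama_index_impl import llama_index_complete_if_cache

async def main():
    model = OpenAI(model="gpt-4o-mini")  # assumed model; not shown in the diff

    # chat_kwargs now defaults to {}, so the argument is optional; here it
    # carries sampling options through to model.achat().
    answer = await llama_index_complete_if_cache(
        model,
        "Summarize the latest changes in one sentence.",
        system_prompt="You are a concise assistant.",
        history_messages=[],
        chat_kwargs={"temperature": 0.2},
    )
    print(answer)

asyncio.run(main())

One design note: chat_kwargs={} is a mutable default argument. It is harmless here because the function only unpacks it, but the usual lint-friendly idiom is chat_kwargs=None with a fallback to {} inside the body.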