yangdx
committed on
Commit
·
a42342a
1
Parent(s):
d164828
能够正确调用rag,rag执行完成后,无法返回内容
Browse files- lightrag/api/lightrag_ollama.py +45 -28
lightrag/api/lightrag_ollama.py
CHANGED
@@ -659,38 +659,55 @@ def create_app(args):
|
|
659 |
cleaned_query, mode = parse_query_mode(query)
|
660 |
|
661 |
# 调用RAG进行查询
|
662 |
-
|
663 |
-
|
664 |
-
|
665 |
-
|
666 |
-
|
|
|
|
|
|
|
667 |
)
|
668 |
-
)
|
669 |
|
670 |
-
if request.stream:
|
671 |
async def stream_generator():
|
672 |
-
|
673 |
-
|
674 |
-
|
675 |
-
|
676 |
-
|
677 |
-
|
678 |
-
|
679 |
-
|
680 |
-
|
681 |
-
|
682 |
-
|
683 |
-
|
684 |
-
|
685 |
-
|
686 |
-
|
687 |
-
|
688 |
-
|
689 |
-
|
690 |
-
|
691 |
-
|
692 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
693 |
else:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
694 |
return OllamaChatResponse(
|
695 |
model=LIGHTRAG_MODEL,
|
696 |
created_at=LIGHTRAG_CREATED_AT,
|
|
|
659 |
cleaned_query, mode = parse_query_mode(query)
|
660 |
|
661 |
# 调用RAG进行查询
|
662 |
+
if request.stream:
|
663 |
+
response = await rag.aquery(
|
664 |
+
cleaned_query,
|
665 |
+
param=QueryParam(
|
666 |
+
mode=mode,
|
667 |
+
stream=True,
|
668 |
+
only_need_context=False
|
669 |
+
),
|
670 |
)
|
|
|
671 |
|
|
|
672 |
async def stream_generator():
|
673 |
+
try:
|
674 |
+
async for chunk in response:
|
675 |
+
yield {
|
676 |
+
"model": LIGHTRAG_MODEL,
|
677 |
+
"created_at": LIGHTRAG_CREATED_AT,
|
678 |
+
"message": {
|
679 |
+
"role": "assistant",
|
680 |
+
"content": chunk
|
681 |
+
},
|
682 |
+
"done": False
|
683 |
+
}
|
684 |
+
yield {
|
685 |
+
"model": LIGHTRAG_MODEL,
|
686 |
+
"created_at": LIGHTRAG_CREATED_AT,
|
687 |
+
"message": {
|
688 |
+
"role": "assistant",
|
689 |
+
"content": ""
|
690 |
+
},
|
691 |
+
"done": True
|
692 |
+
}
|
693 |
+
except Exception as e:
|
694 |
+
logging.error(f"Error in stream_generator: {str(e)}")
|
695 |
+
raise
|
696 |
+
from fastapi.responses import StreamingResponse
|
697 |
+
import json
|
698 |
+
return StreamingResponse(
|
699 |
+
(f"data: {json.dumps(chunk)}\n\n" async for chunk in stream_generator()),
|
700 |
+
media_type="text/event-stream"
|
701 |
+
)
|
702 |
else:
|
703 |
+
response = await rag.aquery(
|
704 |
+
cleaned_query,
|
705 |
+
param=QueryParam(
|
706 |
+
mode=mode,
|
707 |
+
stream=False,
|
708 |
+
only_need_context=False
|
709 |
+
),
|
710 |
+
)
|
711 |
return OllamaChatResponse(
|
712 |
model=LIGHTRAG_MODEL,
|
713 |
created_at=LIGHTRAG_CREATED_AT,
|