yangdx committed
Commit a42342a · 1 Parent(s): d164828

RAG is now invoked correctly, but after the RAG call completes, no content is returned.

Files changed (1): lightrag/api/lightrag_ollama.py (+45 −28)
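
For context, the endpoint this patch touches emulates Ollama's /api/chat, and parse_query_mode() derives the query mode from a prefix on the message text. A minimal client sketch against it might look as follows; the base URL, port, model alias, and the /hybrid prefix are assumptions for illustration:

    import json
    import requests

    resp = requests.post(
        "http://localhost:9621/api/chat",  # assumed LightRAG API address
        json={
            "model": "lightrag:latest",  # assumed model alias
            "messages": [{"role": "user", "content": "/hybrid What is LightRAG?"}],
            "stream": True,
        },
        stream=True,
    )
    # Ollama-style streaming is newline-delimited JSON: one object per line
    for line in resp.iter_lines():
        if line:
            chunk = json.loads(line)
            print(chunk["message"]["content"], end="", flush=True)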
lightrag/api/lightrag_ollama.py CHANGED
@@ -659,38 +659,55 @@ def create_app(args):
         cleaned_query, mode = parse_query_mode(query)
 
         # Query the RAG engine
-        response = await rag.aquery(
-            cleaned_query,
-            param=QueryParam(
-                mode=mode,
-                stream=request.stream,
-            )
-        )
-
-        if request.stream:
+        if request.stream:
+            response = await rag.aquery(
+                cleaned_query,
+                param=QueryParam(
+                    mode=mode,
+                    stream=True,
+                    only_need_context=False
+                ),
+            )
+
             async def stream_generator():
-                async for chunk in response:
-                    yield OllamaChatResponse(
-                        model=LIGHTRAG_MODEL,
-                        created_at=LIGHTRAG_CREATED_AT,
-                        message=OllamaMessage(
-                            role="assistant",
-                            content=chunk
-                        ),
-                        done=False
-                    )
-                # Send an empty completion message
-                yield OllamaChatResponse(
-                    model=LIGHTRAG_MODEL,
-                    created_at=LIGHTRAG_CREATED_AT,
-                    message=OllamaMessage(
-                        role="assistant",
-                        content=""
-                    ),
-                    done=True
-                )
-            return stream_generator()
+                try:
+                    async for chunk in response:
+                        yield {
+                            "model": LIGHTRAG_MODEL,
+                            "created_at": LIGHTRAG_CREATED_AT,
+                            "message": {
+                                "role": "assistant",
+                                "content": chunk
+                            },
+                            "done": False
+                        }
+                    # Final empty message marks the stream as done
+                    yield {
+                        "model": LIGHTRAG_MODEL,
+                        "created_at": LIGHTRAG_CREATED_AT,
+                        "message": {
+                            "role": "assistant",
+                            "content": ""
+                        },
+                        "done": True
+                    }
+                except Exception as e:
+                    logging.error(f"Error in stream_generator: {str(e)}")
+                    raise
+
+            from fastapi.responses import StreamingResponse
+            import json
+
+            return StreamingResponse(
+                (f"data: {json.dumps(chunk)}\n\n" async for chunk in stream_generator()),
+                media_type="text/event-stream"
+            )
         else:
+            response = await rag.aquery(
+                cleaned_query,
+                param=QueryParam(
+                    mode=mode,
+                    stream=False,
+                    only_need_context=False
+                ),
+            )
             return OllamaChatResponse(
                 model=LIGHTRAG_MODEL,
                 created_at=LIGHTRAG_CREATED_AT,
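
A possible explanation for the symptom in the commit message: the new streaming branch frames each chunk as an SSE "data:" event with media type text/event-stream, while Ollama-compatible clients read /api/chat responses as newline-delimited JSON (application/x-ndjson), one object per line, so they would never surface the SSE-framed content. A minimal sketch of NDJSON framing, reusing the patch's stream_generator (the surrounding handler context is assumed):

    import json
    from fastapi.responses import StreamingResponse

    async def ndjson_stream(generator):
        # Serialize each chunk dict as a single JSON line, Ollama-style
        async for chunk in generator:
            yield json.dumps(chunk, ensure_ascii=False) + "\n"

    # Inside the handler, in place of the SSE framing above:
    # return StreamingResponse(
    #     ndjson_stream(stream_generator()),
    #     media_type="application/x-ndjson",
    # )

This is only one candidate fix under the stated assumption about the client; if the clients in use do expect SSE, the framing above is fine and the missing content would have to be traced elsewhere (e.g., whether rag.aquery actually yields chunks when stream=True).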