mtyrrell committed on
Commit
aedcb69
Β·
1 Parent(s): e6aebd3

troubleshoot chatui

Browse files
Files changed (2) hide show
  1. app/main.py +111 -35
  2. requirements.txt +18 -6
app/main.py CHANGED
@@ -3,7 +3,6 @@ from langserve import add_routes
3
  from langchain_core.runnables import RunnableLambda
4
  from pydantic import BaseModel
5
  from typing import List, Literal, Optional, Dict, Any
6
- from contextlib import asynccontextmanager
7
  import gradio as gr
8
  from datetime import datetime
9
 
@@ -35,32 +34,64 @@ class ChatFedOutput(BaseModel):
35
  # Logic
36
  # ─────────────────────────────────────────
37
 
38
- def process_chatfed_query_core(query: str) -> Dict[str, Any]:
39
- return {
40
- "result": f"Processed: {query}",
41
- "metadata": {
 
42
  "session_id": f"session_{datetime.now().strftime('%Y%m%d%H%M%S')}"
43
  }
44
- }
45
-
46
- from typing import cast
47
-
48
- def chatui_handler(data: ChatUIInput) -> ChatFedOutput:
49
- # Handle case where data might be a dict instead of ChatUIInput object
50
- if isinstance(data, dict):
51
- data = ChatUIInput(**data)
52
-
53
- last_user_msg = next((m.content for m in reversed(data.messages) if m.role == "user"), "")
54
- result = process_chatfed_query_core(query=last_user_msg)
55
-
56
- # 🚨 FIX: Wrap the string result
57
- return ChatFedOutput(
58
- result=result["result"],
59
- metadata=result["metadata"]
60
  )
61
 
62
- def langserve_handler(data: ChatFedInput):
63
- return process_chatfed_query_core(data.query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  # ─────────────────────────────────────────
66
  # Gradio Interface
@@ -69,29 +100,46 @@ def langserve_handler(data: ChatFedInput):
69
  def create_gradio_interface():
70
  with gr.Blocks(title="ChatFed UI") as demo:
71
  gr.Markdown("## ChatFed Dev UI")
72
- query = gr.Textbox(label="Query")
73
- btn = gr.Button("Send")
74
- out = gr.Textbox(label="Response")
75
-
76
- btn.click(fn=lambda q: process_chatfed_query_core(q)["result"], inputs=query, outputs=out)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  return demo
78
 
79
  # ─────────────────────────────────────────
80
  # App Startup
81
  # ─────────────────────────────────────────
82
 
83
- # Create Gradio app first
84
- demo = create_gradio_interface()
85
-
86
- # Get the FastAPI app from Gradio
87
- app = demo.app
88
 
89
  # Register LangServe-compatible endpoints
90
  add_routes(
91
  app,
92
  RunnableLambda(chatui_handler),
93
  path="/chatfed-chatui",
94
- input_type=ChatUIInput
 
95
  )
96
 
97
  add_routes(
@@ -102,10 +150,38 @@ add_routes(
102
  output_type=ChatFedOutput
103
  )
104
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  # ─────────────────────────────────────────
106
  # Run with uvicorn (dev/test only)
107
  # ─────────────────────────────────────────
108
 
109
  if __name__ == "__main__":
110
  import uvicorn
111
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
 
 
 
 
3
  from langchain_core.runnables import RunnableLambda
4
  from pydantic import BaseModel
5
  from typing import List, Literal, Optional, Dict, Any
 
6
  import gradio as gr
7
  from datetime import datetime
8
 
 
34
  # Logic
35
  # ─────────────────────────────────────────
36
 
37
def process_chatfed_query_core(query: str) -> ChatFedOutput:
    """Run the core query pipeline and wrap the result in a ChatFedOutput.

    The metadata carries a timestamp-derived session id so each call is
    distinguishable in logs/clients.
    """
    stamp = datetime.now().strftime('%Y%m%d%H%M%S')
    return ChatFedOutput(
        result=f"Processed: {query}",
        metadata={"session_id": f"session_{stamp}"},
    )
45
 
46
def chatui_handler(data) -> ChatFedOutput:
    """LangServe handler for the chat-UI endpoint.

    Accepts either a raw dict (``{"messages": [...]}``) or a Pydantic model
    with a ``messages`` attribute; extracts the most recent user message,
    runs the core pipeline, and tags the output's metadata with its source.
    Any failure is converted into an error-shaped ChatFedOutput rather than
    propagating (this is a top-level service boundary).
    """
    try:
        # Normalize the two accepted input shapes to a plain message list.
        if isinstance(data, dict):
            msg_list = data.get("messages", [])
        else:
            msg_list = getattr(data, "messages", [])

        # Walk backwards to find the newest user-authored message; messages
        # themselves may also be dicts or objects.
        latest_query = ""
        for message in reversed(msg_list):
            if isinstance(message, dict):
                if message.get("role") == "user":
                    latest_query = message.get("content", "")
                    break
            elif getattr(message, "role", None) == "user":
                latest_query = message.content
                break

        output = process_chatfed_query_core(query=latest_query)
        output.metadata["source"] = "chatui"
        return output

    except Exception as e:
        return ChatFedOutput(
            result=f"Error processing chat UI request: {str(e)}",
            metadata={"error": True, "source": "chatui"},
        )
76
+
77
def langserve_handler(data) -> ChatFedOutput:
    """LangServe handler for the plain query endpoint.

    Accepts either a raw dict (``{"query": ...}``) or a Pydantic model with
    a ``query`` attribute, runs the core pipeline, and tags the output's
    metadata with its source. Failures become error-shaped ChatFedOutput
    values instead of raising (top-level service boundary).
    """
    try:
        # Normalize the two accepted input shapes to a single query string.
        incoming = data.get("query", "") if isinstance(data, dict) else getattr(data, "query", "")

        output = process_chatfed_query_core(incoming)
        output.metadata["source"] = "langserve"
        return output

    except Exception as e:
        return ChatFedOutput(
            result=f"Error processing request: {str(e)}",
            metadata={"error": True, "source": "langserve"},
        )
95
 
96
  # ─────────────────────────────────────────
97
  # Gradio Interface
 
100
def create_gradio_interface():
    """Assemble the ChatFed dev UI as a Gradio Blocks app and return it."""

    def _respond(text: str) -> str:
        # Guard against blank submissions before invoking the core pipeline.
        if not text.strip():
            return "Please enter a query."
        try:
            return process_chatfed_query_core(text).result
        except Exception as exc:
            return f"Error processing query: {str(exc)}"

    with gr.Blocks(title="ChatFed UI") as ui:
        gr.Markdown("## ChatFed Dev UI")

        with gr.Row():
            with gr.Column(scale=4):
                query_box = gr.Textbox(label="Query", placeholder="Enter your query here...")
            with gr.Column(scale=1):
                send_btn = gr.Button("Send", variant="primary")

        response_box = gr.Textbox(label="Response", lines=10)

        # Both the button and the Enter key trigger the same handler.
        send_btn.click(fn=_respond, inputs=query_box, outputs=response_box)
        query_box.submit(fn=_respond, inputs=query_box, outputs=response_box)

    return ui
125
 
126
  # ─────────────────────────────────────────
127
  # App Startup
128
  # ─────────────────────────────────────────
129
 
130
# NOTE(review): `FastAPI` is called here but no `from fastapi import FastAPI`
# is visible in this chunk (file lines 1-2 are outside view) — confirm the
# import exists at the top of the file, otherwise this raises NameError at
# import time.
app = FastAPI(
    title="ChatFed API",
    description="API for ChatFed query processing",
    version="1.0.0",
)

# Register LangServe-compatible endpoints. Declaring both input_type and
# output_type lets LangServe generate correct OpenAPI schemas and validate
# payloads for the chat-UI route.
add_routes(
    app,
    RunnableLambda(chatui_handler),
    path="/chatfed-chatui",
    input_type=ChatUIInput,
    output_type=ChatFedOutput,
)
144
 
145
  add_routes(
 
150
  output_type=ChatFedOutput
151
  )
152
 
153
# Add health check endpoint
@app.get("/health")
async def health_check():
    """Lightweight liveness probe with a server-side timestamp."""
    return {"status": "healthy", "timestamp": datetime.now().isoformat()}

# Add a simple root endpoint
@app.get("/")
async def root():
    """Service banner listing the available endpoints."""
    return {
        "message": "ChatFed API is running",
        "endpoints": {
            "chatfed": "/chatfed",
            "chatfed_chatui": "/chatfed-chatui",
            "gradio_ui": "/ui",
            "docs": "/docs",
            "health": "/health",
        },
    }

# Mount Gradio at a specific path instead of root to avoid conflicts.
# FIX: `app.mount("/ui", demo)` treats the Blocks object as an ASGI sub-app,
# which is not supported; gr.mount_gradio_app() is the documented way to
# attach a Gradio app to an existing FastAPI application.
demo = create_gradio_interface()
app = gr.mount_gradio_app(app, demo, path="/ui")
175
+
176
  # ─────────────────────────────────────────
177
  # Run with uvicorn (dev/test only)
178
  # ─────────────────────────────────────────
179
 
180
# Local dev entry point only; production should run uvicorn externally.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")
requirements.txt CHANGED
@@ -1,12 +1,24 @@
1
- gradio[mcp]
 
 
 
 
 
 
 
 
 
 
2
  gradio_client>=1.0.0
3
  langgraph>=0.2.0
 
4
  Pillow>=9.0.0
5
- fastapi
6
- langserve[all]
7
- uvicorn[standard]
8
- typing_extensions
9
- python-multipart
 
10
 
11
 
12
 
 
1
+ # gradio[mcp]
2
+ # gradio_client>=1.0.0
3
+ # langgraph>=0.2.0
4
+ # Pillow>=9.0.0
5
+ # fastapi
6
+ # langserve[all]
7
+ # uvicorn[standard]
8
+ # typing_extensions
9
+ # python-multipart
10
+
11
+ gradio>=4.0.0
12
  gradio_client>=1.0.0
13
  langgraph>=0.2.0
14
+ langchain-core>=0.2.0
15
  Pillow>=9.0.0
16
+ fastapi>=0.100.0
17
+ langserve[all]>=0.0.30
18
+ uvicorn[standard]>=0.23.0
19
+ typing_extensions>=4.5.0
20
+ python-multipart>=0.0.6
21
+ pydantic>=2.0.0
22
 
23
 
24