openfree commited on
Commit
297f05e
·
verified ·
1 Parent(s): 7a94f5b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +1608 -0
app.py ADDED
@@ -0,0 +1,1608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MOUSE Workflow - Visual Workflow Builder with UI Execution
3
+ @Powered by VIDraft
4
+ ✓ Visual workflow designer with drag-and-drop
5
+ ✓ Import/Export JSON with copy-paste support
6
+ ✓ Auto-generate UI from workflow for end-user execution
7
+ """
8
+
9
+ import os, json, typing, tempfile, traceback
10
+ import gradio as gr
11
+ from gradio_workflowbuilder import WorkflowBuilder
12
+
13
+ # Optional imports for LLM APIs
14
+ try:
15
+ from openai import OpenAI
16
+ OPENAI_AVAILABLE = True
17
+ except ImportError:
18
+ OPENAI_AVAILABLE = False
19
+ print("OpenAI library not available. Install with: pip install openai")
20
+
21
# Anthropic support is commented out (disabled)
22
+ # try:
23
+ # import anthropic
24
+ # ANTHROPIC_AVAILABLE = True
25
+ # except ImportError:
26
+ # ANTHROPIC_AVAILABLE = False
27
+ # print("Anthropic library not available. Install with: pip install anthropic")
28
+ ANTHROPIC_AVAILABLE = False
29
+
30
+ try:
31
+ import requests
32
+ REQUESTS_AVAILABLE = True
33
+ except ImportError:
34
+ REQUESTS_AVAILABLE = False
35
+ print("Requests library not available. Install with: pip install requests")
36
+
37
+ try:
38
+ from huggingface_hub import HfApi, create_repo, upload_file
39
+ HF_HUB_AVAILABLE = True
40
+ except ImportError:
41
+ HF_HUB_AVAILABLE = False
42
+ print("Huggingface Hub not available. Install with: pip install huggingface-hub")
43
+
44
+ # -------------------------------------------------------------------
45
# 🛠️ Helper functions
46
+ # -------------------------------------------------------------------
47
def export_pretty(data: typing.Dict[str, typing.Any]) -> str:
    """Render a workflow dict as human-readable, UTF-8-preserving JSON.

    Returns a fixed placeholder string when *data* is empty or None.
    """
    if not data:
        return "No workflow to export"
    return json.dumps(data, indent=2, ensure_ascii=False)
49
+
50
def export_file(data: typing.Dict[str, typing.Any]) -> typing.Optional[str]:
    """Write the workflow dict to a temporary ``.json`` file.

    Returns the path of the written file, or None when there is no data
    or the write fails (the failure is logged to stdout).
    """
    if not data:
        return None

    try:
        # mkstemp hands back a raw OS handle; wrap it so it is always closed.
        handle, file_path = tempfile.mkstemp(suffix=".json", prefix="workflow_", text=True)
        with os.fdopen(handle, "w", encoding="utf-8") as sink:
            json.dump(data, sink, ensure_ascii=False, indent=2)
    except Exception as e:
        print(f"Error exporting file: {e}")
        return None
    return file_path
64
+
65
def load_json_from_text_or_file(json_text: str, file_obj) -> typing.Tuple[typing.Dict[str, typing.Any], str]:
    """Load a workflow definition from raw JSON text or an uploaded file.

    The uploaded file, when present, takes precedence over the text box.
    Returns a ``(workflow_dict, status_message)`` pair; the dict is None
    on any failure and the message explains what went wrong.
    """
    # An uploaded file overrides whatever is in the text field.
    if file_obj is not None:
        try:
            with open(file_obj.name, "r", encoding="utf-8") as fh:
                json_text = fh.read()
        except Exception as e:
            return None, f"❌ Error reading file: {str(e)}"

    # Nothing to parse at all.
    if not json_text or not json_text.strip():
        return None, "No JSON data provided"

    try:
        parsed = json.loads(json_text.strip())

        # The builder only understands a top-level object.
        if not isinstance(parsed, dict):
            return None, "Invalid format: not a dictionary"

        # Guarantee the two structural keys the builder expects.
        parsed.setdefault("nodes", [])
        parsed.setdefault("edges", [])

        nodes_count = len(parsed.get("nodes", []))
        edges_count = len(parsed.get("edges", []))
        return parsed, f"✅ Loaded: {nodes_count} nodes, {edges_count} edges"

    except json.JSONDecodeError as e:
        return None, f"❌ JSON parsing error: {str(e)}"
    except Exception as e:
        return None, f"❌ Error: {str(e)}"
102
+
103
def create_sample_workflow(example_type="basic"):
    """Build a ready-made sample workflow graph.

    Args:
        example_type: one of "basic", "vidraft", "multi_input" or "chain".
            Any other value falls back to the "basic" sample.

    Returns:
        dict with "nodes" and "edges" lists in the workflow-builder JSON format.
    """

    if example_type == "basic":
        # Basic example: a simple Q&A pipeline using VIDraft
        return {
            "nodes": [
                {
                    "id": "input_1",
                    "type": "ChatInput",
                    "position": {"x": 100, "y": 200},
                    "data": {
                        "label": "User Question",
                        "template": {
                            "input_value": {"value": "What is the capital of Korea?"}
                        }
                    }
                },
                {
                    "id": "llm_1",
                    "type": "llmNode",
                    "position": {"x": 400, "y": 200},
                    "data": {
                        "label": "AI Processing",
                        "template": {
                            "provider": {"value": "VIDraft"},  # default provider is VIDraft
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.7},
                            "system_prompt": {"value": "You are a helpful assistant."}
                        }
                    }
                },
                {
                    "id": "output_1",
                    "type": "ChatOutput",
                    "position": {"x": 700, "y": 200},
                    "data": {"label": "Answer"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "input_1", "target": "llm_1"},
                {"id": "e2", "source": "llm_1", "target": "output_1"}
            ]
        }

    elif example_type == "vidraft":
        # VIDraft example (Korean-language prompt)
        return {
            "nodes": [
                {
                    "id": "input_1",
                    "type": "ChatInput",
                    "position": {"x": 100, "y": 200},
                    "data": {
                        "label": "User Input",
                        "template": {
                            "input_value": {"value": "AI와 머신러닝의 차이점을 설명해주세요."}
                        }
                    }
                },
                {
                    "id": "llm_1",
                    "type": "llmNode",
                    "position": {"x": 400, "y": 200},
                    "data": {
                        "label": "VIDraft AI (Gemma)",
                        "template": {
                            "provider": {"value": "VIDraft"},
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.8},
                            "system_prompt": {"value": "당신은 전문적이고 친절한 AI 교육자입니다. 복잡한 개념을 쉽게 설명해주세요."}
                        }
                    }
                },
                {
                    "id": "output_1",
                    "type": "ChatOutput",
                    "position": {"x": 700, "y": 200},
                    "data": {"label": "AI Explanation"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "input_1", "target": "llm_1"},
                {"id": "e2", "source": "llm_1", "target": "output_1"}
            ]
        }

    elif example_type == "multi_input":
        # Multi-input example: three text inputs merged by a textNode before the LLM
        return {
            "nodes": [
                {
                    "id": "name_input",
                    "type": "textInput",
                    "position": {"x": 100, "y": 100},
                    "data": {
                        "label": "Your Name",
                        "template": {
                            "input_value": {"value": "John"}
                        }
                    }
                },
                {
                    "id": "topic_input",
                    "type": "textInput",
                    "position": {"x": 100, "y": 250},
                    "data": {
                        "label": "Topic",
                        "template": {
                            "input_value": {"value": "Python programming"}
                        }
                    }
                },
                {
                    "id": "level_input",
                    "type": "textInput",
                    "position": {"x": 100, "y": 400},
                    "data": {
                        "label": "Skill Level",
                        "template": {
                            "input_value": {"value": "beginner"}
                        }
                    }
                },
                {
                    "id": "combiner",
                    "type": "textNode",
                    "position": {"x": 350, "y": 250},
                    "data": {
                        "label": "Combine Inputs",
                        "template": {
                            "text": {"value": "Create a personalized learning plan"}
                        }
                    }
                },
                {
                    "id": "llm_1",
                    "type": "llmNode",
                    "position": {"x": 600, "y": 250},
                    "data": {
                        "label": "Generate Learning Plan",
                        "template": {
                            "provider": {"value": "VIDraft"},  # default provider is VIDraft
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.7},
                            "system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
                        }
                    }
                },
                {
                    "id": "output_1",
                    "type": "ChatOutput",
                    "position": {"x": 900, "y": 250},
                    "data": {"label": "Your Learning Plan"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "name_input", "target": "combiner"},
                {"id": "e2", "source": "topic_input", "target": "combiner"},
                {"id": "e3", "source": "level_input", "target": "combiner"},
                {"id": "e4", "source": "combiner", "target": "llm_1"},
                {"id": "e5", "source": "llm_1", "target": "output_1"}
            ]
        }

    elif example_type == "chain":
        # Chained-processing example: translate, then analyze, with two outputs
        return {
            "nodes": [
                {
                    "id": "input_1",
                    "type": "ChatInput",
                    "position": {"x": 50, "y": 200},
                    "data": {
                        "label": "Original Text",
                        "template": {
                            "input_value": {"value": "The quick brown fox jumps over the lazy dog."}
                        }
                    }
                },
                {
                    "id": "translator",
                    "type": "llmNode",
                    "position": {"x": 300, "y": 200},
                    "data": {
                        "label": "Translate to Korean",
                        "template": {
                            "provider": {"value": "VIDraft"},
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.3},
                            "system_prompt": {"value": "You are a professional translator. Translate the given English text to Korean accurately."}
                        }
                    }
                },
                {
                    "id": "analyzer",
                    "type": "llmNode",
                    "position": {"x": 600, "y": 200},
                    "data": {
                        "label": "Analyze Translation",
                        "template": {
                            "provider": {"value": "OpenAI"},
                            "model": {"value": "gpt-4.1-mini"},
                            "temperature": {"value": 0.5},
                            "system_prompt": {"value": "You are a linguistic expert. Analyze the Korean translation and explain its nuances and cultural context."}
                        }
                    }
                },
                {
                    "id": "output_translation",
                    "type": "ChatOutput",
                    "position": {"x": 450, "y": 350},
                    "data": {"label": "Korean Translation"}
                },
                {
                    "id": "output_analysis",
                    "type": "ChatOutput",
                    "position": {"x": 900, "y": 200},
                    "data": {"label": "Translation Analysis"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "input_1", "target": "translator"},
                {"id": "e2", "source": "translator", "target": "analyzer"},
                {"id": "e3", "source": "translator", "target": "output_translation"},
                {"id": "e4", "source": "analyzer", "target": "output_analysis"}
            ]
        }

    # Unknown type: fall back to the basic sample
    return create_sample_workflow("basic")
334
+
335
+ # 배포를 위한 독립 앱 생성 함수
336
def generate_standalone_app(workflow_data: dict, app_name: str, app_description: str) -> str:
    """Render the workflow as the source code of a standalone Gradio app.

    The returned string is a complete ``app.py`` for a Hugging Face Space:
    the workflow JSON is embedded verbatim as WORKFLOW_DATA, an
    ``execute_workflow`` interpreter walks the nodes/edges, and a Blocks UI
    is generated from the workflow's input/output nodes.

    Args:
        workflow_data: workflow dict with "nodes" and "edges".
        app_name: title inserted into the generated UI and docstring.
        app_description: description inserted into the generated UI.

    Returns:
        The generated application source as a single string.
    """

    # Serialize the workflow so it can be embedded verbatim in the template.
    workflow_json = json.dumps(workflow_data, indent=2)

    # NOTE: everything inside this f-string is the *generated* program's
    # source. `{{`/`}}` are literal braces and `\\n` is a literal escape in
    # the generated code.
    app_code = f'''"""
{app_name}
{app_description}
Generated by MOUSE Workflow
"""

import os
import json
import gradio as gr
import requests

# Workflow configuration
WORKFLOW_DATA = {workflow_json}

def execute_workflow(*input_values):
    """Execute the workflow with given inputs"""

    # API keys from environment
    vidraft_token = os.getenv("FRIENDLI_TOKEN")
    openai_key = os.getenv("OPENAI_API_KEY")

    nodes = WORKFLOW_DATA.get("nodes", [])
    edges = WORKFLOW_DATA.get("edges", [])

    results = {{}}

    # Get input nodes
    input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]

    # Map inputs to node IDs
    for i, node in enumerate(input_nodes):
        if i < len(input_values):
            results[node["id"]] = input_values[i]

    # Process nodes
    for node in nodes:
        node_id = node.get("id")
        node_type = node.get("type", "")
        node_data = node.get("data", {{}})
        template = node_data.get("template", {{}})

        if node_type == "textNode":
            # Combine connected inputs
            base_text = template.get("text", {{}}).get("value", "")
            connected_inputs = []

            for edge in edges:
                if edge.get("target") == node_id:
                    source_id = edge.get("source")
                    if source_id in results:
                        connected_inputs.append(f"{{source_id}}: {{results[source_id]}}")

            if connected_inputs:
                results[node_id] = f"{{base_text}}\\n\\nInputs:\\n" + "\\n".join(connected_inputs)
            else:
                results[node_id] = base_text

        elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
            # Get provider and model - VIDraft as default
            provider = template.get("provider", {{}}).get("value", "VIDraft")
            if provider not in ["VIDraft", "OpenAI"]:
                provider = "VIDraft"  # Default to VIDraft
            temperature = template.get("temperature", {{}}).get("value", 0.7)
            system_prompt = template.get("system_prompt", {{}}).get("value", "")

            # Get input text
            input_text = ""
            for edge in edges:
                if edge.get("target") == node_id:
                    source_id = edge.get("source")
                    if source_id in results:
                        input_text = results[source_id]
                        break

            # Call API
            if provider == "OpenAI" and openai_key:
                try:
                    from openai import OpenAI
                    client = OpenAI(api_key=openai_key)

                    messages = []
                    if system_prompt:
                        messages.append({{"role": "system", "content": system_prompt}})
                    messages.append({{"role": "user", "content": input_text}})

                    response = client.chat.completions.create(
                        model="gpt-4.1-mini",
                        messages=messages,
                        temperature=temperature,
                        max_tokens=1000
                    )

                    results[node_id] = response.choices[0].message.content
                except Exception as e:
                    results[node_id] = f"[OpenAI Error: {{str(e)}}]"

            elif provider == "VIDraft" and vidraft_token:
                try:
                    headers = {{
                        "Authorization": f"Bearer {{vidraft_token}}",
                        "Content-Type": "application/json"
                    }}

                    messages = []
                    if system_prompt:
                        messages.append({{"role": "system", "content": system_prompt}})
                    messages.append({{"role": "user", "content": input_text}})

                    payload = {{
                        "model": "dep89a2fld32mcm",
                        "messages": messages,
                        "max_tokens": 16384,
                        "temperature": temperature,
                        "top_p": 0.8,
                        "stream": False
                    }}

                    response = requests.post(
                        "https://api.friendli.ai/dedicated/v1/chat/completions",
                        headers=headers,
                        json=payload,
                        timeout=30
                    )

                    if response.status_code == 200:
                        results[node_id] = response.json()["choices"][0]["message"]["content"]
                    else:
                        results[node_id] = f"[VIDraft Error: {{response.status_code}}]"
                except Exception as e:
                    results[node_id] = f"[VIDraft Error: {{str(e)}}]"
            else:
                # Show which API key is missing
                if provider == "OpenAI":
                    results[node_id] = "[OpenAI API key not found. Please set OPENAI_API_KEY in Space secrets]"
                elif provider == "VIDraft":
                    results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
                else:
                    results[node_id] = f"[No API key found for {{provider}}. Using simulated response: {{input_text[:50]}}...]"

        elif node_type in ["ChatOutput", "textOutput", "Output"]:
            # Get connected result
            for edge in edges:
                if edge.get("target") == node_id:
                    source_id = edge.get("source")
                    if source_id in results:
                        results[node_id] = results[source_id]
                        break

    # Return outputs
    output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
    return [results.get(n["id"], "") for n in output_nodes]

# Build UI
with gr.Blocks(title="{app_name}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {app_name}")
    gr.Markdown("{app_description}")

    # API Status Check
    vidraft_token = os.getenv("FRIENDLI_TOKEN")
    openai_key = os.getenv("OPENAI_API_KEY")

    with gr.Accordion("🔑 API Status", open=False):
        if vidraft_token:
            gr.Markdown("✅ **VIDraft API**: Connected (Gemma-3-r1984-27B)")
        else:
            gr.Markdown("❌ **VIDraft API**: Not configured")

        if openai_key:
            gr.Markdown("✅ **OpenAI API**: Connected (gpt-4.1-mini)")
        else:
            gr.Markdown("⚠️ **OpenAI API**: Not configured (optional)")

        if not vidraft_token:
            gr.Markdown("""
            **⚠️ Important**: Please add FRIENDLI_TOKEN to Space secrets for the app to work properly.

            Go to: Space settings → Repository secrets → Add secret
            """)
        elif not openai_key:
            gr.Markdown("""
            **💡 Tip**: The app will work with VIDraft alone. Add OPENAI_API_KEY if you need OpenAI features.
            """)
        else:
            gr.Markdown("**✨ All APIs configured! Your app is fully functional.**")

    # Extract nodes
    nodes = WORKFLOW_DATA.get("nodes", [])
    input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
    output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]

    # Create inputs
    inputs = []
    if input_nodes:
        gr.Markdown("### 📥 Inputs")
        for node in input_nodes:
            label = node.get("data", {{}}).get("label", node.get("id"))
            template = node.get("data", {{}}).get("template", {{}})
            default_value = template.get("input_value", {{}}).get("value", "")

            if node.get("type") == "numberInput":
                inp = gr.Number(label=label, value=float(default_value) if default_value else 0)
            else:
                inp = gr.Textbox(label=label, value=default_value, lines=2)
            inputs.append(inp)

    # Execute button
    btn = gr.Button("🚀 Execute Workflow", variant="primary")

    # Create outputs
    outputs = []
    if output_nodes:
        gr.Markdown("### 📤 Outputs")
        for node in output_nodes:
            label = node.get("data", {{}}).get("label", node.get("id"))
            out = gr.Textbox(label=label, interactive=False, lines=3)
            outputs.append(out)

    # Connect
    btn.click(fn=execute_workflow, inputs=inputs, outputs=outputs)

    gr.Markdown("---")
    gr.Markdown("*Powered by MOUSE Workflow*")

if __name__ == "__main__":
    demo.launch()
'''

    return app_code
570
+
571
def generate_requirements_txt() -> str:
    """Return the requirements.txt content for a generated standalone app."""
    packages = ("gradio==5.34.2", "openai", "requests")
    return "".join(f"{pkg}\n" for pkg in packages)
577
+
578
def deploy_to_huggingface(workflow_data: dict, app_name: str, app_description: str,
                          hf_token: str, space_name: str, is_private: bool = False,
                          api_keys: dict = None) -> dict:
    """Deploy the workflow as a Gradio Space on Hugging Face.

    Creates (or reuses) the Space repo, uploads a generated app.py,
    requirements.txt and README.md, and registers any provided API keys
    as Space secrets.

    Args:
        workflow_data: workflow dict with "nodes" and "edges".
        app_name: display title for the Space.
        app_description: description shown in the Space UI/README.
        hf_token: Hugging Face write token used for all API calls.
        space_name: target repo id (e.g. "user/my-space").
        is_private: create the Space as private when True.
        api_keys: optional mapping of secret name -> value
            (e.g. "FRIENDLI_TOKEN", "OPENAI_API_KEY"); empty values are skipped.

    Returns:
        On success: {"success": True, "space_url", "message",
        "added_secrets", "failed_secrets", "providers_used"}.
        On failure: {"success": False, "error": <message>}.
    """

    if not HF_HUB_AVAILABLE:
        return {"success": False, "error": "huggingface-hub library not installed"}

    if api_keys is None:
        api_keys = {}

    try:
        # Initialize HF API client with the caller's token
        api = HfApi(token=hf_token)

        # Create repository (exist_ok=True reuses an existing Space)
        repo_id = api.create_repo(
            repo_id=space_name,
            repo_type="space",
            space_sdk="gradio",
            private=is_private,
            exist_ok=True
        )

        # Detect which LLM providers the workflow's nodes reference
        providers_used = set()
        nodes = workflow_data.get("nodes", [])
        for node in nodes:
            if node.get("type") in ["llmNode", "OpenAIModel", "ChatModel"]:
                template = node.get("data", {}).get("template", {})
                provider = template.get("provider", {}).get("value", "")
                if provider:
                    providers_used.add(provider)

        # Generate the files to upload
        app_code = generate_standalone_app(workflow_data, app_name, app_description)
        requirements = generate_requirements_txt()

        # README with API setup instructions
        api_status = []
        if "FRIENDLI_TOKEN" in api_keys and api_keys["FRIENDLI_TOKEN"]:
            api_status.append("- **FRIENDLI_TOKEN**: ✅ Will be configured automatically")
        else:
            api_status.append("- **FRIENDLI_TOKEN**: ⚠️ Not provided (VIDraft won't work)")

        if "OPENAI_API_KEY" in api_keys and api_keys["OPENAI_API_KEY"]:
            api_status.append("- **OPENAI_API_KEY**: ✅ Will be configured automatically")
        elif "OpenAI" in providers_used:
            api_status.append("- **OPENAI_API_KEY**: ❌ Required but not provided")

        # Leading "---" block is the Space's YAML front matter.
        readme = f"""---
title: {app_name}
emoji: 🐭
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: 5.34.2
app_file: app.py
pinned: false
---

# {app_name}

{app_description}

## 🔑 API Configuration Status

{chr(10).join(api_status)}

## 📝 Providers Used in This Workflow

{', '.join(providers_used) if providers_used else 'No LLM providers detected'}

## 🚀 Default Configuration

This app is configured to use **VIDraft (Gemma-3-r1984-27B)** as the default LLM provider for optimal performance.

---
Generated by MOUSE Workflow
"""

        # Upload the three files to the Space repo
        api.upload_file(
            path_or_fileobj=app_code.encode(),
            path_in_repo="app.py",
            repo_id=repo_id.repo_id,
            repo_type="space"
        )

        api.upload_file(
            path_or_fileobj=requirements.encode(),
            path_in_repo="requirements.txt",
            repo_id=repo_id.repo_id,
            repo_type="space"
        )

        api.upload_file(
            path_or_fileobj=readme.encode(),
            path_in_repo="README.md",
            repo_id=repo_id.repo_id,
            repo_type="space"
        )

        # Add all provided API keys as Space secrets; collect per-key outcomes
        added_secrets = []
        failed_secrets = []

        for key_name, key_value in api_keys.items():
            if key_value:  # Only add non-empty keys
                try:
                    api.add_space_secret(
                        repo_id=repo_id.repo_id,
                        key=key_name,
                        value=key_value
                    )
                    added_secrets.append(key_name)
                except Exception as e:
                    # A failed secret does not abort the deployment
                    failed_secrets.append(f"{key_name}: {str(e)}")
                    print(f"Warning: Could not add {key_name} secret: {e}")

        space_url = f"https://huggingface.co/spaces/{repo_id.repo_id}"

        return {
            "success": True,
            "space_url": space_url,
            "message": f"Successfully deployed to {space_url}",
            "added_secrets": added_secrets,
            "failed_secrets": failed_secrets,
            "providers_used": list(providers_used)
        }

    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
714
+
715
# Actual workflow execution function used by the UI runner
716
def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
    """Execute a workflow graph node-by-node and return per-node results.

    Nodes are processed in their listed order, so the workflow JSON is
    expected to list sources before the nodes that consume them.

    Args:
        workflow_data: workflow dict with "nodes" and "edges".
        input_values: mapping of input-node id -> user-supplied value;
            missing entries fall back to the node's default value.

    Returns:
        dict mapping node id -> produced value (or an "[... Error: ...]"
        string when a node fails).
    """
    import traceback

    # API keys from the environment
    vidraft_token = os.getenv("FRIENDLI_TOKEN")  # VIDraft/Friendli token
    openai_key = os.getenv("OPENAI_API_KEY")
    # anthropic_key = os.getenv("ANTHROPIC_API_KEY")  # Anthropic support disabled

    # Check whether the OpenAI client library is importable
    try:
        from openai import OpenAI
        openai_available = True
    except ImportError:
        openai_available = False
        print("OpenAI library not available")

    # Anthropic support is intentionally disabled in this build
    anthropic_available = False

    results = {}
    nodes = workflow_data.get("nodes", [])
    edges = workflow_data.get("edges", [])

    # Process nodes in listed order
    for node in nodes:
        node_id = node.get("id")
        node_type = node.get("type", "")
        node_data = node.get("data", {})

        try:
            if node_type in ["ChatInput", "textInput", "Input"]:
                # Prefer the value supplied through the UI
                if node_id in input_values:
                    results[node_id] = input_values[node_id]
                else:
                    # Fall back to the node's configured default value
                    template = node_data.get("template", {})
                    default_value = template.get("input_value", {}).get("value", "")
                    results[node_id] = default_value

            elif node_type == "textNode":
                # A text node combines all of its connected inputs
                template = node_data.get("template", {})
                base_text = template.get("text", {}).get("value", "")

                # Collect results from every incoming edge
                connected_inputs = []
                for edge in edges:
                    if edge.get("target") == node_id:
                        source_id = edge.get("source")
                        if source_id in results:
                            connected_inputs.append(f"{source_id}: {results[source_id]}")

                # Build the combined text
                if connected_inputs:
                    combined_text = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
                    results[node_id] = combined_text
                else:
                    results[node_id] = base_text

            elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
                # LLM node
                template = node_data.get("template", {})

                # Extract provider - only VIDraft or OpenAI are supported
                provider_info = template.get("provider", {})
                provider = provider_info.get("value", "VIDraft") if isinstance(provider_info, dict) else "VIDraft"  # default: VIDraft

                # Any unsupported provider falls back to VIDraft
                if provider not in ["VIDraft", "OpenAI"]:
                    provider = "VIDraft"

                # Model is fixed per provider
                # NOTE(review): `model` is assigned but never used below —
                # the API calls hard-code their model names.
                if provider == "OpenAI":
                    # OpenAI is pinned to gpt-4.1-mini
                    model = "gpt-4.1-mini"
                elif provider == "VIDraft":
                    # VIDraft is pinned to Gemma-3-r1984-27B
                    model = "Gemma-3-r1984-27B"
                else:
                    model = "Gemma-3-r1984-27B"  # default VIDraft model

                # Sampling temperature
                temp_info = template.get("temperature", {})
                temperature = temp_info.get("value", 0.7) if isinstance(temp_info, dict) else 0.7

                # System prompt
                prompt_info = template.get("system_prompt", {})
                system_prompt = prompt_info.get("value", "") if isinstance(prompt_info, dict) else ""

                # Input text: first incoming edge whose source already has a result
                input_text = ""
                for edge in edges:
                    if edge.get("target") == node_id:
                        source_id = edge.get("source")
                        if source_id in results:
                            input_text = results[source_id]
                            break

                # Real API call
                if provider == "OpenAI" and openai_key and openai_available:
                    try:
                        client = OpenAI(api_key=openai_key)

                        messages = []
                        if system_prompt:
                            messages.append({"role": "system", "content": system_prompt})
                        messages.append({"role": "user", "content": input_text})

                        response = client.chat.completions.create(
                            model="gpt-4.1-mini",  # fixed model name
                            messages=messages,
                            temperature=temperature,
                            max_tokens=1000
                        )

                        results[node_id] = response.choices[0].message.content

                    except Exception as e:
                        results[node_id] = f"[OpenAI Error: {str(e)}]"

                # Anthropic branch removed — support is disabled in this build.

                elif provider == "VIDraft" and vidraft_token:
                    try:
                        import requests

                        headers = {
                            "Authorization": f"Bearer {vidraft_token}",
                            "Content-Type": "application/json"
                        }

                        # Build the chat message list
                        messages = []
                        if system_prompt:
                            messages.append({"role": "system", "content": system_prompt})
                        messages.append({"role": "user", "content": input_text})

                        payload = {
                            "model": "dep89a2fld32mcm",  # VIDraft model ID
                            "messages": messages,
                            "max_tokens": 16384,
                            "temperature": temperature,
                            "top_p": 0.8,
                            "stream": False  # synchronous execution
                        }

                        # VIDraft API endpoint
                        response = requests.post(
                            "https://api.friendli.ai/dedicated/v1/chat/completions",
                            headers=headers,
                            json=payload,
                            timeout=30
                        )

                        if response.status_code == 200:
                            response_json = response.json()
                            results[node_id] = response_json["choices"][0]["message"]["content"]
                        else:
                            results[node_id] = f"[VIDraft API Error: {response.status_code} - {response.text}]"

                    except Exception as e:
                        results[node_id] = f"[VIDraft Error: {str(e)}]"

                else:
                    # No usable API key: produce a simulated response
                    results[node_id] = f"[Simulated {provider} Response to: {input_text[:50]}...]"

            elif node_type in ["ChatOutput", "textOutput", "Output"]:
                # Output nodes copy the result of their connected source node
                for edge in edges:
                    if edge.get("target") == node_id:
                        source_id = edge.get("source")
                        if source_id in results:
                            results[node_id] = results[source_id]
                            break

        except Exception as e:
            # Record the failure on the node and keep executing the rest
            results[node_id] = f"[Node Error: {str(e)}]"
            print(f"Error processing node {node_id}: {traceback.format_exc()}")

    return results
921
+
922
+ # -------------------------------------------------------------------
923
+ # 🎨 CSS
924
+ # -------------------------------------------------------------------
925
# Stylesheet injected into the Gradio Blocks app below (css= parameter).
CSS = """
.main-container{max-width:1600px;margin:0 auto;}
.workflow-section{margin-bottom:2rem;min-height:500px;}
.button-row{display:flex;gap:1rem;justify-content:center;margin:1rem 0;}
.status-box{
padding:10px;border-radius:5px;margin-top:10px;
background:#f0f9ff;border:1px solid #3b82f6;color:#1e40af;
}
.component-description{
padding:24px;background:linear-gradient(135deg,#f8fafc 0%,#e2e8f0 100%);
border-left:4px solid #3b82f6;border-radius:12px;
box-shadow:0 2px 8px rgba(0,0,0,.05);margin:16px 0;
}
.workflow-container{position:relative;}
.ui-execution-section{
background:linear-gradient(135deg,#f0fdf4 0%,#dcfce7 100%);
padding:24px;border-radius:12px;margin:24px 0;
border:1px solid #86efac;
}
.powered-by{
text-align:center;color:#64748b;font-size:14px;
margin-top:8px;font-style:italic;
}
.sample-buttons{
display:grid;grid-template-columns:1fr 1fr;gap:0.5rem;
margin-top:0.5rem;
}
.deploy-section{
background:linear-gradient(135deg,#fef3c7 0%,#fde68a 100%);
padding:24px;border-radius:12px;margin:24px 0;
border:1px solid #fbbf24;
}
.save-indicator{
text-align:right;
font-size:14px;
color:#16a34a;
padding:8px 16px;
background:#f0fdf4;
border-radius:20px;
display:inline-block;
margin-left:auto;
}
.workflow-info{
font-size:14px;
color:#475569;
background:#f8fafc;
padding:8px 16px;
border-radius:8px;
display:inline-block;
margin-bottom:16px;
}
"""
977
+
978
+ # -------------------------------------------------------------------
979
+ # 🖥️ Gradio 앱
980
+ # -------------------------------------------------------------------
981
+ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as demo:
982
+
983
+ with gr.Column(elem_classes=["main-container"]):
984
+ gr.Markdown("# 🐭 MOUSE Workflow")
985
+ gr.Markdown("**Visual Workflow Builder with Interactive UI Execution**")
986
+ gr.HTML('<p class="powered-by">@Powered by VIDraft & Huggingface gradio</p>')
987
+
988
+ html_content = """<div class="component-description">
989
+ <p style="font-size:16px;margin:0;">Build sophisticated workflows visually • Import/Export JSON • Generate interactive UI for end-users • Default LLM: VIDraft (Gemma-3-r1984-27B)</p>
990
+ <p style="font-size:14px;margin-top:8px;color:#64748b;">💡 Tip: Your workflow is automatically saved as you make changes. The JSON preview updates in real-time!</p>
991
+ </div>"""
992
+ gr.HTML(html_content)
993
+
994
+ # API Status Display
995
+ with gr.Accordion("🔌 API Status", open=False):
996
+ gr.Markdown(f"""
997
+ **Available APIs:**
998
+ - FRIENDLI_TOKEN (VIDraft): {'✅ Connected' if os.getenv("FRIENDLI_TOKEN") else '❌ Not found'}
999
+ - OPENAI_API_KEY: {'✅ Connected' if os.getenv("OPENAI_API_KEY") else '❌ Not found'}
1000
+
1001
+ **Libraries:**
1002
+ - OpenAI: {'✅ Installed' if OPENAI_AVAILABLE else '❌ Not installed'}
1003
+ - Requests: {'✅ Installed' if REQUESTS_AVAILABLE else '❌ Not installed'}
1004
+ - Hugging Face Hub: {'✅ Installed' if HF_HUB_AVAILABLE else '❌ Not installed (needed for deployment)'}
1005
+
1006
+ **Available Models:**
1007
+ - OpenAI: gpt-4.1-mini (fixed)
1008
+ - VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)
1009
+
1010
+ **Sample Workflows:**
1011
+ - Basic Q&A: Simple question-answer flow (VIDraft)
1012
+ - VIDraft: Korean language example with Gemma model
1013
+ - Multi-Input: Combine multiple inputs for personalized output (VIDraft)
1014
+ - Chain: Sequential processing with multiple outputs (VIDraft + OpenAI)
1015
+
1016
+ **Note**: All examples prioritize VIDraft for optimal performance. Friendli API token will be automatically configured during deployment.
1017
+ """)
1018
+
1019
+ # State for storing workflow data
1020
+ loaded_data = gr.State(None)
1021
+ trigger_update = gr.State(False)
1022
+ save_status = gr.State("Ready")
1023
+
1024
+ # ─── Dynamic Workflow Container ───
1025
+ with gr.Column(elem_classes=["workflow-container"]):
1026
+ # Auto-save status indicator
1027
+ with gr.Row():
1028
+ gr.Markdown("### 🎨 Visual Workflow Designer")
1029
+ save_indicator = gr.Markdown("💾 Auto-save: Ready", elem_classes=["save-indicator"])
1030
+
1031
@gr.render(inputs=[loaded_data, trigger_update])
def render_workflow(data, trigger):
    """Dynamically (re)render the WorkflowBuilder component.

    Re-runs whenever ``loaded_data`` or ``trigger_update`` changes.
    ``trigger`` is only a change signal (a flipped boolean) and its value
    is never used directly.
    """
    workflow_value = data if data else {"nodes": [], "edges": []}

    wb = WorkflowBuilder(
        label="",
        info="Drag nodes → Connect edges → Edit properties → Changes auto-save!",
        value=workflow_value,
        elem_id="main_workflow"
    )

    # Automatically persist WorkflowBuilder edits back into loaded_data.
    def update_workflow_data(workflow_data):
        """Copy the edited workflow into state and refresh the save indicator."""
        import time, copy
        # deepcopy creates a brand-new object so Gradio detects the state change
        safe_data = copy.deepcopy(workflow_data or {})
        return safe_data, f"💾 Auto-save: Saved ✓ ({time.strftime('%H:%M:%S')})"


    wb.change(
        fn=update_workflow_data,
        inputs=wb,
        outputs=[loaded_data, save_indicator]
    )

    return wb
1059
+
1060
+ # ─── Import Section ───
1061
+ with gr.Accordion("📥 Import Workflow", open=True):
1062
+ gr.Markdown("*Load an existing workflow from JSON or start with a sample template*")
1063
+ with gr.Row():
1064
+ with gr.Column(scale=2):
1065
+ import_json_text = gr.Code(
1066
+ language="json",
1067
+ label="Paste JSON here",
1068
+ lines=8,
1069
+ value='{\n "nodes": [],\n "edges": []\n}'
1070
+ )
1071
+ with gr.Column(scale=1):
1072
+ file_upload = gr.File(
1073
+ label="Or upload JSON file",
1074
+ file_types=[".json"],
1075
+ type="filepath"
1076
+ )
1077
+ btn_load = gr.Button("📥 Load Workflow", variant="primary", size="lg")
1078
+
1079
+ # Sample buttons
1080
+ gr.Markdown("**Sample Workflows:**")
1081
+ with gr.Row():
1082
+ btn_sample_basic = gr.Button("🎯 Basic Q&A", variant="secondary", scale=1)
1083
+ btn_sample_vidraft = gr.Button("🤖 VIDraft", variant="secondary", scale=1)
1084
+ with gr.Row():
1085
+ btn_sample_multi = gr.Button("📝 Multi-Input", variant="secondary", scale=1)
1086
+ btn_sample_chain = gr.Button("🔗 Chain", variant="secondary", scale=1)
1087
+
1088
+ # Status
1089
+ status_text = gr.Textbox(
1090
+ label="Status",
1091
+ value="Ready",
1092
+ elem_classes=["status-box"],
1093
+ interactive=False
1094
+ )
1095
+
1096
+ # ─── Export Section ───
1097
+ gr.Markdown("## 💾 Export / Live Preview")
1098
+ gr.Markdown("*Your workflow is automatically saved. The JSON below shows your current workflow in real-time.*")
1099
+
1100
+ # Workflow info display
1101
+ workflow_info = gr.Markdown("📊 Empty workflow", elem_classes=["workflow-info"])
1102
+
1103
+ with gr.Row():
1104
+ with gr.Column(scale=3):
1105
+ export_preview = gr.Code(
1106
+ language="json",
1107
+ label="Current Workflow JSON (Live Preview)",
1108
+ lines=8,
1109
+ interactive=False
1110
+ )
1111
+ gr.Markdown("*💡 This JSON updates automatically as you modify the workflow above*")
1112
+ with gr.Column(scale=1):
1113
+ btn_preview = gr.Button("🔄 Force Refresh", size="lg", variant="secondary")
1114
+ btn_download = gr.DownloadButton(
1115
+ "💾 Download JSON",
1116
+ size="lg",
1117
+ variant="primary",
1118
+ visible=True
1119
+ )
1120
+
1121
+ # ─── Deploy Section ───
1122
+ with gr.Accordion("🚀 Deploy to Hugging Face Space", open=False, elem_classes=["deploy-section"]):
1123
+ gr.Markdown("""
1124
+ Deploy your **current workflow** as an independent Hugging Face Space app.
1125
+ The workflow shown in the JSON preview above will be deployed exactly as is.
1126
+ """)
1127
+
1128
+ gr.Markdown("*⚠️ Make sure to save/finalize your workflow design before deploying!*")
1129
+
1130
+ with gr.Row():
1131
+ with gr.Column(scale=2):
1132
+ deploy_name = gr.Textbox(
1133
+ label="App Name",
1134
+ placeholder="My Awesome Workflow App",
1135
+ value="My Workflow App"
1136
+ )
1137
+ deploy_description = gr.Textbox(
1138
+ label="App Description",
1139
+ placeholder="Describe what your workflow does...",
1140
+ lines=3,
1141
+ value="A workflow application created with MOUSE Workflow builder."
1142
+ )
1143
+ deploy_space_name = gr.Textbox(
1144
+ label="Space Name (your-username/space-name)",
1145
+ placeholder="username/my-workflow-app",
1146
+ info="This will be the URL of your Space"
1147
+ )
1148
+
1149
+ with gr.Column(scale=1):
1150
+ deploy_token = gr.Textbox(
1151
+ label="Hugging Face Token",
1152
+ type="password",
1153
+ placeholder="hf_...",
1154
+ info="Get your token from huggingface.co/settings/tokens"
1155
+ )
1156
+
1157
+ # API Keys 설정 섹션
1158
+ gr.Markdown("### 🔑 API Keys Configuration")
1159
+
1160
+ # FRIENDLI_TOKEN 설정
1161
+ friendli_token_input = gr.Textbox(
1162
+ label="FRIENDLI_TOKEN (VIDraft/Gemma)",
1163
+ type="password",
1164
+ placeholder="flp_...",
1165
+ value=os.getenv("FRIENDLI_TOKEN", ""),
1166
+ info="Required for VIDraft. Will be added as secret."
1167
+ )
1168
+
1169
+ # OpenAI API Key 설정
1170
+ openai_token_input = gr.Textbox(
1171
+ label="OPENAI_API_KEY (Optional)",
1172
+ type="password",
1173
+ placeholder="sk-...",
1174
+ value=os.getenv("OPENAI_API_KEY", ""),
1175
+ info="Optional. Leave empty if not using OpenAI."
1176
+ )
1177
+
1178
+ deploy_private = gr.Checkbox(
1179
+ label="Make Space Private",
1180
+ value=False
1181
+ )
1182
+
1183
+ btn_deploy = gr.Button("🚀 Deploy to HF Space", variant="primary", size="lg")
1184
+
1185
+ # Deploy status
1186
+ deploy_status = gr.Markdown("")
1187
+
1188
+ # Preview generated code
1189
+ with gr.Accordion("📄 Preview Generated Code", open=False):
1190
+ generated_code_preview = gr.Code(
1191
+ language="python",
1192
+ label="app.py (This will be deployed)",
1193
+ lines=20
1194
+ )
1195
+
1196
+ # ─── UI Execution Section ───
1197
+ with gr.Column(elem_classes=["ui-execution-section"]):
1198
+ gr.Markdown("## 🚀 UI Execution")
1199
+ gr.Markdown("Test your workflow instantly! Click below to generate and run the UI from your current workflow design.")
1200
+
1201
+ btn_execute_ui = gr.Button("▶️ Generate & Run UI from Current Workflow", variant="primary", size="lg")
1202
+
1203
+ # UI execution state
1204
+ ui_workflow_data = gr.State(None)
1205
+
1206
+ # Dynamic UI container
1207
@gr.render(inputs=[ui_workflow_data])
def render_execution_ui(workflow_data):
    """Build an interactive test UI (inputs → execute → outputs) for a workflow.

    Re-rendered whenever ``ui_workflow_data`` changes. One Gradio component is
    generated per input/output node; intermediate nodes are handled by
    ``execute_workflow_simple`` and never shown directly.
    """
    if not workflow_data or not workflow_data.get("nodes"):
        gr.Markdown("*Load a workflow first, then click 'Generate & Run UI'*")
        return

    gr.Markdown("### 📋 Generated UI")

    # Partition the workflow's nodes into input and output nodes
    input_nodes = []
    output_nodes = []

    for node in workflow_data.get("nodes", []):
        node_type = node.get("type", "")
        if node_type in ["ChatInput", "textInput", "Input", "numberInput"]:
            input_nodes.append(node)
        elif node_type in ["ChatOutput", "textOutput", "Output"]:
            output_nodes.append(node)
        elif node_type == "textNode":
            # textNode is an intermediate processing node; not shown in the UI
            pass

    # Create one input component per input node, keyed by node id
    input_components = {}

    if input_nodes:
        gr.Markdown("#### 📥 Inputs")
        for node in input_nodes:
            node_id = node.get("id")
            label = node.get("data", {}).get("label", node_id)
            node_type = node.get("type")

            # Pull the node's default value from its template, if any
            template = node.get("data", {}).get("template", {})
            default_value = template.get("input_value", {}).get("value", "")

            if node_type == "numberInput":
                input_components[node_id] = gr.Number(
                    label=label,
                    value=float(default_value) if default_value else 0
                )
            else:
                input_components[node_id] = gr.Textbox(
                    label=label,
                    value=default_value,
                    lines=2,
                    placeholder="Enter your input..."
                )

    # Execute button
    execute_btn = gr.Button("🎯 Execute", variant="primary")

    # Create one read-only output component per output node
    output_components = {}

    if output_nodes:
        gr.Markdown("#### 📤 Outputs")
        for node in output_nodes:
            node_id = node.get("id")
            label = node.get("data", {}).get("label", node_id)

            output_components[node_id] = gr.Textbox(
                label=label,
                interactive=False,
                lines=3
            )

    # Execution log
    gr.Markdown("#### 📊 Execution Log")
    log_output = gr.Textbox(
        label="Log",
        interactive=False,
        lines=5
    )

    # Execution handler (closure over workflow_data and the component dicts)
    def execute_ui_workflow(*input_values):
        """Run the workflow with the values entered into the generated inputs.

        Positional ``input_values`` arrive in the same order as
        ``input_components``; returns one value per output component plus the
        execution log (always the last element).
        """
        # Map positional values back to their node ids
        inputs_dict = {}
        input_keys = list(input_components.keys())
        for i, key in enumerate(input_keys):
            if i < len(input_values):
                inputs_dict[key] = input_values[i]

        # Check API status
        log = "=== Workflow Execution Started ===\n"
        log += f"Inputs provided: {len(inputs_dict)}\n"

        # Report which API keys are available in the environment
        vidraft_token = os.getenv("FRIENDLI_TOKEN")
        openai_key = os.getenv("OPENAI_API_KEY")

        log += "\nAPI Status:\n"
        log += f"- FRIENDLI_TOKEN (VIDraft): {'✅ Found' if vidraft_token else '❌ Not found'}\n"
        log += f"- OPENAI_API_KEY: {'✅ Found' if openai_key else '❌ Not found'}\n"

        if not vidraft_token and not openai_key:
            log += "\n⚠️ No API keys found. Results will be simulated.\n"
            log += "To get real AI responses, set API keys in environment variables.\n"
            log += "Minimum requirement: FRIENDLI_TOKEN for VIDraft\n"
        elif vidraft_token and not openai_key:
            log += "\n✅ VIDraft API connected - Basic functionality available\n"
            log += "💡 Add OPENAI_API_KEY for full functionality\n"

        log += "\n--- Processing Nodes ---\n"

        try:
            results = execute_workflow_simple(workflow_data, inputs_dict)

            # Collect one value per output component, in declaration order
            output_values = []
            for node_id in output_components.keys():
                value = results.get(node_id, "No output")
                output_values.append(value)

                # Truncate long values so the log stays readable
                display_value = value[:100] + "..." if len(str(value)) > 100 else value
                log += f"\nOutput [{node_id}]: {display_value}\n"

            log += "\n=== Execution Completed Successfully! ===\n"
            output_values.append(log)

            return output_values

        except Exception as e:
            error_msg = f"❌ Error: {str(e)}"
            log += f"\n{error_msg}\n"
            log += "=== Execution Failed ===\n"
            return [error_msg] * len(output_components) + [log]

    # Wire the button to the handler; log_output is always the last output
    all_inputs = list(input_components.values())
    all_outputs = list(output_components.values()) + [log_output]

    execute_btn.click(
        fn=execute_ui_workflow,
        inputs=all_inputs,
        outputs=all_outputs
    )
1346
+
1347
+ # ─── Event Handlers ───
1348
+
1349
+ # Load workflow (from text or file)
1350
def load_workflow(json_text, file_obj):
    """Parse a workflow from pasted JSON text or an uploaded file.

    Returns (workflow_dict, status_message, editor_text, save_indicator_text).
    On failure the JSON editor and save indicator are left untouched via
    gr.update().
    """
    parsed, status = load_json_from_text_or_file(json_text, file_obj)
    if not parsed:
        return None, status, gr.update(), gr.update()
    # Pretty-print only when the source was a file upload; otherwise keep
    # the user's pasted text exactly as typed.
    editor_text = export_pretty(parsed) if file_obj else json_text
    return parsed, status, editor_text, "💾 Auto-save: Loaded ✓"
1357
+
1358
+ btn_load.click(
1359
+ fn=load_workflow,
1360
+ inputs=[import_json_text, file_upload],
1361
+ outputs=[loaded_data, status_text, import_json_text, save_indicator]
1362
+ ).then(
1363
+ fn=lambda current_trigger: not current_trigger,
1364
+ inputs=trigger_update,
1365
+ outputs=trigger_update
1366
+ )
1367
+
1368
+ # Auto-load when file is uploaded
1369
+ file_upload.change(
1370
+ fn=load_workflow,
1371
+ inputs=[import_json_text, file_upload],
1372
+ outputs=[loaded_data, status_text, import_json_text, save_indicator]
1373
+ ).then(
1374
+ fn=lambda current_trigger: not current_trigger,
1375
+ inputs=trigger_update,
1376
+ outputs=trigger_update
1377
+ )
1378
+
1379
+ # Load samples
1380
+ btn_sample_basic.click(
1381
+ fn=lambda: (create_sample_workflow("basic"), "✅ Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic")), "💾 Auto-save: Sample loaded ✓"),
1382
+ outputs=[loaded_data, status_text, import_json_text, save_indicator]
1383
+ ).then(
1384
+ fn=lambda current_trigger: not current_trigger,
1385
+ inputs=trigger_update,
1386
+ outputs=trigger_update
1387
+ )
1388
+
1389
+ btn_sample_vidraft.click(
1390
+ fn=lambda: (create_sample_workflow("vidraft"), "✅ VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft")), "💾 Auto-save: Sample loaded ✓"),
1391
+ outputs=[loaded_data, status_text, import_json_text, save_indicator]
1392
+ ).then(
1393
+ fn=lambda current_trigger: not current_trigger,
1394
+ inputs=trigger_update,
1395
+ outputs=trigger_update
1396
+ )
1397
+
1398
+ btn_sample_multi.click(
1399
+ fn=lambda: (create_sample_workflow("multi_input"), "✅ Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input")), "💾 Auto-save: Sample loaded ✓"),
1400
+ outputs=[loaded_data, status_text, import_json_text, save_indicator]
1401
+ ).then(
1402
+ fn=lambda current_trigger: not current_trigger,
1403
+ inputs=trigger_update,
1404
+ outputs=trigger_update
1405
+ )
1406
+
1407
+ btn_sample_chain.click(
1408
+ fn=lambda: (create_sample_workflow("chain"), "✅ Chain processing sample loaded", export_pretty(create_sample_workflow("chain")), "💾 Auto-save: Sample loaded ✓"),
1409
+ outputs=[loaded_data, status_text, import_json_text, save_indicator]
1410
+ ).then(
1411
+ fn=lambda current_trigger: not current_trigger,
1412
+ inputs=trigger_update,
1413
+ outputs=trigger_update
1414
+ )
1415
+
1416
+ # Preview current workflow - 강제 새로고침
1417
def force_refresh_preview(current_data):
    """Regenerate the JSON preview, save indicator and workflow summary."""
    if not current_data:
        return "No workflow data available", "💾 Auto-save: No data", "📊 Empty workflow"
    nodes = current_data.get("nodes", [])
    edges = current_data.get("edges", [])
    summary = f"📊 Workflow contains {len(nodes)} nodes and {len(edges)} edges"
    return export_pretty(current_data), "💾 Auto-save: Refreshed ✓", summary
1425
+
1426
+ btn_preview.click(
1427
+ fn=force_refresh_preview,
1428
+ inputs=loaded_data,
1429
+ outputs=[export_preview, save_indicator, workflow_info]
1430
+ )
1431
+
1432
+ # Download workflow는 이미 loaded_data.change에서 처리됨
1433
+
1434
+ # Auto-update export preview when workflow changes
1435
def update_preview_and_download(data):
    """Refresh the live JSON preview, download file and node/edge summary."""
    if not data:
        return "No workflow data", None, "📊 Empty workflow"
    summary = (
        f"📊 Workflow contains {len(data.get('nodes', []))} nodes "
        f"and {len(data.get('edges', []))} edges"
    )
    return export_pretty(data), export_file(data), summary
1445
+
1446
+ loaded_data.change(
1447
+ fn=update_preview_and_download,
1448
+ inputs=loaded_data,
1449
+ outputs=[export_preview, btn_download, workflow_info]
1450
+ )
1451
+
1452
+ # Generate UI execution - 현재 워크플로우 사용
1453
def prepare_ui_execution(current_data):
    """Hand the current workflow to the execution renderer, or warn if empty."""
    if current_data and current_data.get("nodes"):
        return current_data
    gr.Warning("Please create a workflow first!")
    return None
1459
+
1460
+ btn_execute_ui.click(
1461
+ fn=prepare_ui_execution,
1462
+ inputs=loaded_data,
1463
+ outputs=ui_workflow_data
1464
+ )
1465
+
1466
+ # ─── Deploy Event Handlers ───
1467
+
1468
+ # Preview generated code
1469
def preview_generated_code(workflow_data, app_name, app_description):
    """Return the standalone app.py source for the current workflow.

    Emits a placeholder comment block when there is no workflow, no nodes,
    or generation raises.
    """
    if not workflow_data:
        return "# No workflow loaded\n# Create or load a workflow first"
    if not workflow_data.get("nodes"):
        return "# Empty workflow\n# Add some nodes to see the generated code"
    try:
        return generate_standalone_app(workflow_data, app_name, app_description)
    except Exception as exc:
        return f"# Error generating code\n# {str(exc)}"
1481
+
1482
+ # Update preview when inputs change
1483
+ deploy_name.change(
1484
+ fn=preview_generated_code,
1485
+ inputs=[loaded_data, deploy_name, deploy_description],
1486
+ outputs=generated_code_preview
1487
+ )
1488
+
1489
+ deploy_description.change(
1490
+ fn=preview_generated_code,
1491
+ inputs=[loaded_data, deploy_name, deploy_description],
1492
+ outputs=generated_code_preview
1493
+ )
1494
+
1495
+ # Update preview when workflow changes too
1496
+ loaded_data.change(
1497
+ fn=preview_generated_code,
1498
+ inputs=[loaded_data, deploy_name, deploy_description],
1499
+ outputs=generated_code_preview
1500
+ )
1501
+
1502
+ # Deploy handler
1503
def handle_deploy(workflow_data, app_name, app_description, hf_token, space_name,
                  friendli_token, openai_token, is_private):
    """Deploy the current workflow as a Hugging Face Space.

    Generator: yields progress/status Markdown for streaming display.

    BUG FIX: this is a generator function (it uses ``yield``), so the original
    ``return "❌ …"`` statements in the validation section silently discarded
    their message — a generator's ``return value`` only sets
    ``StopIteration.value`` and Gradio never displays it. Every user-visible
    status must be *yielded*; ``return`` is used only to stop.
    """
    # --- validation: each failure is yielded to the UI, then we stop ---
    if not workflow_data:
        yield "❌ No workflow loaded. Please create or load a workflow first."
        return
    if not workflow_data.get("nodes"):
        yield "❌ Empty workflow. Please add some nodes to your workflow."
        return
    if not hf_token:
        yield "❌ Hugging Face token is required. Get yours at huggingface.co/settings/tokens"
        return
    if not space_name:
        yield "❌ Space name is required. Format: username/space-name"
        return
    # Validate space name format
    if "/" not in space_name:
        yield "❌ Invalid space name format. Use: username/space-name"
        return
    # Check if huggingface-hub is available
    if not HF_HUB_AVAILABLE:
        yield "❌ huggingface-hub library not installed. Install with: pip install huggingface-hub"
        return

    # Show deploying status
    yield "🔄 Deploying to Hugging Face Space..."

    # Collect API keys: prefer values typed into the UI, fall back to env vars.
    api_keys = {}

    friendli_token = friendli_token or os.getenv("FRIENDLI_TOKEN", "")
    if friendli_token:
        api_keys["FRIENDLI_TOKEN"] = friendli_token

    openai_token = openai_token or os.getenv("OPENAI_API_KEY", "")
    if openai_token:
        api_keys["OPENAI_API_KEY"] = openai_token

    # Deploy
    result = deploy_to_huggingface(
        workflow_data=workflow_data,
        app_name=app_name,
        app_description=app_description,
        hf_token=hf_token,
        space_name=space_name,
        is_private=is_private,
        api_keys=api_keys
    )

    if not result["success"]:
        yield f"❌ **Deployment Failed**\n\nError: {result['error']}"
        return

    # Build secrets status message
    secrets_msg = "\n\n**🔑 API Keys Status:**"

    for secret in result.get("added_secrets") or []:
        secrets_msg += f"\n- {secret}: ✅ Successfully added"

    for failure in result.get("failed_secrets") or []:
        secrets_msg += f"\n- {failure}: ❌ Failed to add"

    # Warn about providers used in the workflow whose keys were not added
    providers = result.get("providers_used", [])
    if "VIDraft" in providers and "FRIENDLI_TOKEN" not in result.get("added_secrets", []):
        secrets_msg += "\n- FRIENDLI_TOKEN: ⚠️ Required for VIDraft but not provided"
    if "OpenAI" in providers and "OPENAI_API_KEY" not in result.get("added_secrets", []):
        secrets_msg += "\n- OPENAI_API_KEY: ⚠️ Required for OpenAI but not provided"

    yield f"""✅ **Deployment Successful!**

🎉 Your workflow has been deployed to:
[{result['space_url']}]({result['space_url']})

⏱️ The Space will be ready in a few minutes. Building usually takes 2-5 minutes.

{secrets_msg}

📝 **Providers Detected in Workflow:**
{', '.join(result.get('providers_used', [])) if result.get('providers_used') else 'No LLM providers detected'}

🚀 **Default Configuration:**
The app is configured to prioritize VIDraft (Gemma-3-r1984-27B) for optimal performance.

📚 **Space Management:**
- To update secrets: Go to Space settings → Repository secrets
- To restart Space: Go to Space settings → Factory reboot
- To make changes: Edit files directly in the Space repository
"""
1595
+
1596
+ btn_deploy.click(
1597
+ fn=handle_deploy,
1598
+ inputs=[loaded_data, deploy_name, deploy_description, deploy_token, deploy_space_name,
1599
+ friendli_token_input, openai_token_input, deploy_private],
1600
+ outputs=deploy_status
1601
+ )
1602
+
1603
+
1604
# -------------------------------------------------------------------
# 🚀 Run
# -------------------------------------------------------------------
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable inside containers/Spaces.
    demo.launch(server_name="0.0.0.0", show_error=True)