import pickle import subprocess import sys import gradio as gr import os from openai import AsyncOpenAI from openai import OpenAI from huggingface_hub import InferenceClient # File: enhanced_gradio_interface.py import asyncio from collections import defaultdict import json import os import re from time import time import uuid from typing import List, Dict, Any, Optional from dataclasses import dataclass from threading import Lock import threading import json import os import queue import traceback import uuid from typing import Coroutine, Dict, List, Any, Optional, Callable from dataclasses import dataclass from queue import Queue, Empty from threading import Lock, Event, Thread import threading from concurrent.futures import ThreadPoolExecutor import time import gradio as gr from openai import AsyncOpenAI, OpenAI import pyttsx3 from rich.console import Console api_key = "" client = OpenAI( base_url="https://Localhost/v1", api_key=api_key ) BASE_URL="http://localhost:1234/v1" BASE_API_KEY="not-needed" BASE_CLIENT = AsyncOpenAI( base_url=BASE_URL, api_key=BASE_API_KEY ) # Global state for client BASEMODEL_ID = "leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf" # Global state for selected model ID CLIENT =OpenAI( base_url=BASE_URL, api_key=BASE_API_KEY ) # Global state for client # --- Global Variables (if needed) --- console = Console() # --- Configuration --- LOCAL_BASE_URL = "http://localhost:1234/v1" LOCAL_API_KEY = "not-needed" # HuggingFace Spaces configuration HF_INFERENCE_URL = "https://api-inference.huggingface.co/models/" HF_API_KEY = os.getenv("HF_API_KEY", "") DEFAULT_TEMPERATURE = 0.7 DEFAULT_MAX_TOKENS = 5000 console = Console() ############################################################# @dataclass class LLMMessage: role: str content: str message_id: str = None conversation_id: str = None timestamp: float = None metadata: Dict[str, Any] = None def __post_init__(self): if self.message_id is None: self.message_id = str(uuid.uuid4()) if self.timestamp is None: self.timestamp = time.time() if self.metadata is None: self.metadata = {} @dataclass class LLMRequest: message: LLMMessage response_event: str = None callback: Callable = None def __post_init__(self): if self.response_event is None: self.response_event = f"llm_response_{self.message.message_id}" @dataclass class LLMResponse: message: LLMMessage request_id: str success: bool = True error: str = None ############################################################# class EventManager: def __init__(self): self._handlers = defaultdict(list) self._lock = threading.Lock() def register(self, event: str, handler: Callable): with self._lock: self._handlers[event].append(handler) def unregister(self, event: str, handler: Callable): with self._lock: if event in self._handlers and handler in self._handlers[event]: self._handlers[event].remove(handler) def raise_event(self, event: str, data: Any): with self._lock: handlers = self._handlers[event][:] for handler in handlers: try: handler(data) except Exception as e: console.log(f"Error in event handler for {event}: {e}", style="bold red") EVENT_MANAGER = EventManager() def RegisterEvent(event: str, handler: Callable): EVENT_MANAGER.register(event, handler) def RaiseEvent(event: str, data: Any): EVENT_MANAGER.raise_event(event, data) def UnregisterEvent(event: str, handler: Callable): EVENT_MANAGER.unregister(event, handler) ############################################################# @dataclass class CanvasArtifact: id: str type: str # 'code', 'diagram', 'text', 'image' content: str title: str timestamp: float metadata: Dict[str, Any] = None def __post_init__(self): if self.metadata is None: self.metadata = {} class LLMAgent: """Main Agent Driver ! Agent For Multiple messages at once , has a message queing service as well as agenerator method for easy intergration with console applications as well as ui !""" def __init__( self, model_id: str = BASEMODEL_ID, system_prompt: str = None, max_queue_size: int = 1000, max_retries: int = 3, timeout: int = 30000, max_tokens: int = 5000, temperature: float = 0.3, base_url: str = "http://localhost:1234/v1", api_key: str = "not-needed", generate_fn: Callable[[List[Dict[str, str]]], Coroutine[Any, Any, str]] = None, ): self.model_id = model_id self.system_prompt = system_prompt or "You are a helpful AI assistant." self.request_queue = Queue(maxsize=max_queue_size) self.max_retries = max_retries self.timeout = timeout self.is_running = False self._stop_event = Event() self.processing_thread = None # Canvas artifacts self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = defaultdict(list) self.max_canvas_artifacts = 1000 # Conversation tracking self.conversations: Dict[str, List[LLMMessage]] = {} self.max_history_length = 100 self._generate = generate_fn or self._default_generate self.api_key = api_key self.base_url = base_url self.max_tokens = max_tokens self.temperature = temperature self.async_client = self.CreateClient(base_url, api_key) self.current_conversation = "default" # Active requests waiting for responses self.pending_requests: Dict[str, LLMRequest] = {} self.pending_requests_lock = Lock() # Register internal event handlers self._register_event_handlers() # Register internal event handlers self._register_event_handlers() # Speech synthesis try: self.tts_engine = pyttsx3.init() self.setup_tts() self.speech_enabled = True except Exception as e: console.log(f"[yellow]TTS not available: {e}[/yellow]") self.speech_enabled = False console.log("[bold green]🚀 Enhanced LLM Agent Initialized[/bold green]") # Start the processing thread immediately self.start() def setup_tts(self): """Configure text-to-speech engine""" if hasattr(self, 'tts_engine'): voices = self.tts_engine.getProperty('voices') if voices: self.tts_engine.setProperty('voice', voices[0].id) self.tts_engine.setProperty('rate', 150) self.tts_engine.setProperty('volume', 0.8) def speak(self, text: str): """Convert text to speech in a non-blocking way""" if not hasattr(self, 'speech_enabled') or not self.speech_enabled: return def _speak(): try: # Clean text for speech (remove markdown, code blocks) clean_text = re.sub(r'```.*?```', '', text, flags=re.DOTALL) clean_text = re.sub(r'`.*?`', '', clean_text) clean_text = clean_text.strip() if clean_text: self.tts_engine.say(clean_text) self.tts_engine.runAndWait() else: self.tts_engine.say(text) self.tts_engine.runAndWait() except Exception as e: console.log(f"[red]TTS Error: {e}[/red]") thread = threading.Thread(target=_speak, daemon=True) thread.start() async def _default_generate(self, messages: List[Dict[str, str]]) -> str: """Default generate function if none provided""" return await self.openai_generate(messages) def create_interface(self): """Create the full LCARS-styled interface without HuggingFace options""" lcars_css = """ :root { --lcars-orange: #FF9900; --lcars-red: #FF0033; --lcars-blue: #6699FF; --lcars-purple: #CC99FF; --lcars-pale-blue: #99CCFF; --lcars-black: #000000; --lcars-dark-blue: #3366CC; --lcars-gray: #424242; --lcars-yellow: #FFFF66; } body { background: var(--lcars-black); color: var(--lcars-orange); font-family: 'Antonio', 'LCD', 'Courier New', monospace; margin: 0; padding: 0; } .gradio-container { background: var(--lcars-black) !important; min-height: 100vh; } .lcars-container { background: var(--lcars-black); border: 4px solid var(--lcars-orange); border-radius: 0 30px 0 0; min-height: 100vh; padding: 20px; } .lcars-header { background: linear-gradient(90deg, var(--lcars-red), var(--lcars-orange)); padding: 20px 40px; border-radius: 0 60px 0 0; margin: -20px -20px 20px -20px; border-bottom: 6px solid var(--lcars-blue); } .lcars-title { font-size: 2.5em; font-weight: bold; color: var(--lcars-black); margin: 0; } .lcars-subtitle { font-size: 1.2em; color: var(--lcars-black); margin: 10px 0 0 0; } .lcars-panel { background: rgba(66, 66, 66, 0.9); border: 2px solid var(--lcars-orange); border-radius: 0 20px 0 20px; padding: 15px; margin-bottom: 15px; } .lcars-button { background: var(--lcars-orange); color: var(--lcars-black) !important; border: none !important; border-radius: 0 15px 0 15px !important; padding: 10px 20px !important; font-family: inherit !important; font-weight: bold !important; margin: 5px !important; } .lcars-button:hover { background: var(--lcars-red) !important; } .lcars-input { background: var(--lcars-black) !important; color: var(--lcars-orange) !important; border: 2px solid var(--lcars-blue) !important; border-radius: 0 10px 0 10px !important; padding: 10px !important; } .lcars-chatbot { background: var(--lcars-black) !important; border: 2px solid var(--lcars-purple) !important; border-radius: 0 15px 0 15px !important; } .status-indicator { display: inline-block; width: 12px; height: 12px; border-radius: 50%; background: var(--lcars-red); margin-right: 8px; } .status-online { background: var(--lcars-blue); animation: pulse 2s infinite; } @keyframes pulse { 0% { opacity: 1; } 50% { opacity: 0.5; } 100% { opacity: 1; } } """ with gr.Blocks(css=lcars_css, theme=gr.themes.Default(), title="LCARS Terminal") as interface: with gr.Column(elem_classes="lcars-container"): # Header with gr.Row(elem_classes="lcars-header"): gr.Markdown("""
🚀 LCARS TERMINAL
STARFLEET AI DEVELOPMENT CONSOLE
SYSTEM ONLINE
""") # Main Content with gr.Row(): # Left Sidebar with gr.Column(scale=1): # Configuration Panel with gr.Column(elem_classes="lcars-panel"): pass # Canvas Artifacts with gr.Column(elem_classes="lcars-panel"): gr.Markdown("""### 🎨 CANVAS ARTIFACTS""") artifact_display = gr.JSON(label="") with gr.Row(): refresh_artifacts_btn = gr.Button("🔄 Refresh", elem_classes="lcars-button") clear_canvas_btn = gr.Button("🗑️ Clear Canvas", elem_classes="lcars-button") # Main Content Area with gr.Column(scale=2): # Code Canvas with gr.Accordion("💻 COLLABORATIVE CODE CANVAS", open=False): code_editor = gr.Code(interactive=True, value="# Welcome to LCARS Collaborative Canvas\nprint('Hello, Starfleet!')", language="python", lines=15, label="" ) with gr.Row(): load_to_chat_btn = gr.Button("💬 Discuss Code", elem_classes="lcars-button") analyze_btn = gr.Button("🔍 Analyze", elem_classes="lcars-button") optimize_btn = gr.Button("⚡ Optimize", elem_classes="lcars-button") # Chat Interface with gr.Column(elem_classes="lcars-panel"): gr.Markdown("""### 💬 MISSION LOG""") chatbot = gr.Chatbot(label="", height=300) with gr.Row(): message_input = gr.Textbox( placeholder="Enter your command or query...", show_label=False, lines=2, scale=4 ) send_btn = gr.Button("🚀 SEND", elem_classes="lcars-button", scale=1) # Status with gr.Row(): status_display = gr.Textbox( value="LCARS terminal operational. Awaiting commands.", label="Status", max_lines=2 ) with gr.Column(scale=0): clear_chat_btn = gr.Button("🗑️ Clear Chat", elem_classes="lcars-button") new_session_btn = gr.Button("🆕 New Session", elem_classes="lcars-button") # Event handlers are connected here, no change needed async def process_message(message, history, speech_enabled=True): if not message.strip(): return "", history, "Please enter a message" history = history + [[message, None]] try: # Fixed: Uses the new chat_with_canvas method which includes canvas context response = await self.chat_with_canvas( message, self.current_conversation, include_canvas=True ) history[-1][1] = response if speech_enabled and self.speech_enabled: self.speak(response) artifacts = self.get_canvas_summary(self.current_conversation) status = f"✅ Response received. Canvas artifacts: {len(artifacts)}" return "", history, status, artifacts except Exception as e: error_msg = f"❌ Error: {str(e)}" history[-1][1] = error_msg return "", history, error_msg, self.get_canvas_summary(self.current_conversation) def get_artifacts(): return self.get_canvas_summary(self.current_conversation) def clear_canvas(): self.clear_canvas(self.current_conversation) return [], "✅ Canvas cleared" def clear_chat(): self.clear_conversation(self.current_conversation) return [], "✅ Chat cleared" def new_session(): self.clear_conversation(self.current_conversation) self.clear_canvas(self.current_conversation) return [], "# New session started\nprint('Ready!')", "🆕 New session started", [] # Connect events send_btn.click(process_message, inputs=[message_input, chatbot], outputs=[message_input, chatbot, status_display, artifact_display]) message_input.submit(process_message, inputs=[message_input, chatbot], outputs=[message_input, chatbot, status_display, artifact_display]) refresh_artifacts_btn.click(get_artifacts, outputs=artifact_display) clear_canvas_btn.click(clear_canvas, outputs=[artifact_display, status_display]) clear_chat_btn.click(clear_chat, outputs=[chatbot, status_display]) new_session_btn.click(new_session, outputs=[chatbot, code_editor, status_display, artifact_display]) return interface def _register_event_handlers(self): """Register internal event handlers for response routing""" RegisterEvent("llm_internal_response", self._handle_internal_response) def _handle_internal_response(self, response: LLMResponse): """Route responses to the appropriate request handlers""" console.log(f"[bold cyan]Handling internal response for: {response.request_id}[/bold cyan]") request = None with self.pending_requests_lock: if response.request_id in self.pending_requests: request = self.pending_requests[response.request_id] del self.pending_requests[response.request_id] console.log(f"Found pending request for: {response.request_id}") else: console.log(f"No pending request found for: {response.request_id}", style="yellow") return # Raise the specific response event if request.response_event: console.log(f"[bold green]Raising event: {request.response_event}[/bold green]") RaiseEvent(request.response_event, response) # Call callback if provided if request.callback: try: console.log(f"[bold yellow]Calling callback for: {response.request_id}[/bold yellow]") request.callback(response) except Exception as e: console.log(f"Error in callback: {e}", style="bold red") def _add_to_conversation_history(self, conversation_id: str, message: LLMMessage): """Add message to conversation history""" if conversation_id not in self.conversations: self.conversations[conversation_id] = [] self.conversations[conversation_id].append(message) # Trim history if too long if len(self.conversations[conversation_id]) > self.max_history_length * 2: self.conversations[conversation_id] = self.conversations[conversation_id][-(self.max_history_length * 2):] def _build_messages_from_conversation(self, conversation_id: str, new_message: LLMMessage) -> List[Dict[str, str]]: """Build message list from conversation history""" messages = [] # Add system prompt if self.system_prompt: messages.append({"role": "system", "content": self.system_prompt}) # Add conversation history if conversation_id in self.conversations: for msg in self.conversations[conversation_id][-self.max_history_length:]: messages.append({"role": msg.role, "content": msg.content}) # Add the new message messages.append({"role": new_message.role, "content": new_message.content}) return messages def _process_llm_request(self, request: LLMRequest): """Process a single LLM request""" console.log(f"[bold green]Processing LLM request: {request.message.message_id}[/bold green]") try: # Build messages for LLM messages = self._build_messages_from_conversation( request.message.conversation_id or "default", request.message ) console.log(f"Calling LLM with {len(messages)} messages") # Call LLM - Use sync call for thread compatibility response_content = self._call_llm_sync(messages) console.log(f"[bold green]LLM response received: {response_content}...[/bold green]") # Create response message response_message = LLMMessage( role="assistant", content=response_content, conversation_id=request.message.conversation_id, metadata={"request_id": request.message.message_id} ) # Update conversation history self._add_to_conversation_history( request.message.conversation_id or "default", request.message ) self._add_to_conversation_history( request.message.conversation_id or "default", response_message ) # Create and send response response = LLMResponse( message=response_message, request_id=request.message.message_id, success=True ) console.log(f"[bold blue]Sending internal response for: {request.message.message_id}[/bold blue]") RaiseEvent("llm_internal_response", response) except Exception as e: console.log(f"[bold red]Error processing LLM request: {e}[/bold red]") traceback.print_exc() # Create error response error_response = LLMResponse( message=LLMMessage( role="system", content=f"Error: {str(e)}", conversation_id=request.message.conversation_id ), request_id=request.message.message_id, success=False, error=str(e) ) RaiseEvent("llm_internal_response", error_response) def _call_llm_sync(self, messages: List[Dict[str, str]]) -> str: """Sync call to the LLM with retry logic""" console.log(f"Making LLM call to {self.model_id}") for attempt in range(self.max_retries): try: response = CLIENT.chat.completions.create( model=self.model_id, messages=messages, temperature=self.temperature, max_tokens=self.max_tokens ) content = response.choices[0].message.content console.log(f"LLM call successful, response length: {len(content)}") return content except Exception as e: console.log(f"LLM call attempt {attempt + 1} failed: {e}") if attempt == self.max_retries - 1: raise e # Wait before retry def _process_queue(self): """Main queue processing loop""" console.log("[bold cyan]LLM Agent queue processor started[/bold cyan]") while not self._stop_event.is_set(): try: request = self.request_queue.get(timeout=1.0) if request: console.log(f"Got request from queue: {request.message.message_id}") self._process_llm_request(request) self.request_queue.task_done() except Empty: continue except Exception as e: console.log(f"Error in queue processing: {e}", style="bold red") traceback.print_exc() console.log("[bold cyan]LLM Agent queue processor stopped[/bold cyan]") def send_message( self, content: str, role: str = "user", conversation_id: str = None, response_event: str = None, callback: Callable = None, metadata: Dict = None ) -> str: """Send a message to the LLM and get response via events""" if not self.is_running: raise RuntimeError("LLM Agent is not running. Call start() first.") # Create message message = LLMMessage( role=role, content=content, conversation_id=conversation_id, metadata=metadata or {} ) # Create request request = LLMRequest( message=message, response_event=response_event, callback=callback ) # Store in pending requests BEFORE adding to queue with self.pending_requests_lock: self.pending_requests[message.message_id] = request console.log(f"Added to pending requests: {message.message_id}") # Add to queue try: self.request_queue.put(request, timeout=5.0) console.log(f"[bold magenta]Message queued: {message.message_id}, Content: {content[:50]}...[/bold magenta]") return message.message_id except queue.Full: console.log(f"[bold red]Queue full, cannot send message[/bold red]") with self.pending_requests_lock: if message.message_id in self.pending_requests: del self.pending_requests[message.message_id] raise RuntimeError("LLM Agent queue is full") async def chat(self, messages: List[Dict[str, str]]) -> str: """ Async chat method that sends message via queue and returns response string. This is the main method you should use. """ # Create future for the response loop = asyncio.get_event_loop() response_future = loop.create_future() def chat_callback(response: LLMResponse): """Callback when LLM responds - thread-safe""" console.log(f"[bold yellow]✓ CHAT CALLBACK TRIGGERED![/bold yellow]") if not response_future.done(): if response.success: content = response.message.content console.log(f"Callback received content: {content}...") # Schedule setting the future result on the main event loop loop.call_soon_threadsafe(response_future.set_result, content) else: console.log(f"Error in response: {response.error}") error_msg = f"❌ Error: {response.error}" loop.call_soon_threadsafe(response_future.set_result, error_msg) else: console.log(f"[bold red]Future already done, ignoring callback[/bold red]") console.log(f"Sending message to LLM agent...") # Extract the actual message content from the messages list user_message = "" for msg in messages: if msg.get("role") == "user": user_message = msg.get("content", "") break if not user_message.strip(): return "" # Send message with callback using the queue system try: message_id = self.send_message( content=user_message, conversation_id="default", callback=chat_callback ) console.log(f"Message sent with ID: {message_id}, waiting for response...") # Wait for the response and return it try: response = await asyncio.wait_for(response_future, timeout=self.timeout) console.log(f"[bold green]✓ Chat complete! Response length: {len(response)}[/bold green]") return response except asyncio.TimeoutError: console.log("[bold red]Response timeout[/bold red]") # Clean up the pending request with self.pending_requests_lock: if message_id in self.pending_requests: del self.pending_requests[message_id] return "❌ Response timeout - check if LLM server is running" except Exception as e: console.log(f"[bold red]Error sending message: {e}[/bold red]") traceback.print_exc() return f"❌ Error sending message: {e}" def start(self): """Start the LLM agent""" if not self.is_running: self.is_running = True self._stop_event.clear() self.processing_thread = Thread(target=self._process_queue, daemon=True) self.processing_thread.start() console.log("[bold green]LLM Agent started[/bold green]") def stop(self): """Stop the LLM agent""" console.log("Stopping LLM Agent...") self._stop_event.set() if self.processing_thread and self.processing_thread.is_alive(): self.processing_thread.join(timeout=10) self.is_running = False console.log("LLM Agent stopped") def get_conversation_history(self, conversation_id: str = "default") -> List[LLMMessage]: """Get conversation history""" return self.conversations.get(conversation_id, [])[:] def clear_conversation(self, conversation_id: str = "default"): """Clear conversation history""" if conversation_id in self.conversations: del self.conversations[conversation_id] async def _chat(self, messages: List[Dict[str, str]]) -> str: return await self._generate(messages) @staticmethod async def openai_generate(messages: List[Dict[str, str]], max_tokens: int = 8096, temperature: float = 0.4, model: str = BASEMODEL_ID,tools=None) -> str: """Static method for generating responses using OpenAI API""" try: resp = await BASE_CLIENT.chat.completions.create( model=model, messages=messages, temperature=temperature, max_tokens=max_tokens, tools=tools ) response_text = resp.choices[0].message.content or "" return response_text except Exception as e: console.log(f"[bold red]Error in openai_generate: {e}[/bold red]") return f"[LLM_Agent Error - openai_generate: {str(e)}]" async def _call_(self, messages: List[Dict[str, str]]) -> str: """Internal call method using instance client""" try: resp = await self.async_client.chat.completions.create( model=self.model_id, messages=messages, temperature=self.temperature, max_tokens=self.max_tokens ) response_text = resp.choices[0].message.content or "" return response_text except Exception as e: console.log(f"[bold red]Error in _call_: {e}[/bold red]") return f"[LLM_Agent Error - _call_: {str(e)}]" @staticmethod def CreateClient(base_url: str, api_key: str) -> AsyncOpenAI: '''Create async OpenAI Client required for multi tasking''' return AsyncOpenAI( base_url=base_url, api_key=api_key ) @staticmethod async def fetch_available_models(base_url: str, api_key: str) -> List[str]: """Fetches available models from the OpenAI API.""" try: async_client = AsyncOpenAI(base_url=base_url, api_key=api_key) models = await async_client.models.list() model_choices = [model.id for model in models.data] return model_choices except Exception as e: console.log(f"[bold red]LLM_Agent Error fetching models: {e}[/bold red]") return ["LLM_Agent Error fetching models"] def get_models(self) -> List[str]: """Get available models using instance credentials""" return asyncio.run(self.fetch_available_models(self.base_url, self.api_key)) def get_queue_size(self) -> int: """Get current queue size""" return self.request_queue.qsize() def get_pending_requests_count(self) -> int: """Get number of pending requests""" with self.pending_requests_lock: return len(self.pending_requests) def get_status(self) : """Get agent status information""" return str({ "is_running": self.is_running, "queue_size": self.get_queue_size(), "pending_requests": self.get_pending_requests_count(), "conversations_count": len(self.conversations), "model": self.model_id, "BaseURL": self.base_url }) def direct_chat(self, user_message: str, conversation_id: str = "default") -> str: """ Send a message and get a response using direct API call. """ try: # Create message object message = LLMMessage(role="user", content=user_message, conversation_id=conversation_id) # Build messages for LLM messages = self._build_messages_from_conversation(conversation_id, message) console.log(f"Calling LLM at {self.base_url} with {len(messages)} messages") # Make the direct API call response = CLIENT.chat.completions.create( model=self.model_id, messages=messages, temperature=self.temperature, max_tokens=self.max_tokens ) response_content = response.choices[0].message.content console.log(f"[bold green]LLM response received: {response_content[:50]}...[/bold green]") # Update conversation history self._add_to_conversation_history(conversation_id, message) response_message = LLMMessage(role="assistant", content=response_content, conversation_id=conversation_id) self._add_to_conversation_history(conversation_id, response_message) return response_content except Exception as e: console.log(f"[bold red]Error in chat: {e}[/bold red]") traceback.print_exc() return f"❌ Error communicating with LLM: {str(e)}" # --- TEST Canvas Methods --- def add_artifact(self, conversation_id: str, artifact_type: str, content: str, title: str = "", metadata: Dict = None): artifact = CanvasArtifact( id=str(uuid.uuid4()), type=artifact_type, content=content, title=title, timestamp=time.time(), metadata=metadata or {} ) self.canvas_artifacts[conversation_id].append(artifact) def get_canvas_artifacts(self, conversation_id: str = "default") -> List[CanvasArtifact]: return self.canvas_artifacts.get(conversation_id, []) def get_canvas_summary(self, conversation_id: str = "default") -> List[Dict[str, Any]]: artifacts = self.get_canvas_artifacts(conversation_id) return [{"id": a.id, "type": a.type, "title": a.title, "timestamp": a.timestamp} for a in artifacts] def clear_canvas(self, conversation_id: str = "default"): if conversation_id in self.canvas_artifacts: self.canvas_artifacts[conversation_id] = [] def clear_conversation(self, conversation_id: str = "default"): if conversation_id in self.conversations: del self.conversations[conversation_id] def get_latest_code_artifact(self, conversation_id: str) -> Optional[str]: """Get the most recent code artifact content""" if conversation_id not in self.canvas_artifacts: return None for artifact in reversed(self.canvas_artifacts[conversation_id]): if artifact.type == "code": return artifact.content return None def get_canvas_context(self, conversation_id: str) -> str: """Get formatted canvas context for LLM prompts""" if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]: return "" context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="] for artifact in self.canvas_artifacts[conversation_id][-10:]: # Last 10 artifacts context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---") preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content context_lines.append(preview) return "\n".join(context_lines) + "\n=================================\n" def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional[CanvasArtifact]: """Get specific artifact by ID""" if conversation_id not in self.canvas_artifacts: return None for artifact in self.canvas_artifacts[conversation_id]: if artifact.id == artifact_id: return artifact return None def _extract_artifacts_to_canvas(self, response: str, conversation_id: str): """Automatically extract code blocks and add to canvas""" # Find all code blocks with optional language specification code_blocks = re.findall(r'```(?:(\w+)\n)?(.*?)```', response, re.DOTALL) for i, (lang, code_block) in enumerate(code_blocks): if len(code_block.strip()) > 10: # Only add substantial code blocks self.add_artifact_to_canvas( conversation_id, code_block.strip(), "code", f"code_snippet_{lang or 'unknown'}_{len(self.canvas_artifacts.get(conversation_id, [])) + 1}" ) async def chat_with_canvas(self, message: str, conversation_id: str, include_canvas: bool = False): """Chat method that can optionally include canvas context.""" messages = [{"role": "user", "content": message}] if include_canvas: artifacts = self.get_canvas_summary(conversation_id) if artifacts: canvas_context = "Current Canvas Context:\\n" + "\\n".join([ f"- [{art['type'].upper()}] {art['title'] or 'Untitled'}: {art['content_preview']}" for art in artifacts ]) messages.insert(0, {"role": "system", "content": canvas_context}) return await self.chat(messages) def respond( message, history: list[dict[str, str]], system_message, max_tokens, temperature, top_p, hf_token: gr.OAuthToken, ): """ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference """ client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b") messages = [{"role": "system", "content": system_message}] messages.extend(history) messages.append({"role": "user", "content": message}) response = "" for message in client.chat_completion( messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p, ): choices = message.choices token = "" if len(choices) and choices[0].delta.content: token = choices[0].delta.content response += token yield response custom_css = """ .gradio-container { background-color: rgba(243, 48, 4, 0.85); background-image: url("https://huggingface.co/LeroyDyer/ImageFiles/resolve/main/LCARS_PANEL.png"); background-size: cover; background-position: center; background-repeat: no-repeat; border-radius: 20px; } .agent-card { padding: 10px; margin: 5px 0; border-radius: 8px; background: #f0f8ff; } .agent-card.active { background: #e6f2ff; border-left: 3px solid #3399FF; } .status-indicator { display: inline-block; width: 10px; height: 10px; border-radius: 50%; margin-right: 5px; } .online { background-color: #4CAF50; } .offline { background-color: #F44336; } .console-log { font-family: monospace; font-size: 0.9em; background: #1e1e1e; color: #00ff00; padding: 10px; border-radius: 5px; height: 500px; overflow-y: auto; } .log-entry { margin: 2px 0; } .log-public { color: #00ff00; } .log-direct { color: #ffaa00; } .log-system { color: #00aaff; } .message-controls { background: #f5f5f5; padding: 10px; border-radius: 5px; margin-bottom: 10px; } .console-log { font-family: monospace; font-size: 0.85em; background: #1e1e1e; color: #00ff00; padding: 10px; border-radius: 5px; height: 600px; overflow-y: auto; word-wrap: break-word; white-space: pre-wrap; } .log-entry { margin: 4px 0; padding: 2px 4px; border-left: 2px solid #333; } .log-public { color: #00ff00; border-left-color: #00aa00; } .log-direct { color: #ffaa00; border-left-color: #ff8800; } .log-system { color: #00aaff; border-left-color: #0088ff; } .lcars-container { background: #000d1a; color: #7EC8E3; font-family: 'Courier New', monospace; padding: 20px; border-radius: 0; } .lcars-title { color: #7EC8E3; text-align: center; font-size: 2.2em; text-shadow: 0 0 10px #7EC8E3, 0 0 20px rgba(126, 200, 227, 0.5); margin-bottom: 10px; letter-spacing: 2px; } .lcars-subtitle { color: #aaa; text-align: center; font-style: italic; margin-bottom: 30px; } /* Glowing Input Boxes */ .gr-box input, .gr-box textarea { background: #001122 !important; color: #7EC8E3 !important; border: 1px solid #7EC8E3 !important; box-shadow: 0 0 8px rgba(126, 200, 227, 0.3) !important; font-family: 'Courier New', monospace !important; } .gr-button { background: linear-gradient(90deg, #003366, #0055aa) !important; color: #7EC8E3 !important; border: 1px solid #7EC8E3 !important; box-shadow: 0 0 10px rgba(126, 200, 227, 0.4) !important; font-family: 'Courier New', monospace !important; font-weight: bold !important; letter-spacing: 1px; transition: all 0.3s ease; } .gr-button:hover { background: linear-gradient(90deg, #004488, #0077cc) !important; box-shadow: 0 0 15px rgba(126, 200, 227, 0.6) !important; transform: scale(1.05); } /* Output Panels */ .lcars-output-panel { border: 2px solid #7EC8E3; border-radius: 12px; padding: 15px; background: #00141a; box-shadow: 0 0 15px rgba(126, 200, 227, 0.2); margin-top: 10px; } .lcars-error { color: #ff6b6b; font-weight: bold; text-shadow: 0 0 5px rgba(255,107,107,0.5); padding: 20px; text-align: center; } .lcars-log { max-height: 400px; overflow-y: auto; background: #001018; border: 1px solid #7EC8E3; border-radius: 8px; padding: 10px; } .lcars-step { margin-bottom: 15px; padding: 10px; background: #000c14; border-left: 3px solid #7EC8E3; } .lcars-step h4 { margin: 0 0 8px 0; color: #7EC8E3; } .lcars-step pre { white-space: pre-wrap; background: #00080c; padding: 10px; border-radius: 5px; color: #ccc; font-size: 0.9em; margin: 10px 0 0 0; } code { background: #000f1f; color: #7EC8E3; padding: 2px 6px; border-radius: 4px; font-family: 'Courier New'; } @keyframes glow-pulse { 0% { opacity: 0.8; } 50% { opacity: 1; } 100% { opacity: 0.8; } } iframe { animation: glow-pulse 2.5s infinite ease-in-out; } .gr-form { background: transparent !important; } /* ========================= LCARS47 Bridge Theme Seamless Drop-In ========================= */ :root { /* Core LCARS Palette */ --lcars-bg: #000814; --lcars-panel: #111827; --lcars-red: #CC6666; --lcars-gold: #FFCC66; --lcars-cyan: #66CCCC; --lcars-text: #FFFFFF; --lcars-muted: #AAAAAA; --lcars-orange: #FF9966; --lcars-purple: #663399; --lcars-rose: #FF6F91; --lcars-gold: #FFC766; --lcars-peach: #FFCC99; --lcars-blue: #9999FF; --lcars-lavender: #CCCCFF; --lcars-tan: #FFCC99; --lcars-rust: #CC6666; --lcars-gold: #FFCC66; --lcars-bg: #F5F0FF; --lcars-panel: #E8E0F5; --lcars-text: #2D2D5F; --lcars-text-light: #5F5F8F; --lcars-border: #9999CC; --lcars-accent: #6666CC; --lcars-dark: #111317; /* Shared component vars */ --radius-large: 24px; --radius-full: 50%; --pulse-speed: 2s; --font-stack: "Arial Narrow", "Helvetica Neue", sans-serif; } .lcars-thinking {{ background: linear-gradient(135deg, {self.colors['panel']}, #001122) !important; border-left: 4px solid {self.colors['info']} !important; color: {self.colors['text']} !important; padding: 15px !important; border-radius: 0px 15px 15px 0px !important; }} .gradio-container {{background-color: rgba(243, 48, 4, 0.85); background: linear-gradient(135deg, {self.colors['background']}, #001122) !important; color: {self.colors['text']} !important; font-family: 'Courier New', monospace !important; background-image: url("https://huggingface.co/LeroyDyer/ImageFiles/resolve/main/LCARS_PANEL.png"); background-size: cover; background-position: center; background-repeat: no-repeat; border-radius: 20px; }} #left-panel { flex: 0 0 250px !important; /* fixed width */ max-width: 350px !important; padding: 20px !important; } @keyframes pulse { 0% { box-shadow: 0 0 5px var(--lcars-orange); } 50% { box-shadow: 0 0 20px var(--lcars-orange); } 100% { box-shadow: 0 0 5px var(--lcars-orange); } } .pulse-animation { animation: pulse 2s infinite; } /* Panels */ .lcars-panel { background-color: var(--lcars-panel); border-radius: var(--radius-large); padding: 1rem; margin: 0.5rem; box-shadow: 0 0 8px rgba(0,0,0,0.6); } /* Inputs & Outputs */ .lcars-input {{ background: {self.colors['panel']} !important; color: {self.colors['text']} !important; border: 2px solid {self.colors['primary']} !important; border-radius: 0px 10px 10px 0px !important; padding: 10px !important; }} .lcars-output {{ background: linear-gradient(135deg, {self.colors['background']}, {self.colors['panel']}) !important; color: {self.colors['text']} !important; border: 2px solid {self.colors['success']} !important; border-radius: 0px 15px 15px 0px !important; padding: 15px !important; font-family: 'Courier New', monospace !important; }} /* Responsive */ @media (max-width: 768px) { .gradio-container { padding: 10px; } #lcars_logo { height: 150px !important; width: 150px !important; } } /* Code & Thinking blocks */ .lcars-code {{ background: {self.colors['background']} !important; color: {self.colors['success']} !important; border: 1px solid {self.colors['success']} !important; border-radius: 5px !important; font-family: 'Courier New', monospace !important; }} .lcars-thinking {{ background: linear-gradient(135deg, {self.colors['panel']}, #001122) !important; border-left: 4px solid {self.colors['info']} !important; color: {self.colors['text']} !important; padding: 15px !important; border-radius: 0px 15px 15px 0px !important; }} .lcars-artifact {{ background: {self.colors['panel']} !important; border: 2px solid {self.colors['border']} !important; color: {self.colors['text']} !important; border-radius: 0px 15px 15px 0px !important; padding: 15px !important; margin: 10px 0 !important; }} /* Headers */ .lcars-header { background: var(--lcars-red); color: var(--lcars-text); border-radius: var(--radius-large); padding: 0.75rem 1.5rem; text-transform: uppercase; font-size: 1.25rem; } /* Chatbox */ .chatbox > div { background: var(--lcars-dark) !important; border-radius: 18px !important; border: 2px solid var(--lcars-purple) !important; } /* ========================= Buttons / Tabs / Chips ========================= */ button, .lcars-tab, .lcars-chip { background: var(--lcars-gold); border: none; border-radius: var(--radius-large); padding: 0.5rem 1rem; margin: 0.25rem; color: var(--lcars-bg); font-weight: bold; font-size: 1rem; transition: all 0.3s ease-in-out; cursor: pointer; } button:hover, .lcars-tab:hover, .lcars-chip:hover { background: var(--lcars-orange); color: var(--lcars-text); } /* Circular buttons */ button.round, .lcars-chip.round { border-radius: var(--radius-full); padding: 0.75rem; width: 3rem; height: 3rem; text-align: center; } /* ========================= Containers (Code, JSON, Chat, Artifacts) ========================= */ .json-container, .code-container, .chat-container, .artifact-container { border-radius: var(--radius-large); padding: 1rem; margin: 0.5rem 0; background: var(--lcars-panel); color: var(--lcars-text); font-family: monospace; font-size: 0.9rem; line-height: 1.4; white-space: pre-wrap; overflow-x: auto; } /* ========================= Artifact / Chat / Code Borders ========================= */ .artifact-container { border: 3px solid var(--lcars-gold); animation: pulse-yellow var(--pulse-speed) infinite; } .chat-container { border: 3px solid var(--lcars-red); animation: pulse-red var(--pulse-speed) infinite; } .code-container { border: 3px solid var(--lcars-purple); animation: pulse-orange var(--pulse-speed) infinite; } /* ========================= Animations ========================= */ @keyframes pulse-red { 0%, 100% { box-shadow: 0 0 5px var(--lcars-red); } 50% { box-shadow: 0 0 20px var(--lcars-red); } } @keyframes pulse-yellow { 0%, 100% { box-shadow: 0 0 5px var(--lcars-gold); } 50% { box-shadow: 0 0 20px var(--lcars-gold); } } @keyframes pulse-orange { 0%, 100% { box-shadow: 0 0 5px var(--lcars-orange); } 50% { box-shadow: 0 0 20px var(--lcars-orange); } } /* Thought styling */ .thought { opacity: 0.8; font-family: "Courier New", monospace; border: 1px rgb(229, 128, 12) solid; padding: 10px; border-radius: 5px; display: none; box-shadow: 0 0 20px rgba(255, 153, 0, 0.932); } .thought-prompt { opacity: 0.8; font-family: "Courier New", monospace; } /* ========================= Metadata & Thought Blocks ========================= */ .metadata-display, .thought-block { background: var(--lcars-blue); border-radius: var(--radius-large); padding: 0.75rem; margin: 0.5rem 0; color: var(--lcars-bg); font-weight: bold; } .metadata-display { background: var(--lcars-panel); border-left: 4px solid var(--lcars-blue); box-shadow: 0 0 20px rgba(255, 153, 0, 0.932); padding: 10px; border-radius: 5px; overflow-y: auto; max-height: 300px; } .metadata-display .json-container { font-family: monospace; font-size: 0.9em; background: #6b50111a; } .primary { background: linear-gradient(45deg, var(--lcars-orange), #ffaa33) !important; color: hwb(90 7% 5% / 0.102); font-family: "Courier New", monospace; border: 1px rgb(229, 128, 12) solid; } .secondary { background: linear-gradient(45deg, var(--lcars-blue), #33aacc) !important; color: #6b50111a; font-family: "Courier New", monospace; border: 1px rgb(229, 128, 12) solid; box-shadow: 0 0 20px rgba(255, 153, 0, 0.932); } ::-webkit-scrollbar-thumb:hover { background-color: var(--lcars-gold); } #lcars_logo { border-radius: 15px; border: 2px solid var(--lcars-orange); box-shadow: 0 0 20px rgba(255, 153, 0, 0.932); } .lcars-tab {{ background: {self.colors['panel']} !important; color: {self.colors['text']} !important; border: 2px solid {self.colors['primary']} !important; border-radius: 0px 10px 0px 0px !important; }} .lcars-tab.selected {{ background: {self.colors['primary']} !important; color: {self.colors['background']} !important; }} .lcars-panel.lcars-empty { text-align: center; font-style: italic; color: var(--lcars-text-light); } .lcars-panel.lcars-error { background: #FFE5E5; border-color: var(--lcars-rust); color: #CC0000; } /* Input fields */ .lcars-input input, .lcars-input textarea { background: white !important; border: 2px solid var(--lcars-border) !important; border-radius: 8px !important; color: var(--lcars-text) !important; padding: 10px !important; font-size: 14px !important; } .lcars-input input:focus, .lcars-input textarea:focus { border-color: var(--lcars-accent) !important; outline: none !important; box-shadow: 0 0 8px rgba(102, 102, 204, 0.3) !important; } /* Dropdowns and selects */ .lcars-dropdown select, .lcars-dropdown input { background: white !important; border: 2px solid var(--lcars-border) !important; border-radius: 8px !important; color: var(--lcars-text) !important; padding: 8px !important; } /* Checkboxes */ .lcars-checkbox label { background: var(--lcars-panel) !important; border: 2px solid var(--lcars-border) !important; border-radius: 8px !important; padding: 8px 12px !important; margin: 4px !important; transition: all 0.2s ease !important; } .lcars-checkbox label:hover { background: var(--lcars-lavender) !important; border-color: var(--lcars-accent) !important; } /* Radio buttons */ .lcars-radio label { background: var(--lcars-panel) !important; border: 2px solid var(--lcars-border) !important; border-radius: 20px !important; padding: 8px 16px !important; margin: 4px !important; } /* Display fields */ .lcars-display input { background: var(--lcars-panel) !important; border: 2px solid var(--lcars-border) !important; border-radius: 8px !important; color: var(--lcars-text) !important; font-family: 'Courier New', monospace !important; padding: 10px !important; } /* Accordions */ .lcars-accordion { background: var(--lcars-panel) !important; border: 2px solid var(--lcars-border) !important; border-radius: 12px !important; margin: 8px 0 !important; } .lcars-accordion summary { background: linear-gradient(135deg, var(--lcars-orange), var(--lcars-peach)) !important; color: var(--lcars-text) !important; font-weight: bold !important; padding: 12px !important; border-radius: 10px !important; cursor: pointer !important; } /* Participant Cards & Collapsible Layout */ .lcars-participants-container { display: flex; flex-direction: column; gap: 15px; width: 100%; } /* Base Card Styles */ .lcars-collapsible-card { border: 1px solid #444; border-radius: 8px; background: #1a1a1a; color: #fff; overflow: hidden; transition: all 0.3s ease; } .lcars-collapsible-card.collapsed .lcars-participant-expanded { display: none; } .lcars-collapsible-card.expanded .lcars-participant-collapsed { display: none; } .lcars-collapsible-card.expanded .lcars-collapse-icon { transform: rotate(90deg); } /* Card Headers */ .lcars-participant-header { background: #3366cc; color: white; padding: 12px 15px; display: flex; justify-content: space-between; align-items: center; cursor: pointer; border-bottom: 2px solid #ffcc00; transition: background 0.2s ease; } .lcars-participant-header:hover { background: #2a55a8; } .lcars-participant-name { font-weight: bold; font-size: 1.1em; } .lcars-collapse-icon { transition: transform 0.3s ease; font-size: 0.8em; } /* Badges */ .lcars-badge-manager { background: #ffcc00; color: #000; padding: 4px 8px; border-radius: 12px; font-size: 0.8em; font-weight: bold; letter-spacing: 1px; box-shadow: 0 2px 4px rgba(255, 215, 0, 0.3); } .lcars-badge-agent { background: #00cc66; color: #000; padding: 4px 8px; border-radius: 12px; font-size: 0.8em; font-weight: bold; letter-spacing: 1px; box-shadow: 0 2px 4px rgba(0, 204, 102, 0.3); } .lcars-badge-human { background: #9966cc; color: #fff; padding: 4px 8px; border-radius: 12px; font-size: 0.8em; font-weight: bold; letter-spacing: 1px; box-shadow: 0 2px 4px rgba(153, 102, 255, 0.3); } /* Card Content Sections */ .lcars-participant-collapsed, .lcars-participant-expanded { padding: 15px; } .lcars-participant-preview { display: flex; flex-direction: column; gap: 8px; } .lcars-info-section { margin-bottom: 20px; padding-bottom: 15px; border-bottom: 1px solid #333; } .lcars-info-section:last-child { border-bottom: none; margin-bottom: 0; } .lcars-section-title { color: #ffcc00; font-weight: bold; font-size: 0.9em; text-transform: uppercase; letter-spacing: 1px; margin-bottom: 10px; border-bottom: 1px solid #444; padding-bottom: 5px; } /* Info Rows */ .lcars-info-row { display: flex; margin-bottom: 8px; line-height: 1.4; color: var(--lcars-text-light); } .lcars-info-row.full-width { flex-direction: column; } .lcars-label { color: #ffcc00; font-weight: bold; min-width: 120px; margin-right: 10px; font-size: 0.9em; } /* Lists */ .lcars-goals-list li { margin-bottom: 5px; line-height: 1.4; color: #e0e0e0; } /* Template Styling */ .lcars-template-container { background: rgba(255, 255, 255, 0.05); border: 1px solid #444; border-radius: 4px; padding: 10px; max-height: 200px; overflow-y: auto; } .lcars-template-preview { color: #e0e0e0; font-family: monospace; font-size: 0.85em; line-height: 1.4; white-space: pre-wrap; } .lcars-template-truncated { color: #ffcc00; font-size: 0.8em; font-style: italic; margin-top: 8px; } .lcars-no-template { color: #888; font-style: italic; } /* More Skills Indicator */ .lcars-more-skills { color: #ffcc00; font-size: 0.8em; font-style: italic; margin-top: 5px; display: block; } /* Agent Details Panel */ .lcars-agent-details { background: white; border: 3px solid var(--lcars-border); border-radius: 12px; overflow: hidden; box-shadow: 0 4px 12px rgba(102, 102, 204, 0.2); } .lcars-agent-header { background: linear-gradient(135deg, var(--lcars-blue), var(--lcars-lavender)); padding: 16px; display: flex; justify-content: space-between; align-items: center; } .lcars-agent-name { font-size: 20px; font-weight: bold; color: white; text-transform: uppercase; letter-spacing: 2px; } .lcars-status-connected { background: #66CC66; color: white; padding: 6px 14px; border-radius: 16px; font-size: 12px; font-weight: bold; } .lcars-status-available { background: var(--lcars-orange); color: white; padding: 6px 14px; border-radius: 16px; font-size: 12px; font-weight: bold; } .lcars-agent-body { padding: 18px; } .lcars-detail-row { margin: 12px 0; display: flex; gap: 10px; } .lcars-detail-label { font-weight: bold; color: var(--lcars-accent); min-width: 120px; text-transform: uppercase; font-size: 12px; letter-spacing: 1px; } .lcars-detail-value { color: var(--lcars-text); flex: 1; } .lcars-model-badge { background: var(--lcars-panel); color: var(--lcars-accent); padding: 4px 10px; border-radius: 6px; font-family: 'Courier New', monospace; font-size: 12px; } .lcars-detail-section { margin: 16px 0; padding: 12px; background: var(--lcars-panel); border-radius: 8px; } .lcars-skills-list { line-height: 2; } .lcars-skill-item { color: var(--lcars-text-light); font-size: 13px; margin-left: 8px; } .lcars-expertise { color: var(--lcars-text-light); font-size: 13px; line-height: 1.8; } /* Pattern Details */ .lcars-pattern-details { border: 1px solid #444; border-radius: 8px; margin: 10px 0; background: #1a1a1a; color: #fff; } .lcars-pattern-header { background: #3366cc; color: white; padding: 12px 15px; font-weight: bold; font-size: 1.1em; text-align: center; border-bottom: 2px solid #ffcc00; } .lcars-pattern-body { padding: 15px; } .lcars-pattern-section { margin-bottom: 20px; display: block; } .lcars-pattern-section:last-child { margin-bottom: 0; } .lcars-pattern-label { font-weight: bold; color: #ffcc00; margin-bottom: 5px; font-size: 0.9em; text-transform: uppercase; letter-spacing: 1px; } .lcars-pattern-text { color: #fa0404; line-height: 1.5; } /* Log display */ .lcars-log-panel { background: #00008734; color: #050505; font-family: 'Courier New', monospace; font-size: 16px; border-radius: 8px; padding: 12px; max-height: 500px; overflow-y: auto; box-shadow: inset 0 2px 8px rgba(0, 0, 0, 0.3); } .lcars-log-panel.lcars-empty { color: #999; text-align: center; font-style: italic; } .lcars-log-entries { display: flex; flex-direction: column; gap: 4px; } .lcars-log-entry { padding: 6px 10px; border-left: 3px solid transparent; border-radius: 3px; transition: all 0.2s ease; } .lcars-log-entry:hover { background: rgba(255, 255, 255, 0.05); } .lcars-log-info { border-left-color: #5c635cda; color: #1636e7; } .lcars-log-error { border-left-color: #202120; color: #1636e7; } .lcars-log-level { font-weight: bold; margin-right: 8px; } /* Chatbot styling */ .lcars-chatbot { border: 3px solid var(--lcars-border) !important; border-radius: 12px !important; background: white !important; } .gradio-container { background-color: rgba(243, 48, 4, 0.85); background-image: url("https://huggingface.co/LeroyDyer/ImageFiles/resolve/main/LCARS_PANEL.png"); background-size: cover; background-position: center; background-repeat: no-repeat; border-radius: 20px; } .tab-nav button { background: var(--lcars-panel) !important; border: 2px solid var(--lcars-border) !important; color: var(--lcars-text) !important; border-radius: 8px 8px 0 0 !important; margin-right: 4px !important; font-weight: bold !important; } .tab-nav button.selected { background: linear-gradient(135deg, var(--lcars-orange), var(--lcars-peach)) !important; color: var(--lcars-text) !important; border-bottom: none !important; } /* Ensure vertical stacking of participants */ .lcars-participants-container { display: flex !important; flex-direction: column !important; gap: 16px !important; width: 100% !important; max-width: 100% !important; margin: 0 auto !important; align-items: stretch !important; /* Ensures full width alignment */ } /* Make sure each participant card respects container flow */ .lcars-participant-card-manager, .lcars-participant-card-agent, .lcars-participant-card-human { display: flex !important; flex-direction: column !important; break-inside: avoid !important; /* Prevents awkward splits in print/PDF */ position: relative !important; width: 100% !important; box-sizing: border-box !important; background: white !important; color: #2D2D5F !important; } .lcars-content { background: rgba(0, 0, 0, 0.95) !important; border: 2px solid #ff9900 !important; color: #ffffff !important; font-family: 'Times New Roman', serif !important; padding: 20px !important; height: 600px !important; overflow-y: auto !important; } .gr-button:hover { background: linear-gradient(45deg, #ffcc00, #ff9900) !important; box-shadow: 0 0 15px rgba(255, 153, 0, 0.8) !important; } .block { background: rgba(0, 0, 0, 0.8) !important; border: 2px solid #ff9900 !important; border-radius: 0px !important; } /* Scrollbar */ ::-webkit-scrollbar {{ width: 12px; }} ::-webkit-scrollbar-track {{ background: {self.colors['background']}; }} ::-webkit-scrollbar-thumb {{ background: {self.colors['primary']}; border-radius: 0px 10px 10px 0px; }} ::-webkit-scrollbar-thumb:hover {{ background: {self.colors['accent']}; }} .lcars-button, button[variant="primary"] { background: linear-gradient(135deg, var(--lcars-orange), var(--lcars-peach)) !important; color: var(--lcars-text) !important; } .lcars-button-add { background: linear-gradient(135deg, var(--lcars-blue), var(--lcars-lavender)) !important; color: white !important; } .lcars-button-send, .lcars-button-task { background: linear-gradient(135deg, var(--lcars-purple), var(--lcars-lavender)) !important; color: white !important; } .lcars-button-remove { background: linear-gradient(135deg, var(--lcars-rust), #FF9999) !important; color: white !important; } .lcars-button-secondary, .lcars-button-create { background: linear-gradient(135deg, var(--lcars-gold), var(--lcars-tan)) !important; color: var(--lcars-text) !important; } .gradio-container {{background-color: rgba(243, 48, 4, 0.85); background: linear-gradient(135deg, {self.colors['background']}, #001122) !important; color: {self.colors['text']} !important; font-family: 'Courier New', monospace !important; background-image: url("https://huggingface.co/LeroyDyer/ImageFiles/resolve/main/LCARS_PANEL.png"); background-size: cover; background-position: center; background-repeat: no-repeat; border-radius: 20px; }} """ # Session management SESSION_FILE = "lcars_session.pkl" ARTIFACTS_FILE = "lcars_artifacts.json" # Initialize the agent agent = LLMAgent( model_id=BASEMODEL_ID, system_prompt="You are L.C.A.R.S - Local Computer Advanced Reasoning System, an advanced AI assistant with capabilities for code generation, analysis, and collaborative problem solving.", temperature=0.7, max_tokens=5000 ) @dataclass class ParsedResponse: """Fixed ParsedResponse data model""" def __init__(self, thinking="", main_content="", code_snippets=None, raw_reasoning="", raw_content=""): self.thinking = thinking self.main_content = main_content self.code_snippets = code_snippets or [] self.raw_reasoning = raw_reasoning self.raw_content = raw_content def execute_python_code(code): """Execute Python code safely and return output""" try: # Create a temporary file temp_file = "temp_execution.py" with open(temp_file, 'w', encoding='utf-8') as f: f.write(code) # Execute the code result = subprocess.run( [sys.executable, temp_file], capture_output=True, text=True, timeout=30 # 30 second timeout ) # Clean up if os.path.exists(temp_file): os.remove(temp_file) output = "" if result.stdout: output += f"**Output:**\n{result.stdout}\n" if result.stderr: output += f"**Errors:**\n{result.stderr}\n" if result.returncode != 0: output += f"**Return code:** {result.returncode}\n" return output.strip() if output else "Code executed (no output)" except subprocess.TimeoutExpired: return "❌ Execution timed out (30 seconds)" except Exception as e: return f"❌ Execution error: {str(e)}" def execute_code_artifact(artifact_id, current_code): """Execute a specific code artifact""" try: artifacts = agent.get_canvas_artifacts(agent.current_conversation) if not artifacts: return "No artifacts available", current_code try: artifact_idx = int(artifact_id) if 0 <= artifact_idx < len(artifacts): artifact = artifacts[artifact_idx] if artifact.type == "code": # Execute the code execution_result = execute_python_code(artifact.content) display_text = f"## 🚀 Executing Artifact #{artifact_idx}\n\n**Title:** {artifact.title}\n\n**Execution Result:**\n{execution_result}" return display_text, artifact.content else: return f"❌ Artifact {artifact_idx} is not code (type: {artifact.type})", current_code else: return f"❌ Invalid artifact ID. Available: 0-{len(artifacts)-1}", current_code except ValueError: return "❌ Please enter a valid numeric artifact ID", current_code except Exception as e: return f"❌ Error: {str(e)}", current_code def execute_current_code(code): """Execute the code currently in the editor""" try: if not code.strip(): return "❌ No code to execute", code execution_result = execute_python_code(code) display_text = f"## 🚀 Code Execution Result\n\n{execution_result}" return display_text, code except Exception as e: return f"❌ Execution error: {str(e)}", code def save_session(): """Save current session to disk""" try: session_data = { 'conversations': agent.conversations, 'current_conversation': agent.current_conversation, 'canvas_artifacts': dict(agent.canvas_artifacts), 'history': getattr(agent, 'display_history', []) } with open(SESSION_FILE, 'wb') as f: pickle.dump(session_data, f) print(f"💾 Session saved to {SESSION_FILE}") return True except Exception as e: print(f"❌ Error saving session: {e}") return False def load_session(): """Load session from disk""" try: if os.path.exists(SESSION_FILE): with open(SESSION_FILE, 'rb') as f: session_data = pickle.load(f) agent.conversations = session_data.get('conversations', {}) agent.current_conversation = session_data.get('current_conversation', 'default') agent.canvas_artifacts = defaultdict(list, session_data.get('canvas_artifacts', {})) agent.display_history = session_data.get('history', []) print(f"📂 Session loaded from {SESSION_FILE}") return True else: print("📂 No existing session found, starting fresh") return False except Exception as e: print(f"❌ Error loading session: {e}") return False def save_artifacts(): """Save artifacts to JSON file""" try: artifacts_data = [] for conv_id, artifacts in agent.canvas_artifacts.items(): for artifact in artifacts: artifacts_data.append({ 'conversation_id': conv_id, 'id': artifact.id, 'type': artifact.type, 'content': artifact.content, 'title': artifact.title, 'timestamp': artifact.timestamp, 'metadata': artifact.metadata }) with open(ARTIFACTS_FILE, 'w', encoding='utf-8') as f: json.dump(artifacts_data, f, indent=2, ensure_ascii=False) print(f"💾 Artifacts saved to {ARTIFACTS_FILE}") return True except Exception as e: print(f"❌ Error saving artifacts: {e}") return False def load_artifacts(): """Load artifacts from JSON file""" try: if os.path.exists(ARTIFACTS_FILE): with open(ARTIFACTS_FILE, 'r', encoding='utf-8') as f: artifacts_data = json.load(f) agent.canvas_artifacts.clear() for artifact_data in artifacts_data: conv_id = artifact_data['conversation_id'] artifact = CanvasArtifact( id=artifact_data['id'], type=artifact_data['type'], content=artifact_data['content'], title=artifact_data['title'], timestamp=artifact_data['timestamp'], metadata=artifact_data.get('metadata', {}) ) agent.canvas_artifacts[conv_id].append(artifact) print(f"📂 Artifacts loaded from {ARTIFACTS_FILE}") return True else: print("📂 No existing artifacts found") return False except Exception as e: print(f"❌ Error loading artifacts: {e}") return False def parse_llm_response(response_text): """Parse LLM response to extract thinking, content, and code snippets""" parsed = ParsedResponse() parsed.raw_content = response_text # Patterns for different response components thinking_patterns = [ r'🧠[^\n]*?(.*?)(?=🤖|💻|🚀|$)', # 🧠 thinking section r'Thinking:[^\n]*?(.*?)(?=Response:|Answer:|$)', # Thinking: section r'Reasoning:[^\n]*?(.*?)(?=Response:|Answer:|$)', # Reasoning: section ] # Try to extract thinking/reasoning thinking_content = "" for pattern in thinking_patterns: thinking_match = re.search(pattern, response_text, re.IGNORECASE | re.DOTALL) if thinking_match: thinking_content = thinking_match.group(1).strip() break if thinking_content: parsed.thinking = thinking_content parsed.raw_reasoning = thinking_content # Remove thinking from main content main_content = re.sub(pattern, '', response_text, flags=re.IGNORECASE | re.DOTALL).strip() else: main_content = response_text # Extract code snippets code_blocks = re.findall(r'```(?:(\w+)\n)?(.*?)```', main_content, re.DOTALL) parsed.code_snippets = [] for lang, code in code_blocks: if code.strip(): parsed.code_snippets.append({ 'language': lang or 'text', 'code': code.strip(), 'description': f"Code snippet ({lang or 'unknown'})" }) # Remove code blocks from main content for cleaner display clean_content = re.sub(r'```.*?```', '', main_content, flags=re.DOTALL) clean_content = re.sub(r'`.*?`', '', clean_content) parsed.main_content = clean_content.strip() return parsed def extract_artifacts_from_response(parsed_response, conversation_id): """Extract and save artifacts from parsed response""" artifacts_created = [] # Save code snippets as artifacts for i, snippet in enumerate(parsed_response.code_snippets): agent.add_artifact( conversation_id=conversation_id, artifact_type="code", content=snippet['code'], title=f"code_snippet_{snippet['language']}_{i}", metadata={ "language": snippet['language'], "description": snippet.get('description', ''), "source": "llm_response" } ) artifacts_created.append(f"code_snippet_{i}") # Save thinking as a text artifact if substantial if len(parsed_response.thinking) > 50: agent.add_artifact( conversation_id=conversation_id, artifact_type="text", content=parsed_response.thinking, title="reasoning_process", metadata={"type": "reasoning", "source": "llm_response"} ) artifacts_created.append("reasoning") return artifacts_created def process_lcars_message(message, history, speak_response=False): """Process messages using the LLMAgent and parse responses""" if not message.strip(): return "", history, "Please enter a message", [] try: # Add user message to displayed history new_history = history + [[message, ""]] # Use the agent's direct_chat method raw_response = agent.direct_chat(message, agent.current_conversation) # Parse the response parsed_response = parse_llm_response(raw_response) # Extract and save artifacts from the response artifacts_created = extract_artifacts_from_response(parsed_response, agent.current_conversation) # Update the history with the main content display_content = parsed_response.main_content if parsed_response.code_snippets: display_content += "\n\n**Code Snippets Generated:**" for i, snippet in enumerate(parsed_response.code_snippets): display_content += f"\n```{snippet['language']}\n{snippet['code']}\n```" new_history[-1][1] = display_content # Speak response if enabled if speak_response and agent.speech_enabled: agent.speak(parsed_response.main_content) # Get artifacts for display artifacts = agent.get_canvas_summary(agent.current_conversation) status = f"✅ Response parsed. Artifacts created: {len(artifacts_created)} | Total: {len(artifacts)}" return "", new_history, status, artifacts, parsed_response.thinking except Exception as e: error_msg = f"❌ Error: {str(e)}" new_history = history + [[message, error_msg]] return "", new_history, error_msg, agent.get_canvas_summary(agent.current_conversation), "" def update_chat_display(history): """Convert history to formatted HTML for display""" if not history: return "
No messages yet
" html = "
" for i, (user_msg, bot_msg) in enumerate(history): html += f"""
👤 You: {user_msg}
🤖 L.C.A.R.S: {bot_msg}
""" html += "
" return html def update_artifacts_display(): """Get formatted artifacts display""" artifacts = agent.get_canvas_artifacts(agent.current_conversation) if not artifacts: return "
No artifacts generated yet
" html = "
" for i, artifact in enumerate(artifacts[-10:]): # Last 10 artifacts type_icon = { "code": "💻", "text": "📝", "diagram": "📊", "image": "🖼️" }.get(artifact.type, "📄") html += f"""
{type_icon} {artifact.title} (#{i})
Type: {artifact.type} | Time: {time.ctime(artifact.timestamp)}
{artifact.content[:150]}{'...' if len(artifact.content) > 150 else ''}
""" html += "
" return html def get_plain_text_response(history): """Extract the latest bot response for plain text display""" if not history: return "## 🤖 L.C.A.R.S Response\n\n*Awaiting your query...*" last_exchange = history[-1] if len(last_exchange) >= 2 and last_exchange[1]: return f"## 🤖 L.C.A.R.S Response\n\n{last_exchange[1]}" else: return "## 🤖 L.C.A.R.S Response\n\n*Processing...*" def execute_code_artifact(artifact_id, current_code): """Execute a specific code artifact""" try: artifacts = agent.get_canvas_artifacts(agent.current_conversation) if not artifacts: return "No artifacts available", current_code try: artifact_idx = int(artifact_id) if 0 <= artifact_idx < len(artifacts): artifact = artifacts[artifact_idx] if artifact.type == "code": # Return the code to display in the editor display_text = f"## 📋 Loaded Artifact #{artifact_idx}\n\n**Title:** {artifact.title}\n\n**Code:**\n```python\n{artifact.content}\n```" return display_text, artifact.content else: return f"❌ Artifact {artifact_idx} is not code (type: {artifact.type})", current_code else: return f"❌ Invalid artifact ID. Available: 0-{len(artifacts)-1}", current_code except ValueError: return "❌ Please enter a valid numeric artifact ID", current_code except Exception as e: return f"❌ Error: {str(e)}", current_code def create_code_artifact(code, description, language): """Create a new code artifact""" try: if not code.strip(): return "❌ No code provided", code agent.add_artifact( conversation_id=agent.current_conversation, artifact_type="code", content=code, title=description or f"Code_{len(agent.get_canvas_artifacts(agent.current_conversation))}", metadata={"language": language, "description": description} ) artifacts_count = len(agent.get_canvas_artifacts(agent.current_conversation)) return f"✅ Code artifact saved! Total artifacts: {artifacts_count}", code except Exception as e: return f"❌ Error saving artifact: {str(e)}", code def clear_current_chat(): """Clear the current conversation""" agent.clear_conversation(agent.current_conversation) empty_history = [] status_msg = "✅ Chat cleared" plain_text = "## 🤖 L.C.A.R.S Response\n\n*Chat cleared*" chat_display = update_chat_display(empty_history) artifacts_display = update_artifacts_display() return empty_history, plain_text, status_msg, chat_display, artifacts_display, "" def new_session(): """Start a new session""" agent.clear_conversation(agent.current_conversation) agent.clear_canvas(agent.current_conversation) new_code = "# New L.C.A.R.S Session Started\nprint('🚀 Local Computer Advanced Reasoning System Online')\nprint('🤖 All systems nominal - Ready for collaboration')" empty_history = [] status_msg = "🆕 New session started" plain_text = "## 🤖 L.C.A.R.S Response\n\n*New session started*" chat_display = update_chat_display(empty_history) artifacts_display = update_artifacts_display() return empty_history, new_code, plain_text, status_msg, chat_display, artifacts_display, "" def update_model_settings(base_url, api_key, model_id, temperature, max_tokens): """Update agent model settings""" try: agent.base_url = base_url agent.api_key = api_key agent.model_id = model_id agent.temperature = float(temperature) agent.max_tokens = int(max_tokens) # Recreate client with new settings agent.async_client = agent.CreateClient(base_url, api_key) return f"✅ Model settings updated: {model_id} | Temp: {temperature} | Max tokens: {max_tokens}" except Exception as e: return f"❌ Error updating settings: {str(e)}" async def fetch_models(base_url, api_key): """Fetch available models from the API""" try: models = await agent.fetch_available_models(base_url, api_key) return gr.Dropdown(choices=models, value=models[0] if models else "") except Exception as e: print(f"Error fetching models: {e}") return gr.Dropdown(choices=[], value="") # Create the Gradio interface with gr.Blocks( title="🚀 L.C.A.R.S - Local Computer Advanced Reasoning System", theme='Yntec/HaleyCH_Theme_Orange_Green', css=custom_css ) as demo: # State management history_state = gr.State([]) with gr.Sidebar(label = "Settings"): gr.HTML("
⚙️ MODEL SETTINGS
") with gr.Accordion("🔧 Configuration", open=True): base_url = gr.Textbox( value=agent.base_url, label="Base URL", placeholder="http://localhost:1234/v1" ) api_key = gr.Textbox( value=agent.api_key, label="API Key", placeholder="not-needed for local models", type="password" ) model_id = gr.Dropdown( value=agent.model_id, label="Model", choices=[agent.model_id], allow_custom_value=True ) temperature = gr.Slider( value=agent.temperature, minimum=0.1, maximum=2.0, step=0.1, label="Temperature" ) max_tokens = gr.Slider( value=agent.max_tokens, minimum=100, maximum=10000, step=100, label="Max Tokens" ) with gr.Row(): update_settings_btn = gr.Button("🔄 Update Settings", variant="primary") fetch_models_btn = gr.Button("📋 Fetch Models", variant="secondary") # ============================================ # HEADER SECTION # ============================================ with gr.Row(): with gr.Column(scale=1): gr.Image( value="https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg", elem_id="lcars_logo", height=200, show_download_button=False, container=False, width=200 ) with gr.Column(scale=3): gr.HTML(f"""
🖥️ L.C.A.R.S - Local Computer Advanced Reasoning System
USS Enterprise • NCC-1701-D • Starfleet Command
""") # ============================================ # MAIN INTERFACE TABS # ============================================ with gr.Tabs(): # ============================================ # L.C.A.R.S MAIN CHAT TAB (Enhanced) # ============================================ with gr.TabItem(label="🤖 L.C.A.R.S Chat Intelligence", elem_id="lcars_main_tab"): with gr.Row(): # LEFT COLUMN - INPUT & CONTROLS with gr.Column(scale=2): gr.HTML("
🧠 REASONING PROCESS
") with gr.Accordion(label="🧠 AI Reasoning & Thinking", open=True): thinking_display = gr.Markdown( value="*AI reasoning will appear here during processing...*", label="Thought Process", show_label=True, height=200 ) # Main chat input message = gr.Textbox( show_copy_button=True, lines=3, label="💬 Ask L.C.A.R.S", placeholder="Enter your message to the Local Computer Advanced Reasoning System..." ) # Control buttons with gr.Row(): submit_btn = gr.Button("🚀 Ask L.C.A.R.S", variant="primary", size="lg") clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary") new_session_btn = gr.Button("🆕 New Session", variant="secondary") # Audio controls with gr.Row(): speak_response = gr.Checkbox(label="🔊 Speak Response", value=False) # Quick Actions with gr.Accordion(label="⚡ Utility Quick Actions", open=False): with gr.Row(): artifact_id_input = gr.Textbox( label="Artifact ID", placeholder="Artifact ID (0, 1, 2...)", scale=2 ) execute_artifact_btn = gr.Button("📂 Load Artifact", variant="primary") # MIDDLE COLUMN - RESPONSES with gr.Column(scale=2): gr.HTML("
SYSTEM RESPONSE
") with gr.Accordion(label="🤖 L.C.A.R.S Response", open=True): plain_text_output = gr.Markdown( value="## 🤖 L.C.A.R.S Response\n\n*Awaiting your query...*", container=True, show_copy_button=True, label="AI Response", height=300 ) execution_output = gr.Markdown( value="*Execution results will appear here*", label="Execution Results", height=150 ) status_display = gr.Textbox( value="System ready", label="Status", interactive=False ) gr.HTML("
Current Session
") # Enhanced Chat History Display with gr.Accordion(label="📜 Session Chat History", open=True): chat_history_display = gr.HTML( value="
No messages yet
", label="Full Session History", show_label=True ) # RIGHT COLUMN - ENHANCED CODE ARTIFACTS with gr.Column(scale=2): gr.HTML("
🧱 ENHANCED CODE ARTIFACTS WORKSHOP
") with gr.Accordion(label="🧱 Code Artifacts Workshop", open=True): # Enhanced Code Editor with save functionality code_artifacts = gr.Code( language="python", label="Generated Code & Artifacts", lines=15, interactive=True, show_line_numbers=True, elem_id="code_editor", value="# Welcome to L.C.A.R.S Code Workshop\n# Write or generate code here\n\nprint('🚀 L.C.A.R.S Code Workshop Active')" ) # Enhanced Artifact Controls with gr.Row(): artifact_description = gr.Textbox( label="Artifact Description", placeholder="Brief description of the code...", scale=2 ) artifact_language = gr.Dropdown( choices=["python", "javascript", "html", "css", "bash", "sql", "json"], value="python", label="Language", scale=1 ) with gr.Row(): execute_code_btn = gr.Button("▶️ Execute Code", variant="primary") create_artifact_btn = gr.Button("💾 Save Artifact", variant="primary") # Artifacts Display with gr.Accordion(label="📊 Current Session Artifacts", open=True): artifacts_display = gr.HTML( value="
No artifacts generated yet
", label="Generated Artifacts Timeline", show_label=True ) # ============================================ # EVENT HANDLERS - WITH PARSED RESPONSE SUPPORT # ============================================ # Main chat functionality def handle_message(message, history, speak_response): # Process the message cleaned_message, new_history, status_msg, artifacts, thinking = process_lcars_message(message, history, speak_response) # Update all displays plain_text = get_plain_text_response(new_history) chat_display = update_chat_display(new_history) artifacts_html = update_artifacts_display() # Format thinking for display thinking_display_content = f"## 🧠 AI Reasoning\n\n{thinking}" if thinking else "*No reasoning content extracted*" # Return in correct order for outputs return cleaned_message, new_history, plain_text, status_msg, chat_display, artifacts_html, thinking_display_content submit_btn.click( fn=handle_message, inputs=[message, history_state, speak_response], outputs=[ message, # 0 - cleaned message input history_state, # 1 - updated history state plain_text_output, # 2 - markdown response (string) status_display, # 3 - status message (string) chat_history_display, # 4 - HTML display artifacts_display, # 5 - HTML display thinking_display # 6 - thinking markdown ] ) message.submit( fn=handle_message, inputs=[message, history_state, speak_response], outputs=[ message, history_state, plain_text_output, status_display, chat_history_display, artifacts_display, thinking_display ] ) # Clear chat clear_btn.click( fn=clear_current_chat, outputs=[ history_state, # 0 - empty history list plain_text_output, # 1 - markdown string status_display, # 2 - status string chat_history_display, # 3 - HTML string artifacts_display, # 4 - HTML string thinking_display # 5 - thinking markdown ] ) # New session new_session_btn.click( fn=new_session, outputs=[ history_state, # 0 - empty history list code_artifacts, # 1 - code string plain_text_output, # 2 - markdown string status_display, # 3 - status string chat_history_display, # 4 - HTML string artifacts_display, # 5 - HTML string thinking_display # 6 - thinking markdown ] ) # Artifact operations create_artifact_btn.click( fn=create_code_artifact, inputs=[code_artifacts, artifact_description, artifact_language], outputs=[execution_output, code_artifacts] ) execute_artifact_btn.click( fn=execute_code_artifact, inputs=[artifact_id_input, code_artifacts], outputs=[execution_output, code_artifacts] ) execute_code_btn.click( fn=execute_current_code, inputs=[code_artifacts], outputs=[execution_output, code_artifacts] ) # Model settings update_settings_btn.click( fn=update_model_settings, inputs=[base_url, api_key, model_id, temperature, max_tokens], outputs=[status_display] ) fetch_models_btn.click( fn=fetch_models, inputs=[base_url, api_key], outputs=[model_id] ) if __name__ == "__main__": # Start the agent agent.start() print("🚀 L.C.A.R.S Agent Started!") print(f"🤖 Model: {agent.model_id}") print(f"🔗 Base URL: {agent.base_url}") print(f"💬 Default Conversation: {agent.current_conversation}") # Launch the interface demo.launch(share=True, server_name="0.0.0.0", server_port=7860)