import json
import time
import typing

from agent_server.openai_schemas import ChatMessage
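
# The helpers below back an OpenAI-compatible chat-completions route: content
# normalization, flattening a transcript into a single task string, response
# payload construction, and SSE-friendly headers.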


def normalize_content_to_text(content: typing.Any) -> str:
    """Coerce an OpenAI-style message ``content`` value (str, bytes,
    content-parts list, or dict) into plain text."""
    if isinstance(content, str):
        return content
    if isinstance(content, (bytes, bytearray)):
        try:
            return content.decode("utf-8", errors="ignore")
        except Exception:
            return str(content)
    if isinstance(content, list):
        # Content-parts format: keep text parts, JSON-encode everything else.
        parts = []
        for item in content:
            if (
                isinstance(item, dict)
                and item.get("type") == "text"
                and isinstance(item.get("text"), str)
            ):
                parts.append(item["text"])
            else:
                try:
                    parts.append(json.dumps(item, ensure_ascii=False))
                except Exception:
                    parts.append(str(item))
        return "\n".join(parts)
    if isinstance(content, dict):
        try:
            return json.dumps(content, ensure_ascii=False)
        except Exception:
            return str(content)
    return str(content)
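
# Example (illustrative): a content-parts list such as
#   [{"type": "text", "text": "hi"}, {"type": "image_url", "image_url": {"url": "..."}}]
# normalizes to "hi" followed by the JSON-encoded non-text part on the next line.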


def messages_to_task(messages: typing.List[ChatMessage]) -> str:
    """Flatten a chat transcript into a single task string: system prompts,
    then the latest user message, then prior assistant replies as context."""
    system_parts = [
        normalize_content_to_text(m.get("content", ""))
        for m in messages
        if m.get("role") == "system"
    ]
    user_parts = [
        normalize_content_to_text(m.get("content", ""))
        for m in messages
        if m.get("role") == "user"
    ]
    assistant_parts = [
        normalize_content_to_text(m.get("content", ""))
        for m in messages
        if m.get("role") == "assistant"
    ]
    sys_txt = "\n".join([s for s in system_parts if s]).strip()
    history = ""
    if assistant_parts:
        history = "\n\nPrevious assistant replies (for context):\n" + "\n---\n".join(
            assistant_parts
        )
    last_user = user_parts[-1] if user_parts else ""
    return f"{sys_txt}\nTask:\n{last_user}\n{history}".strip()
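
# Example (illustrative): with a system prompt "Be concise." and a final user
# message "Summarize the report", messages_to_task returns:
#   Be concise.
#   Task:
#   Summarize the report
# plus a "Previous assistant replies (for context):" section when the
# transcript already contains assistant turns.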


def openai_response(message_text: str, model_name: str) -> typing.Dict[str, typing.Any]:
    """Wrap ``message_text`` in a non-streaming chat.completion payload.

    Token usage is reported as zero placeholders."""
    now = int(time.time())
    return {
        "id": f"chatcmpl-smol-{now}",
        "object": "chat.completion",
        "created": now,
        "model": model_name,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": message_text},
                "finish_reason": "stop",
            }
        ],
        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
    }


def sse_headers() -> dict:
    """Response headers that discourage proxy/client buffering of an SSE stream."""
    return {
        "Cache-Control": "no-cache, no-transform",
        "Connection": "keep-alive",
        "X-Accel-Buffering": "no",
    }


def now_ts() -> int:
    """Current Unix timestamp in whole seconds."""
    return int(time.time())
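

# Minimal local sketch (illustrative, not part of the served app): exercises the
# helpers above with plain dicts standing in for ChatMessage, which is assumed
# to be a dict-like / TypedDict message type.
if __name__ == "__main__":
    demo_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [{"type": "text", "text": "Say hello."}]},
    ]
    print(messages_to_task(demo_messages))
    print(json.dumps(openai_response("Hello!", "demo-model"), ensure_ascii=False, indent=2))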