import os
import json
import requests
from datetime import datetime
from flask import Flask, request, jsonify, send_from_directory
from transformers import pipeline
from openai import OpenAI
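
# Flask app: serves the static index.html and exposes a single /chat endpoint.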
app = Flask(__name__)
# Load emotion model
emotion_model = pipeline(
"text-classification",
model="j-hartmann/emotion-english-distilroberta-base"
)
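# The model predicts one of seven labels (anger, disgust, fear, joy, neutral,
# sadness, surprise); the top-scoring label is stored as the user's mood.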
# Ensure the user data file exists with default fields
USER_FILE = "user_data.json"
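# Note: on Hugging Face Spaces the local filesystem is typically ephemeral,
# so this file (and the conversation history in it) resets when the Space restarts.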
if not os.path.exists(USER_FILE):
    with open(USER_FILE, "w") as f:
        json.dump({
            "name": None,
            "age": None,
            "mood": None,
            "last_interaction": None,
            "missed_days": 0,
            "mode": "emotional_support",
            "conversation_history": []
        }, f)
def load_user():
    with open(USER_FILE, "r") as f:
        return json.load(f)

def save_user(data):
    with open(USER_FILE, "w") as f:
        json.dump(data, f)
# Safe OpenAI client initialization
def get_client():
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key or api_key.strip() == "":
        raise ValueError("OpenAI API key is missing or empty. Please set OPENAI_API_KEY in Hugging Face Secrets.")
    return OpenAI(api_key=api_key)
# Model priority list for fallbacks
MODEL_LIST = ["gpt-4o-mini", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
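# Models are tried in the order listed; rate-limit/quota errors fall through
# to the next model, and any other error is re-raised to the caller.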
# Helpline numbers
HELPLINES = {
"US": "National Suicide Prevention Lifeline: 988",
"UK": "Samaritans: 116 123",
"IN": "AASRA: 91-9820466726",
"CA": "Canada Suicide Prevention Service: 988",
"AU": "Lifeline: 13 11 14",
"default": "Please contact your local crisis helpline immediately."
}
def get_country_from_ip(ip):
    try:
        # ipapi.co returns the two-letter ISO country code as plain text
        response = requests.get(f"https://ipapi.co/{ip}/country/", timeout=5)
        if response.status_code == 200:
            return response.text.upper()
    except requests.RequestException:
        pass
    return "default"
def detect_self_harm(message):
    keywords = ["suicide", "kill myself", "end my life", "self harm", "hurt myself"]
    return any(word in message.lower() for word in keywords)
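# Example request (assuming the app runs locally on port 7860):
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "I had a rough day", "mode": "emotional_support"}'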
@app.route("/chat", methods=["POST"])
def chat():
    try:
        # silent=True avoids a 500 when the body is not valid JSON
        data = request.get_json(silent=True) or {}
        message = data.get("message", "").strip()
        if not message:
            return jsonify({"reply": "Please enter a message.", "emotion": "neutral"}), 400

        mode = data.get("mode", "emotional_support")
        user_ip = request.remote_addr  # behind a proxy this may be the proxy's IP (see X-Forwarded-For)
        user = load_user()
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        user["last_interaction"] = now
        user["mode"] = mode
        user["conversation_history"].append({"role": "user", "content": message, "timestamp": now})

        # Emotion detection
        emotion = emotion_model(message)[0]["label"]
        user["mood"] = emotion

        # Self-harm check
        if detect_self_harm(message):
            country = get_country_from_ip(user_ip)
            helpline = HELPLINES.get(country, HELPLINES["default"])
            reply = (
                f"I'm really concerned about what you shared. Please reach out right now — "
                f"you're not alone. Here's someone you can call: {helpline}"
            )
            user["conversation_history"].append({"role": "assistant", "content": reply, "timestamp": now})
            save_user(user)
            return jsonify({"reply": reply, "emotion": emotion})
        # Build context for chat (keep only role/content so entries match the
        # chat-completions message format; stored timestamps are dropped here)
        history = [
            {"role": m["role"], "content": m["content"]}
            for m in user["conversation_history"][-10:]
        ]
        messages = [
            {"role": "system", "content": (
                f"You are Serenity — an empathetic, emotionally intelligent best friend. "
                f"Be warm, caring, supportive, and human-like. Respond briefly (1–2 sentences), "
                f"showing love, empathy, and curiosity. Never sound robotic or repetitive. "
                f"Current mood: {emotion}. Mode: {mode}."
            )}
        ] + history
        # Try models in priority order
        client = get_client()
        reply = None
        for model in MODEL_LIST:
            try:
                response = client.chat.completions.create(
                    model=model,
                    messages=messages,
                    temperature=0.8
                )
                reply = response.choices[0].message.content.strip()
                print(f"✅ Used model: {model}")
                break  # Success, stop trying other models
            except Exception as e:
                error_str = str(e).lower()
                if "rate_limit" in error_str or "429" in error_str or "quota" in error_str:
                    print(f"⚠️ Rate limit on {model}, trying next model...")
                    continue  # Try next model
                else:
                    # Non-rate-limit error, re-raise
                    raise

        if reply is None:
            # All models failed
            reply = "All models are currently rate-limited. Please try again later or check your OpenAI account."

        user["conversation_history"].append({"role": "assistant", "content": reply, "timestamp": now})
        save_user(user)
        return jsonify({"reply": reply, "emotion": emotion})
    except ValueError as e:
        print(f"❌ API Key Error: {e}")
        return jsonify({
            "reply": "API key is missing or invalid. Please check your Hugging Face Secrets and ensure it's a valid OpenAI key.",
            "emotion": "neutral"
        }), 500
    except Exception as e:
        print(f"❌ Chat error: {e}")
        return jsonify({
            "reply": "Something went wrong (e.g., network issue). Try again later.",
            "emotion": "neutral"
        }), 500
@app.route("/")
def index():
    return send_from_directory(".", "index.html")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)