Upload 4 files
- app.py +53 -0
- labels.txt +5 -0
- model.onnx +3 -0
- requirements.txt +107 -0
app.py
ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+import onnxruntime as ort
+import numpy as np
+import torch
+from PIL import Image
+import torchvision.transforms as T
+import cv2
+
+# Load labels
+with open("labels.txt") as f:
+    LABELS = [l.strip() for l in f.readlines()]
+
+# Load ONNX model
+session = ort.InferenceSession("model.onnx")
+
+# Image preprocessing
+transform = T.Compose([
+    T.Resize((224, 224)),
+    T.ToTensor(),
+])
+
+def predict(image):
+    if image is None:
+        return "No image provided", {}
+
+    # Convert OpenCV webcam frame to PIL
+    if isinstance(image, np.ndarray):
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        image = Image.fromarray(image)
+
+    img = transform(image).unsqueeze(0).numpy()
+
+    outputs = session.run(None, {"images": img})[0]
+    probs = torch.softmax(torch.tensor(outputs), dim=1)[0]
+
+    result = {LABELS[i]: float(probs[i]) for i in range(len(LABELS))}
+    top_idx = torch.argmax(probs).item()
+
+    return LABELS[top_idx], result
+
+# Gradio UI
+interface = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(source="webcam", type="numpy", label="Capture or Upload Face"),
+    outputs=[
+        gr.Label(label="Predicted Emotion"),
+        gr.JSON(label="Confidence Scores")
+    ],
+    title="Face Emotion Recognition",
+    description="Capture a live face or upload an image to classify emotions."
+)
+
+interface.launch()
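As a quick sanity check before (re)deploying, the model, labels, and input binding used in app.py can be exercised offline. The snippet below is a minimal sketch under the same assumptions app.py makes: a float32 input named "images" of shape (1, 3, 224, 224) and one row of class scores per label in labels.txt.

# Minimal offline smoke test for model.onnx + labels.txt (assumes the same
# "images" input binding and 224x224 preprocessing used in app.py).
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("model.onnx")

with open("labels.txt") as f:
    labels = [l.strip() for l in f]

dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
logits = session.run(None, {"images": dummy})[0]

# The classifier head must produce exactly one score per label in labels.txt.
assert logits.shape == (1, len(labels)), f"unexpected output shape {logits.shape}"
print("ok:", dict(zip(labels, logits[0].round(3).tolist())))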
labels.txt
ADDED
@@ -0,0 +1,5 @@
+Angry
+Fear
+Happy
+Sad
+Surprise
model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb9dedccf5797b19b30ad9e3efe9a6cb4aff311f80d298cb946c6cb3c64e5774
+size 6176368
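model.onnx is stored as a Git LFS pointer; the ~6 MB weights are resolved when the Space builds (or with `git lfs pull` after cloning). The export pipeline is not part of this upload. Purely as an illustrative sketch, a 5-class PyTorch classifier could be exported to an ONNX graph whose input is named "images" (the name app.py binds to) roughly as follows; the MobileNetV3 stand-in is hypothetical, not the model actually used here.

# Illustrative export sketch only: the real architecture behind model.onnx is unknown.
import torch
import torchvision

# Hypothetical stand-in 5-class classifier (Angry/Fear/Happy/Sad/Surprise);
# the trained model would go here instead.
model = torchvision.models.mobilenet_v3_small(weights=None, num_classes=5)
model.eval()

dummy = torch.randn(1, 3, 224, 224)  # matches the 224x224 preprocessing in app.py
torch.onnx.export(
    model,
    dummy,
    "model.onnx",
    input_names=["images"],   # app.py feeds its input tensor under this name
    output_names=["logits"],
    opset_version=17,
)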
requirements.txt
ADDED
@@ -0,0 +1,107 @@
+absl-py==2.3.1
+asttokens==3.0.0
+astunparse==1.6.3
+certifi==2025.11.12
+charset-normalizer==3.4.4
+coloredlogs==15.0.1
+comm==0.2.2
+contourpy==1.3.3
+cycler==0.12.1
+debugpy==1.8.14
+decorator==5.2.1
+executing==2.2.0
+filelock==3.20.0
+flatbuffers==25.2.10
+fonttools==4.61.0
+fsspec==2025.10.0
+gast==0.6.0
+google-pasta==0.2.0
+grpcio==1.73.1
+h5py==3.14.0
+humanfriendly==10.0
+idna==3.11
+ipykernel==6.29.5
+ipython==9.4.0
+ipython-pygments-lexers==1.1.1
+jedi==0.19.2
+jinja2==3.1.6
+jupyter-client==8.6.3
+jupyter-core==5.8.1
+keras==3.10.0
+kiwisolver==1.4.9
+libclang==18.1.1
+markdown==3.8.2
+markdown-it-py==3.0.0
+markupsafe==3.0.3
+matplotlib==3.10.7
+matplotlib-inline==0.1.7
+mdurl==0.1.2
+ml-dtypes==0.5.1
+mpmath==1.3.0
+namex==0.1.0
+nest-asyncio==1.6.0
+networkx==3.6
+numpy==2.3.5
+nvidia-cublas-cu12==12.8.4.1
+nvidia-cuda-cupti-cu12==12.8.90
+nvidia-cuda-nvrtc-cu12==12.8.93
+nvidia-cuda-runtime-cu12==12.8.90
+nvidia-cudnn-cu12==9.10.2.21
+nvidia-cufft-cu12==11.3.3.83
+nvidia-cufile-cu12==1.13.1.3
+nvidia-curand-cu12==10.3.9.90
+nvidia-cusolver-cu12==11.7.3.90
+nvidia-cusparse-cu12==12.5.8.93
+nvidia-cusparselt-cu12==0.7.1
+nvidia-nccl-cu12==2.27.5
+nvidia-nvjitlink-cu12==12.8.93
+nvidia-nvshmem-cu12==3.3.20
+nvidia-nvtx-cu12==12.8.90
+onnxruntime==1.23.2
+opencv-python==4.11.0.86
+opt-einsum==3.4.0
+optree==0.16.0
+packaging==25.0
+pandas==2.3.1
+parso==0.8.4
+pexpect==4.9.0
+pillow==12.0.0
+platformdirs==4.3.8
+polars==1.35.2
+polars-runtime-32==1.35.2
+prompt-toolkit==3.0.51
+protobuf==5.29.5
+psutil==7.1.3
+ptyprocess==0.7.0
+pure-eval==0.2.3
+pygments==2.19.2
+pyparsing==3.2.5
+python-dateutil==2.9.0.post0
+pytz==2025.2
+pyyaml==6.0.3
+pyzmq==27.0.0
+requests==2.32.5
+rich==14.0.0
+scipy==1.16.3
+setuptools==80.9.0
+six==1.17.0
+stack-data==0.6.3
+sympy==1.14.0
+tensorboard==2.19.0
+tensorboard-data-server==0.7.2
+tensorflow==2.19.0
+termcolor==3.1.0
+torch==2.9.1
+torchvision==0.24.1
+tornado==6.5.1
+traitlets==5.14.3
+triton==3.5.1
+typing-extensions==4.15.0
+tzdata==2025.2
+ultralytics==8.3.234
+ultralytics-thop==2.0.18
+urllib3==2.5.0
+wcwidth==0.2.13
+werkzeug==3.1.3
+wheel==0.45.1
+wrapt==1.17.2
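The list above looks like a full environment freeze (TensorFlow, Keras, Jupyter, CUDA wheels, plotting libraries) rather than only what app.py imports, and gradio itself is not pinned, so the Space presumably relies on the Gradio version set in its configuration. A trimmed requirements.txt covering just the runtime imports of app.py, keeping the versions from the list above, would be roughly:

numpy==2.3.5
onnxruntime==1.23.2
opencv-python==4.11.0.86
pillow==12.0.0
torch==2.9.1
torchvision==0.24.1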