import cv2 as cv
import numpy as np
import gradio as gr
from huggingface_hub import hf_hub_download

from facial_fer_model import FacialExpressionRecog
from yunet import YuNet

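# Download the YuNet face detector and MobileFaceNet facial expression recognition ONNX models from the Hugging Face Hub.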
FD_MODEL_PATH = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
FER_MODEL_PATH = hf_hub_download(repo_id="opencv/facial_expression_recognition", filename="facial_expression_recognition_mobilefacenet_2022july.onnx")

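# Run both models on the CPU through the OpenCV DNN backend.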
backend_id = cv.dnn.DNN_BACKEND_OPENCV
target_id = cv.dnn.DNN_TARGET_CPU

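# Instantiate the expression recognizer and the YuNet face detector
# (the detector keeps the wrapper's default thresholds, backend, and target).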
fer_model = FacialExpressionRecog(modelPath=FER_MODEL_PATH, backendId=backend_id, targetId=target_id)
detect_model = YuNet(modelPath=FD_MODEL_PATH)


def visualize(image, det_res, fer_res):
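    """Draw bounding boxes, expression labels, and facial landmarks on a copy of the image."""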
    output = image.copy()
    landmark_color = [(255, 0, 0), (0, 0, 255), (0, 255, 0), (255, 0, 255), (0, 255, 255)]

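    # Each YuNet detection packs [x, y, w, h], five (x, y) landmarks, and a confidence score.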
    for det, fer_type in zip(det_res, fer_res):
        bbox = det[0:4].astype(np.int32)
        fer_type_str = FacialExpressionRecog.getDesc(fer_type)
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), 2)
        cv.putText(output, fer_type_str, (bbox[0], bbox[1] - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

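        # Landmark order: right eye, left eye, nose tip, right and left mouth corners.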
        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, landmark, 2, landmark_color[idx], 2)

    return output


def detect_expression(input_image):
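    """Detect faces, classify each face's expression, and return an annotated RGB image."""
    # Gradio supplies RGB arrays; the OpenCV pipeline expects BGR.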
    image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    h, w, _ = image.shape
    detect_model.setInputSize([w, h])

    dets = detect_model.infer(image)
    if dets is None:
        # No faces detected: return the input unchanged (converted back to RGB).
        return cv.cvtColor(image, cv.COLOR_BGR2RGB)

    fer_res = []
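    # Classify the expression of each detected face; face_points[:-1] drops the trailing detection score.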
    for face_points in dets:
        result = fer_model.infer(image, face_points[:-1])
        fer_res.append(result[0])

    output = visualize(image, dets, fer_res)
    return cv.cvtColor(output, cv.COLOR_BGR2RGB)


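# Minimal Gradio UI: upload an image, get back the annotated result.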
demo = gr.Interface(
    fn=detect_expression,
    inputs=gr.Image(type="numpy", label="Upload Image"),
    outputs=gr.Image(type="numpy", label="Facial Expression Result"),
    title="Facial Expression Recognition (FER) with OpenCV DNN",
    description="Detects faces and recognizes facial expressions using YuNet + MobileFaceNet ONNX models.",
    allow_flagging="never"
)

if __name__ == "__main__":
    demo.launch()
|