Spaces: Runtime error
import os
import time

import cv2
import face_recognition
import gradio as gr
from fastai.vision.all import load_learner
# Load the exported fastai gaze classifier
model = load_learner("gaze-recognizer-v1.pkl")
def video_processing(video):
    start_time = time.time()

    video_capture = cv2.VideoCapture(video)
    on_camera = 0
    off_camera = 0
    total = 0

    # Loop through the video, sampling one frame per block of 24 * 30 reads
    # (roughly one frame for every 30 seconds of 24 fps footage).
    while True:
        for i in range(24 * 30):
            ret, frame = video_capture.read()
            if not ret:
                break

        # If there are no more frames, break out of the loop
        if not ret:
            break

        # Convert the sampled frame to grayscale before detection and classification
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Find all the faces in the frame (HOG detector by default; the commented
        # call below switches to the slower but more accurate CNN model)
        face_locations = face_recognition.face_locations(gray)
        # face_locations = face_recognition.face_locations(gray, number_of_times_to_upsample=0, model="cnn")

        if len(face_locations) > 0:
            for top, right, bottom, left in face_locations:
                # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                # Crop the face and resize it to the 128x128 input the model expects
                face_image = gray[top:bottom, left:right]
                resized_face_image = cv2.resize(face_image, (128, 128))

                # Predict the class of the resized face image using the model
                result = model.predict(resized_face_image)
                print(result[0])

                if result[0] == 'on_camera':
                    on_camera += 1
                elif result[0] == 'off_camera':
                    off_camera += 1
                total += 1

            # Local debugging only: show the frame and exit on 'q'
            # cv2.imshow('Video', frame)
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break

    # Guard against clips in which no face was detected at all
    gaze_percentage = on_camera / total * 100 if total else 0

    # Release the video capture object and close any OpenCV windows
    video_capture.release()
    cv2.destroyAllWindows()

    end_time = time.time()
    print(f'Time taken: {end_time - start_time}')

    return gaze_percentage
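
# A minimal local smoke test (not part of the original Space): assuming a short
# test clip named "sample.mp4" (hypothetical path) sits next to this script, the
# function can be called directly before launching the Gradio UI.
# print(video_processing("sample.mp4"))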
demo = gr.Interface(
    fn=video_processing,
    inputs=gr.Video(),
    outputs=gr.Text(),
)

if __name__ == "__main__":
    demo.launch()
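
The imports above imply the Space needs gradio, fastai, face_recognition (which pulls in dlib), and OpenCV available at runtime; a hedged sketch of a requirements.txt for this app (the file and its exact contents are assumptions, not part of the original listing):

gradio
fastai
face_recognition
opencv-python-headless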