| """ | |
| Smart Content Moderation - Frame-Based Video Processing (V2) | |
| Extract frames β Process β Combine β Output | |
| FAST FIX - WITHOUT DELETING OLD FILES | |
| """ | |

import logging
import os
import shutil
import tempfile
from datetime import datetime
from pathlib import Path

import cv2

logger = logging.getLogger(__name__)

try:
    from detectors.yolov8_face import YOLOv8Face
    from detectors.text_detector import TextDetector
    from detectors.nsfw_detector import NSFWDetector
    from modules.face_blur_p import FaceBlurrer
    from modules.text_blur_p import TextBlurrer
    from modules.nsfw_blur import NSFWBlurrer
except ImportError as e:
    logger.warning(f"Import warning: {e}")


def validate_blur_strength(blur_strength):
    """Coerce blur strength to an int and round even values up to odd (OpenCV kernels must be odd)."""
    blur_strength = int(blur_strength)
    return blur_strength if blur_strength % 2 == 1 else blur_strength + 1
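
# For reference: validate_blur_strength(50) -> 51, validate_blur_strength(51) -> 51.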


def get_file_type(file_path):
    """Detect file type"""
    file_lower = str(file_path).lower()
    if file_lower.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff')):
        return 'image'
    elif file_lower.endswith(('.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v')):
        return 'video'
    return 'unknown'
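
# For reference: get_file_type("clip.MP4") -> 'video', get_file_type("photo.png") -> 'image',
# get_file_type("notes.txt") -> 'unknown' (the comparison is case-insensitive).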


def process_frame(frame, blur_strength, confidence, blur_text, nsfw_blur, nsfw_blur_type, blood_threshold):
    """Process a single frame with all 3 detections (faces, text, NSFW)."""
    if frame is None or frame.size == 0:
        return frame
    try:
        # Face blurring
        try:
            face_blurrer = FaceBlurrer(method="gaussian", blur_strength=blur_strength, adaptive=True)
            frame = face_blurrer.blur_faces(frame, confidence_threshold=confidence)
        except Exception as e:
            logger.debug(f"Face blur skipped: {e}")
        # Text blurring: detect text first, blur only if any regions were found
        if blur_text:
            try:
                text_detector = TextDetector(languages=['en'], gpu=False)
                text_regions = text_detector.detect_text(frame, confidence_threshold=0.5)
                if text_regions:
                    text_blurrer = TextBlurrer(kernel_size=(blur_strength, blur_strength))
                    frame = text_blurrer.blur_hate_text(frame)
            except Exception as e:
                logger.debug(f"Text blur skipped: {e}")
        # NSFW / gore blurring
        if nsfw_blur:
            try:
                nsfw_blurrer = NSFWBlurrer(method=nsfw_blur_type, blood_threshold=blood_threshold)
                frame = nsfw_blurrer.blur_nsfw(frame)
            except Exception as e:
                logger.debug(f"NSFW blur skipped: {e}")
        return frame
    except Exception as e:
        logger.debug(f"Frame processing error: {e}")
        return frame
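

# process_frame() rebuilds every detector and blurrer for each frame, which can dominate
# runtime on long videos. Below is a minimal sketch of one way to reuse an instance,
# assuming the constructor shown above is stateless and safe to call repeatedly.
# This is a hypothetical helper; it is not wired into process_frame() above.
_BLURRER_CACHE = {}


def _get_face_blurrer(blur_strength):
    """Return a cached FaceBlurrer keyed by blur strength (hypothetical optimization)."""
    key = ("face", blur_strength)
    if key not in _BLURRER_CACHE:
        _BLURRER_CACHE[key] = FaceBlurrer(method="gaussian", blur_strength=blur_strength, adaptive=True)
    return _BLURRER_CACHE[key]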
| def process_image(input_path, blur_strength=51, confidence=0.5, blur_text=True, nsfw_blur=True, nsfw_blur_type="gaussian", blood_threshold=0.3): | |
| """Process image file - handles video routing""" | |
| file_lower = str(input_path).lower() | |
| if file_lower.endswith(('.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v')): | |
| logger.info(f"VIDEO detected - routing to process_video()") | |
| return process_video(input_path, blur_strength, confidence, blur_text, nsfw_blur, nsfw_blur_type, blood_threshold) | |
| logger.info(f"Processing IMAGE: {Path(input_path).name}") | |
| try: | |
| image = cv2.imread(input_path) | |
| if image is None: | |
| raise ValueError(f"Cannot read: {input_path}") | |
| processed = process_frame(image, blur_strength, confidence, blur_text, nsfw_blur, nsfw_blur_type, blood_threshold) | |
| output_dir = Path("social_moderation/data/output/images") | |
| output_dir.mkdir(parents=True, exist_ok=True) | |
| filename = Path(input_path).stem | |
| timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") | |
| output_path = output_dir / f"{filename}_{timestamp}.jpg" | |
| cv2.imwrite(str(output_path), processed) | |
| logger.info(f"β Image saved: {output_path}") | |
| return str(output_path) | |
| except Exception as e: | |
| logger.error(f"Image error: {e}") | |
| raise | |
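
# Illustrative usage (the input path is a made-up example):
#   out = process_image("social_moderation/data/input/sample.jpg", blur_strength=35)
#   -> writes social_moderation/data/output/images/sample_<timestamp>.jpg and returns that path.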
| def process_video(input_path, blur_strength=51, confidence=0.5, blur_text=True, nsfw_blur=True, nsfw_blur_type="gaussian", blood_threshold=0.3): | |
| """Process video using frame extraction""" | |
| logger.info(f"Processing VIDEO: {Path(input_path).name}") | |
| try: | |
| logger.info("Step 1: Extracting frames...") | |
| cap = cv2.VideoCapture(input_path) | |
| if not cap.isOpened(): | |
| raise ValueError(f"Cannot open: {input_path}") | |
| fps = cap.get(cv2.CAP_PROP_FPS) | |
| width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) | |
| height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) | |
| total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) | |
| logger.info(f"Video: {width}x{height} @ {fps} FPS, {total_frames} frames") | |
| temp_dir = tempfile.mkdtemp(prefix="video_frames_") | |
| frame_num = 0 | |
| saved_frames = [] | |
| while True: | |
| ret, frame = cap.read() | |
| if not ret: | |
| break | |
| frame_num += 1 | |
| frame_path = os.path.join(temp_dir, f"frame_{frame_num:06d}.jpg") | |
| cv2.imwrite(frame_path, frame) | |
| saved_frames.append(frame_path) | |
| if frame_num % 50 == 0: | |
| logger.info(f"Extracted: {frame_num}/{total_frames}") | |
| cap.release() | |
| logger.info(f"β Extracted {len(saved_frames)} frames") | |
| logger.info("Step 2: Processing frames...") | |
| processed_frames = [] | |
| for i, frame_path in enumerate(saved_frames): | |
| try: | |
| frame = cv2.imread(frame_path) | |
| if frame is None: | |
| processed_frames.append(frame_path) | |
| continue | |
| processed = process_frame(frame, blur_strength, confidence, blur_text, nsfw_blur, nsfw_blur_type, blood_threshold) | |
| output_frame_path = frame_path.replace("frame_", "processed_") | |
| cv2.imwrite(output_frame_path, processed) | |
| processed_frames.append(output_frame_path) | |
| if (i + 1) % 50 == 0: | |
| logger.info(f"Processed: {i + 1}/{len(saved_frames)}") | |
| except Exception as e: | |
| logger.warning(f"Frame {i} error: {e}") | |
| processed_frames.append(frame_path) | |
| logger.info(f"β Processed {len(processed_frames)} frames") | |
| logger.info("Step 3: Combining frames...") | |
| output_dir = Path("social_moderation/data/output/videos") | |
| output_dir.mkdir(parents=True, exist_ok=True) | |
| filename = Path(input_path).stem | |
| timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") | |
| output_path = output_dir / f"{filename}_{timestamp}.mp4" | |
| fourcc = cv2.VideoWriter_fourcc(*'mp4v') | |
| out = cv2.VideoWriter(str(output_path), fourcc, fps, (width, height)) | |
| for i, frame_path in enumerate(processed_frames): | |
| try: | |
| frame = cv2.imread(frame_path) | |
| if frame is not None: | |
| out.write(frame) | |
| if (i + 1) % 50 == 0: | |
| logger.info(f"Written: {i + 1}/{len(processed_frames)}") | |
| except: | |
| pass | |
| out.release() | |
| logger.info(f"β Video saved: {output_path}") | |
| logger.info("Step 4: Cleanup...") | |
| shutil.rmtree(temp_dir) | |
| logger.info("β Done") | |
| return str(output_path) | |
| except Exception as e: | |
| logger.error(f"Video error: {e}") | |
| raise | |
| def process_media_file(input_path, blur_strength=51, confidence=0.5, blur_text=True, nsfw_blur=True, nsfw_blur_type="gaussian", blood_threshold=0.3): | |
| """Main entry point""" | |
| blur_strength = validate_blur_strength(blur_strength) | |
| file_type = get_file_type(input_path) | |
| logger.info(f"Detected: {file_type}") | |
| if file_type == 'video': | |
| return process_video(input_path, blur_strength, confidence, blur_text, nsfw_blur, nsfw_blur_type, blood_threshold) | |
| elif file_type == 'image': | |
| return process_image(input_path, blur_strength, confidence, blur_text, nsfw_blur, nsfw_blur_type, blood_threshold) | |
| else: | |
| raise ValueError(f"Unsupported: {input_path}") | |