|
|
|
|
|
"""cartoonImage.ipynb |
|
|
|
|
|
Automatically generated by Colab. |
|
|
|
|
|
Original file is located at |
|
|
https://colab.research.google.com/drive/1Nd6JEgoiXd6P7VSQC430_bRu3N300QyS |
|
|
""" |
|
|
|
|
|
|
|
|
!pip install numpy Pillow opencv-python mediapipe |
|
|
|
|
|
|
|
|
# tf.lite.Interpreter ships with the tensorflow package; "tensorflow-lite" is not a PyPI package.
!pip install tensorflow
|
|
|
|
|
|
|
|
!pip install fastapi uvicorn |
|
|
|
|
|
|
|
|
!pip install huggingface-hub |
|
|
|
|
|
|
|
|
# The model is fetched below via the kagglehub API, so install kagglehub (the kaggle CLI is not used here).
!pip install kagglehub
|
|
|
|
|
|
|
|
# google.colab is preinstalled in the Colab runtime; installing the PyPI "google-colab" package is unnecessary.
|
|
|
|
|
from google.colab import drive |
|
|
drive.mount('/content/drive') |
|
|
|
|
|
import cv2 |
|
|
import os |
|
|
|
|
|
def video_to_frames(video_path, frames_dir, fps):
    """Extract frames from a video at the requested FPS and save them as JPEGs."""
    if not os.path.exists(frames_dir):
        os.makedirs(frames_dir)

    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    print(f"Total frames: {total_frames}, Video FPS: {video_fps}")

    duration = int(input("Enter the number of seconds of video to process: "))
    frames_to_process = duration * fps
    print(f"Frames to process: {frames_to_process}")

    # Keep every Nth source frame so the output matches the requested FPS.
    # max(1, ...) avoids a zero step (and a ZeroDivisionError) when fps >= video_fps.
    frame_step = max(1, int(video_fps / fps))

    frame_idx = 0
    processed_frames = 0
    while processed_frames < frames_to_process and cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame_idx % frame_step == 0:
            cv2.imwrite(f"{frames_dir}/frame_{processed_frames:04d}.jpg", frame)
            processed_frames += 1
        frame_idx += 1

    cap.release()
    print(f"Frames saved in {frames_dir}")
|
|
|
|
|
|
|
|
video_path = '/content/drive/MyDrive/Cartoon Image Model/M.mp4' |
|
|
frames_dir = '/content/drive/MyDrive/video_frames' |
|
|
fps = int(input("Enter the desired FPS (e.g., 30): ")) |
|
|
video_to_frames(video_path, frames_dir, fps) |
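
# Optional check (not part of the original notebook): count the extracted frames on
# Drive to confirm the expected number of JPEGs were written before moving on.
num_frames = len([f for f in os.listdir(frames_dir) if f.endswith('.jpg')])
print(f"{num_frames} frames extracted to {frames_dir}")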
|
|
|
|
|
import kagglehub |
|
|
|
|
|
|
|
|
path = kagglehub.model_download("spsayakpaul/cartoongan/tfLite/dr") |
|
|
|
|
|
print("Path to model files:", path) |
|
|
|
|
|
|
|
|
|
|
|
!mkdir saved_model |
|
|
!mv {path}/*.tflite saved_model |
|
|
|
|
|
!cp -r /content/saved_model /content/drive/MyDrive/ |
|
|
|
|
|
path = '/content/drive/MyDrive/saved_model/1.tflite'
|
|
|
|
|
import tensorflow as tf
import os
from PIL import Image
import numpy as np


def cartoonize_frames(frames_dir, path, output_dir):
    """Run the CartoonGAN TFLite model over every extracted frame."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    interpreter = tf.lite.Interpreter(model_path=path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_shape = input_details[0]['shape'][1:3]  # (height, width) expected by the model

    def preprocess_image(img_path, input_shape):
        # Resize to the model's input size (PIL expects (width, height)),
        # add a batch dimension, and scale pixel values to [0, 1].
        img = Image.open(img_path).convert('RGB')
        img = img.resize((int(input_shape[1]), int(input_shape[0])), Image.LANCZOS)
        img_array = np.array(img, dtype=np.float32)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = img_array / 255.0
        return img_array

    def postprocess_image(img_array):
        # Drop the batch dimension, rescale to [0, 255], and convert back to a PIL image.
        img_array = img_array.squeeze() * 255
        img_array = np.clip(img_array, 0, 255).astype(np.uint8)
        return Image.fromarray(img_array)

    for frame in sorted(os.listdir(frames_dir)):
        if frame.endswith(".jpg"):
            img_path = os.path.join(frames_dir, frame)
            processed_image = preprocess_image(img_path, input_shape)
            interpreter.set_tensor(input_details[0]['index'], processed_image)
            interpreter.invoke()
            cartoonized_image = interpreter.get_tensor(output_details[0]['index'])
            cartoonized_image = postprocess_image(cartoonized_image)
            cartoonized_image.save(f"{output_dir}/{frame}")

    print(f"Cartoonized frames saved in {output_dir}")
|
|
|
|
|
|
|
|
path = '/content/saved_model/1.tflite' |
|
|
output_dir = '/content/cartoon_frames' |
|
|
cartoonize_frames(frames_dir, path, output_dir) |
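
# Optional preview (not part of the original notebook): display the first cartoonized
# frame inline to spot-check the model output before assembling the video.
from IPython.display import Image as IPyImage, display

cartoon_frames = sorted(f for f in os.listdir(output_dir) if f.endswith('.jpg'))
if cartoon_frames:
    display(IPyImage(filename=os.path.join(output_dir, cartoon_frames[0])))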
|
|
|
|
|
def frames_to_video(frames_dir, output_video_path, fps):
    images = sorted([img for img in os.listdir(frames_dir) if img.endswith(".jpg")])
    if not images:
        raise ValueError("No frames found in the directory.")

    first_frame = cv2.imread(os.path.join(frames_dir, images[0]))
    height, width, layers = first_frame.shape
    size = (width, height)

    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, size)

    for image in images:
        frame = cv2.imread(os.path.join(frames_dir, image))
        out.write(frame)

    out.release()
    print(f"Video created successfully at {output_video_path}")
|
|
|
|
|
|
|
|
output_video_path = '/content/output_video.mp4' |
|
|
frames_to_video(output_dir, output_video_path, fps) |
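
# Optional playback (not part of the original notebook): embed the finished MP4 in the
# Colab output cell. Note that browsers may not decode the 'mp4v' codec used above;
# if the player stays blank, download the file or re-encode it with ffmpeg (H.264).
from base64 import b64encode
from IPython.display import HTML

mp4_bytes = open(output_video_path, 'rb').read()
data_url = "data:video/mp4;base64," + b64encode(mp4_bytes).decode()
HTML(f'<video width="480" controls><source src="{data_url}" type="video/mp4"></video>')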
|
|
|
|
|
from google.colab import drive |
|
|
drive.mount('/content/drive') |
|
|
|
|
|
!mkdir /content/drive/MyDrive/video_cartooned |
|
|
!cp -r /content/saved_model /content/drive/MyDrive/video_cartooned |
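
# Optional (not part of the original notebook): the cell above archives only the TFLite
# model; the rendered video can be copied to the same Drive folder if it should be kept too.
!cp /content/output_video.mp4 /content/drive/MyDrive/video_cartooned/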
|
|
|
|
|
import joblib |
|
|
import tensorflow as tf |
|
|
|
|
|
|
|
|
path = '/content/drive/MyDrive/video_cartooned/saved_model/1.tflite' |
|
|
interpreter = tf.lite.Interpreter(path) |
|
|
interpreter.allocate_tensors() |
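
# Optional check (not part of the original notebook): print the reloaded interpreter's
# tensor details to confirm the model copied to Drive still reports the expected
# input shape and dtype.
print(interpreter.get_input_details())
print(interpreter.get_output_details())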
|
|
|
|
|
|
|
|
!huggingface-cli login

!huggingface-cli repo create cartoonify-video

# Clone the newly created repo before copying files into it
# (replace <username> with your Hugging Face username).
!git clone https://huggingface.co/<username>/cartoonify-video /content/cartoonify-video

!huggingface-cli lfs-enable-largefiles /content/cartoonify-video

!cp -r /content/drive/MyDrive/video_cartooned/* /content/cartoonify-video

!cd cartoonify-video && git add . && git commit -m "Initial commit" && git push
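
# Alternative upload path (not part of the original notebook): a minimal sketch using the
# huggingface_hub Python API instead of the CLI + git flow above. The repo id
# "<username>/cartoonify-video" is a placeholder; substitute your own account name.
from huggingface_hub import HfApi

api = HfApi()
api.create_repo(repo_id="<username>/cartoonify-video", exist_ok=True)
api.upload_folder(
    folder_path="/content/drive/MyDrive/video_cartooned",
    repo_id="<username>/cartoonify-video",
    repo_type="model",
)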