# NOTE(review): removed file-viewer scrape residue (page chrome, commit-hash
# gutter, and line-number gutter) that was captured along with the source;
# it was not part of the Python module and made the file unparseable.
import os
import cv2
import yaml
import tarfile
import tempfile
import numpy as np
import warnings
from skimage import img_as_ubyte
import safetensors
import safetensors.torch
warnings.filterwarnings('ignore')
import imageio
import torch
import torchvision
from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
from src.facerender.modules.mapping import MappingNet
from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
from src.facerender.modules.make_animation import make_animation
from pydub import AudioSegment
from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list
from src.utils.paste_pic import paste_pic
from src.utils.videoio import save_video_with_watermark
# Feature-detect the stable-diffusion webui environment: the `webui` module is
# only importable when this code runs as a webui extension, so `in_webui`
# records which environment we are in for later branching.
try:
    import webui  # in webui
    in_webui = True
except ImportError:
    in_webui = False
class AnimateFromCoeff:
    """Face renderer: animates a source image from motion coefficients.

    Builds the FaceVid2Vid networks (SPADE generator, keypoint detector,
    head-pose estimator) and the SadTalker MappingNet from the YAML config
    referenced by ``sadtalker_path['facerender_yaml']``, loads their
    checkpoints, freezes all parameters, and puts every network in eval mode.

    Parameters
    ----------
    sadtalker_path : dict
        Checkpoint/config paths. Keys used here: ``facerender_yaml``
        (required), and either ``checkpoint`` (combined safetensors file) or
        ``free_view_checkpoint`` (legacy torch checkpoint), plus
        ``mappingnet_checkpoint`` (required).
    device : str or torch.device
        Device the networks are moved to.
    """

    def __init__(self, sadtalker_path, device):
        with open(sadtalker_path['facerender_yaml']) as f:
            config = yaml.safe_load(f)

        generator = OcclusionAwareSPADEGenerator(
            **config['model_params']['generator_params'],
            **config['model_params']['common_params'])
        kp_extractor = KPDetector(
            **config['model_params']['kp_detector_params'],
            **config['model_params']['common_params'])
        he_estimator = HEEstimator(
            **config['model_params']['he_estimator_params'],
            **config['model_params']['common_params'])
        mapping = MappingNet(**config['model_params']['mapping_params'])

        # Move every network to the target device and freeze all parameters:
        # this class is inference-only.
        for net in (generator, kp_extractor, he_estimator, mapping):
            net.to(device)
            for param in net.parameters():
                param.requires_grad = False

        # FaceVid2Vid checkpoint loading: prefer the combined safetensors
        # checkpoint when configured, otherwise fall back to the legacy
        # free-view torch checkpoint (which also restores the HE estimator).
        if 'checkpoint' in sadtalker_path:
            self.load_cpk_facevid2vid_safetensor(
                sadtalker_path['checkpoint'],
                kp_detector=kp_extractor,
                generator=generator,
                he_estimator=None,
                device=device)
        else:
            self.load_cpk_facevid2vid(
                sadtalker_path['free_view_checkpoint'],
                kp_detector=kp_extractor,
                generator=generator,
                he_estimator=he_estimator,
                device=device)

        # MappingNet checkpoint loading (mandatory — without it the mapping
        # network would run with random weights).
        if sadtalker_path.get('mappingnet_checkpoint') is not None:
            self.load_cpk_mapping(
                sadtalker_path['mappingnet_checkpoint'],
                mapping=mapping,
                device=device)
        else:
            # Message kept verbatim (Turkish: "you must specify a
            # mappingnet_checkpoint path").
            raise AttributeError("mappingnet_checkpoint path belirtmelisiniz.")

        self.kp_extractor = kp_extractor
        self.generator = generator
        self.he_estimator = he_estimator
        self.mapping = mapping
        self.device = device

        self.kp_extractor.eval()
        self.generator.eval()
        self.he_estimator.eval()
        self.mapping.eval()

    def load_cpk_facevid2vid_safetensor(self, checkpoint_path,
                                        generator=None, kp_detector=None,
                                        he_estimator=None, device="cpu"):
        """Load a combined safetensors checkpoint into the given networks.

        The file stores all sub-networks flattened with prefixes
        ``generator.``, ``kp_extractor.`` and ``he_estimator.``; each prefix
        is stripped before calling ``load_state_dict``. Networks passed as
        ``None`` are skipped. Returns ``None``.
        """
        checkpoint = safetensors.torch.load_file(checkpoint_path)
        if generator is not None:
            state = {k.replace('generator.', ''): v
                     for k, v in checkpoint.items() if k.startswith('generator.')}
            generator.load_state_dict(state)
        if kp_detector is not None:
            state = {k.replace('kp_extractor.', ''): v
                     for k, v in checkpoint.items() if k.startswith('kp_extractor.')}
            kp_detector.load_state_dict(state)
        if he_estimator is not None:
            state = {k.replace('he_estimator.', ''): v
                     for k, v in checkpoint.items() if k.startswith('he_estimator.')}
            he_estimator.load_state_dict(state)
        return None

    def load_cpk_facevid2vid(self, checkpoint_path,
                             generator=None, discriminator=None,
                             kp_detector=None, he_estimator=None,
                             optimizer_generator=None, optimizer_discriminator=None,
                             optimizer_kp_detector=None, optimizer_he_estimator=None,
                             device="cpu"):
        """Load a legacy FaceVid2Vid torch checkpoint.

        Restores the state dict of every network/optimizer argument that is
        not ``None`` (optimizers and the discriminator only when their key is
        present in the checkpoint). Returns the stored ``epoch`` (0 if the
        checkpoint carries none).
        """
        checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
        if generator is not None:
            generator.load_state_dict(checkpoint['generator'])
        if kp_detector is not None:
            kp_detector.load_state_dict(checkpoint['kp_detector'])
        if he_estimator is not None:
            he_estimator.load_state_dict(checkpoint['he_estimator'])
        if discriminator is not None and 'discriminator' in checkpoint:
            discriminator.load_state_dict(checkpoint['discriminator'])
        # Restore optimizers when both the argument and the key exist.
        if optimizer_generator is not None and 'optimizer_generator' in checkpoint:
            optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
        if optimizer_discriminator is not None and 'optimizer_discriminator' in checkpoint:
            optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
        if optimizer_kp_detector is not None and 'optimizer_kp_detector' in checkpoint:
            optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
        if optimizer_he_estimator is not None and 'optimizer_he_estimator' in checkpoint:
            optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
        return checkpoint.get('epoch', 0)

    def load_cpk_mapping(self, checkpoint_path,
                         mapping=None, discriminator=None,
                         optimizer_mapping=None, optimizer_discriminator=None,
                         device='cpu'):
        """Load the MappingNet checkpoint and return its ``epoch`` (0 if absent).

        Accepts a plain torch checkpoint, a ``.tar``/``.pth.tar`` archive
        (extracted to a temp dir, then the first ``.pth`` — or failing that
        ``.pkl`` — file inside is loaded), or a directory containing
        ``archive/data.pkl``.

        BUGFIX: the original defined a *second*, nested ``load_cpk_mapping``
        inside this method, so calling it only created an inner function and
        returned ``None`` — the mapping checkpoint was never loaded. The
        nested body is now the actual method body.
        """
        # 1) Archive path: extract and locate the real checkpoint file.
        if checkpoint_path.endswith('.tar') or checkpoint_path.endswith('.pth.tar'):
            tmpdir = tempfile.mkdtemp()
            with tarfile.open(checkpoint_path, 'r') as tar:
                # SECURITY NOTE(review): extractall on an untrusted archive
                # permits path traversal — only load trusted checkpoints.
                tar.extractall(path=tmpdir)
            # 1.a) Prefer a .pth file; fall back to .pkl.
            candidate_pth = None
            candidate_pkl = None
            for root, _, files in os.walk(tmpdir):
                for f in files:
                    if f.endswith('.pth') and candidate_pth is None:
                        candidate_pth = os.path.join(root, f)
                    if f.endswith('.pkl') and candidate_pkl is None:
                        candidate_pkl = os.path.join(root, f)
                if candidate_pth:
                    break
            if candidate_pth:
                checkpoint_path = candidate_pth
            elif candidate_pkl:
                checkpoint_path = candidate_pkl
            else:
                # Message kept verbatim (Turkish: "neither a .pth nor a .pkl
                # file could be found inside {checkpoint_path}").
                raise FileNotFoundError(
                    f"{checkpoint_path} içinden ne .pth ne de .pkl dosyası bulunabildi."
                )
        # 2) Directory path: look for archive/data.pkl inside it.
        if os.path.isdir(checkpoint_path):
            possible = os.path.join(checkpoint_path, 'archive', 'data.pkl')
            if os.path.isfile(possible):
                checkpoint_path = possible
        # 3) Load the resolved file with torch.
        checkpoint = torch.load(checkpoint_path,
                                map_location=torch.device(device))
        # 4) Restore state dicts for whichever networks/optimizers were given.
        if mapping is not None and 'mapping' in checkpoint:
            mapping.load_state_dict(checkpoint['mapping'])
        if discriminator is not None and 'discriminator' in checkpoint:
            discriminator.load_state_dict(checkpoint['discriminator'])
        if optimizer_mapping is not None and 'optimizer_mapping' in checkpoint:
            optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
        if optimizer_discriminator is not None and 'optimizer_discriminator' in checkpoint:
            optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
        # 5) Report the stored epoch, defaulting to 0.
        return checkpoint.get('epoch', 0)