akin23 committed
Commit 85b5a6b · verified · 1 Parent(s): 496ac5f

Update src/facerender/animate.py

Files changed (1)
  1. src/facerender/animate.py +280 -257
src/facerender/animate.py CHANGED
@@ -1,257 +1,280 @@
- import os
- import cv2
- import yaml
- import numpy as np
- import warnings
- from skimage import img_as_ubyte
- import safetensors
- import safetensors.torch
- warnings.filterwarnings('ignore')
-
-
- import imageio
- import torch
- import torchvision
-
-
- from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
- from src.facerender.modules.mapping import MappingNet
- from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
- from src.facerender.modules.make_animation import make_animation
-
- from pydub import AudioSegment
- from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list
- from src.utils.paste_pic import paste_pic
- from src.utils.videoio import save_video_with_watermark
-
- try:
-     import webui  # in webui
-     in_webui = True
- except:
-     in_webui = False
-
- class AnimateFromCoeff():
-
-     def __init__(self, sadtalker_path, device):
-
-         with open(sadtalker_path['facerender_yaml']) as f:
-             config = yaml.safe_load(f)
-
-         generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
-                                                  **config['model_params']['common_params'])
-         kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
-                                   **config['model_params']['common_params'])
-         he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
-                                    **config['model_params']['common_params'])
-         mapping = MappingNet(**config['model_params']['mapping_params'])
-
-         generator.to(device)
-         kp_extractor.to(device)
-         he_estimator.to(device)
-         mapping.to(device)
-         for param in generator.parameters():
-             param.requires_grad = False
-         for param in kp_extractor.parameters():
-             param.requires_grad = False
-         for param in he_estimator.parameters():
-             param.requires_grad = False
-         for param in mapping.parameters():
-             param.requires_grad = False
-
-         if sadtalker_path is not None:
-             if 'checkpoint' in sadtalker_path: # use safe tensor
-                 self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)
-             else:
-                 self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
-         else:
-             raise AttributeError("Checkpoint should be specified for video head pose estimator.")
-
-         if sadtalker_path['mappingnet_checkpoint'] is not None:
-             self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)
-         else:
-             raise AttributeError("Checkpoint should be specified for video head pose estimator.")
-
-         self.kp_extractor = kp_extractor
-         self.generator = generator
-         self.he_estimator = he_estimator
-         self.mapping = mapping
-
-         self.kp_extractor.eval()
-         self.generator.eval()
-         self.he_estimator.eval()
-         self.mapping.eval()
-
-         self.device = device
-
-     def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
-                                         kp_detector=None, he_estimator=None,
-                                         device="cpu"):
-
-         checkpoint = safetensors.torch.load_file(checkpoint_path)
-
-         if generator is not None:
-             x_generator = {}
-             for k,v in checkpoint.items():
-                 if 'generator' in k:
-                     x_generator[k.replace('generator.', '')] = v
-             generator.load_state_dict(x_generator)
-         if kp_detector is not None:
-             x_generator = {}
-             for k,v in checkpoint.items():
-                 if 'kp_extractor' in k:
-                     x_generator[k.replace('kp_extractor.', '')] = v
-             kp_detector.load_state_dict(x_generator)
-         if he_estimator is not None:
-             x_generator = {}
-             for k,v in checkpoint.items():
-                 if 'he_estimator' in k:
-                     x_generator[k.replace('he_estimator.', '')] = v
-             he_estimator.load_state_dict(x_generator)
-
-         return None
-
-     def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
-                              kp_detector=None, he_estimator=None, optimizer_generator=None,
-                              optimizer_discriminator=None, optimizer_kp_detector=None,
-                              optimizer_he_estimator=None, device="cpu"):
-         checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
-         if generator is not None:
-             generator.load_state_dict(checkpoint['generator'])
-         if kp_detector is not None:
-             kp_detector.load_state_dict(checkpoint['kp_detector'])
-         if he_estimator is not None:
-             he_estimator.load_state_dict(checkpoint['he_estimator'])
-         if discriminator is not None:
-             try:
-                 discriminator.load_state_dict(checkpoint['discriminator'])
-             except:
-                 print ('No discriminator in the state-dict. Dicriminator will be randomly initialized')
-         if optimizer_generator is not None:
-             optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
-         if optimizer_discriminator is not None:
-             try:
-                 optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
-             except RuntimeError as e:
-                 print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')
-         if optimizer_kp_detector is not None:
-             optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
-         if optimizer_he_estimator is not None:
-             optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
-
-         return checkpoint['epoch']
-
-     def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
-                          optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
-         checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
-         if mapping is not None:
-             mapping.load_state_dict(checkpoint['mapping'])
-         if discriminator is not None:
-             discriminator.load_state_dict(checkpoint['discriminator'])
-         if optimizer_mapping is not None:
-             optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
-         if optimizer_discriminator is not None:
-             optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
-
-         return checkpoint['epoch']
-
-     def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
-
-         source_image=x['source_image'].type(torch.FloatTensor)
-         source_semantics=x['source_semantics'].type(torch.FloatTensor)
-         target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
-         source_image=source_image.to(self.device)
-         source_semantics=source_semantics.to(self.device)
-         target_semantics=target_semantics.to(self.device)
-         if 'yaw_c_seq' in x:
-             yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
-             yaw_c_seq = x['yaw_c_seq'].to(self.device)
-         else:
-             yaw_c_seq = None
-         if 'pitch_c_seq' in x:
-             pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
-             pitch_c_seq = x['pitch_c_seq'].to(self.device)
-         else:
-             pitch_c_seq = None
-         if 'roll_c_seq' in x:
-             roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
-             roll_c_seq = x['roll_c_seq'].to(self.device)
-         else:
-             roll_c_seq = None
-
-         frame_num = x['frame_num']
-
-         predictions_video = make_animation(source_image, source_semantics, target_semantics,
-                                            self.generator, self.kp_extractor, self.he_estimator, self.mapping,
-                                            yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
-
-         predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
-         predictions_video = predictions_video[:frame_num]
-
-         video = []
-         for idx in range(predictions_video.shape[0]):
-             image = predictions_video[idx]
-             image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
-             video.append(image)
-         result = img_as_ubyte(video)
-
-         ### the generated video is 256x256, so we keep the aspect ratio,
-         original_size = crop_info[0]
-         if original_size:
-             result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
-
-         video_name = x['video_name'] + '.mp4'
-         path = os.path.join(video_save_dir, 'temp_'+video_name)
-
-         imageio.mimsave(path, result, fps=float(25))
-
-         av_path = os.path.join(video_save_dir, video_name)
-         return_path = av_path
-
-         audio_path = x['audio_path']
-         audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
-         new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
-         start_time = 0
-         # cog will not keep the .mp3 filename
-         sound = AudioSegment.from_file(audio_path)
-         frames = frame_num
-         end_time = start_time + frames*1/25*1000
-         word1=sound.set_frame_rate(16000)
-         word = word1[start_time:end_time]
-         word.export(new_audio_path, format="wav")
-
-         save_video_with_watermark(path, new_audio_path, av_path, watermark= False)
-         print(f'The generated video is named {video_save_dir}/{video_name}')
-
-         if 'full' in preprocess.lower():
-             # only add watermark to the full image.
-             video_name_full = x['video_name'] + '_full.mp4'
-             full_video_path = os.path.join(video_save_dir, video_name_full)
-             return_path = full_video_path
-             paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)
-             print(f'The generated video is named {video_save_dir}/{video_name_full}')
-         else:
-             full_video_path = av_path
-
-         #### paste back then enhancers
-         if enhancer:
-             video_name_enhancer = x['video_name'] + '_enhanced.mp4'
-             enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)
-             av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer)
-             return_path = av_path_enhancer
-
-             try:
-                 enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
-                 imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
-             except:
-                 enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
-                 imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
-
-             save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)
-             print(f'The generated video is named {video_save_dir}/{video_name_enhancer}')
-             os.remove(enhanced_path)
-
-         os.remove(path)
-         os.remove(new_audio_path)
-
-         return return_path
-
+ import os
+ import cv2
+ import yaml
+ import numpy as np
+ import warnings
+ from skimage import img_as_ubyte
+ import safetensors
+ import safetensors.torch
+ warnings.filterwarnings('ignore')
+
+
+ import imageio
+ import torch
+ import torchvision
+
+
+ from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
+ from src.facerender.modules.mapping import MappingNet
+ from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
+ from src.facerender.modules.make_animation import make_animation
+
+ from pydub import AudioSegment
+ from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list
+ from src.utils.paste_pic import paste_pic
+ from src.utils.videoio import save_video_with_watermark
+
+ try:
+     import webui  # in webui
+     in_webui = True
+ except:
+     in_webui = False
+
+ class AnimateFromCoeff():
+
+     def __init__(self, sadtalker_path, device):
+
+         with open(sadtalker_path['facerender_yaml']) as f:
+             config = yaml.safe_load(f)
+
+         generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
+                                                  **config['model_params']['common_params'])
+         kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
+                                   **config['model_params']['common_params'])
+         he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
+                                    **config['model_params']['common_params'])
+         mapping = MappingNet(**config['model_params']['mapping_params'])
+
+         generator.to(device)
+         kp_extractor.to(device)
+         he_estimator.to(device)
+         mapping.to(device)
+         for param in generator.parameters():
+             param.requires_grad = False
+         for param in kp_extractor.parameters():
+             param.requires_grad = False
+         for param in he_estimator.parameters():
+             param.requires_grad = False
+         for param in mapping.parameters():
+             param.requires_grad = False
+
+         if sadtalker_path is not None:
+             if 'checkpoint' in sadtalker_path: # use safe tensor
+                 self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)
+             else:
+                 self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
+         else:
+             raise AttributeError("Checkpoint should be specified for video head pose estimator.")
+
+         if sadtalker_path['mappingnet_checkpoint'] is not None:
+             self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)
+         else:
+             raise AttributeError("Checkpoint should be specified for video head pose estimator.")
+
+         self.kp_extractor = kp_extractor
+         self.generator = generator
+         self.he_estimator = he_estimator
+         self.mapping = mapping
+
+         self.kp_extractor.eval()
+         self.generator.eval()
+         self.he_estimator.eval()
+         self.mapping.eval()
+
+         self.device = device
+
+     def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
+                                         kp_detector=None, he_estimator=None,
+                                         device="cpu"):
+
+         checkpoint = safetensors.torch.load_file(checkpoint_path)
+
+         if generator is not None:
+             x_generator = {}
+             for k,v in checkpoint.items():
+                 if 'generator' in k:
+                     x_generator[k.replace('generator.', '')] = v
+             generator.load_state_dict(x_generator)
+         if kp_detector is not None:
+             x_generator = {}
+             for k,v in checkpoint.items():
+                 if 'kp_extractor' in k:
+                     x_generator[k.replace('kp_extractor.', '')] = v
+             kp_detector.load_state_dict(x_generator)
+         if he_estimator is not None:
+             x_generator = {}
+             for k,v in checkpoint.items():
+                 if 'he_estimator' in k:
+                     x_generator[k.replace('he_estimator.', '')] = v
+             he_estimator.load_state_dict(x_generator)
+
+         return None
+
+     def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
+                              kp_detector=None, he_estimator=None, optimizer_generator=None,
+                              optimizer_discriminator=None, optimizer_kp_detector=None,
+                              optimizer_he_estimator=None, device="cpu"):
+         checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
+         if generator is not None:
+             generator.load_state_dict(checkpoint['generator'])
+         if kp_detector is not None:
+             kp_detector.load_state_dict(checkpoint['kp_detector'])
+         if he_estimator is not None:
+             he_estimator.load_state_dict(checkpoint['he_estimator'])
+         if discriminator is not None:
+             try:
+                 discriminator.load_state_dict(checkpoint['discriminator'])
+             except:
+                 print ('No discriminator in the state-dict. Discriminator will be randomly initialized')
+         if optimizer_generator is not None:
+             optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
+         if optimizer_discriminator is not None:
+             try:
+                 optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
+             except RuntimeError as e:
+                 print ('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
+         if optimizer_kp_detector is not None:
+             optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
+         if optimizer_he_estimator is not None:
+             optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
+
+         return checkpoint['epoch']
+
+     def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
+                          optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
+         import tarfile  # used only for the .tar sanity check below
+
+         # If the checkpoint is a .tar archive, inspect its contents first
+         if checkpoint_path.endswith(".tar"):
+             try:
+                 with tarfile.open(checkpoint_path, "r") as tar:
+                     members = tar.getnames()
+                     if not any(name.startswith("storages") for name in members):
+                         print("⚠️ 'storages' folder not found in the .tar file. Continuing...")
+                     else:
+                         print("✔️ 'storages' found.")
+             except Exception as e:
+                 print(f"Tar check error: {e}")
+
+         # Load the checkpoint
+         checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
+
+         # Load the mapping weights
+         if mapping is not None and 'mapping' in checkpoint:
+             mapping.load_state_dict(checkpoint['mapping'])
+
+         # Load the remaining components, if present
+         if discriminator is not None and 'discriminator' in checkpoint:
+             discriminator.load_state_dict(checkpoint['discriminator'])
+
+         if optimizer_mapping is not None and 'optimizer_mapping' in checkpoint:
+             optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
+
+         if optimizer_discriminator is not None and 'optimizer_discriminator' in checkpoint:
+             optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
+
+         return checkpoint.get('epoch', 0)
+
+
+     def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
+
+         source_image=x['source_image'].type(torch.FloatTensor)
+         source_semantics=x['source_semantics'].type(torch.FloatTensor)
+         target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
+         source_image=source_image.to(self.device)
+         source_semantics=source_semantics.to(self.device)
+         target_semantics=target_semantics.to(self.device)
+         if 'yaw_c_seq' in x:
+             yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
+             yaw_c_seq = x['yaw_c_seq'].to(self.device)
+         else:
+             yaw_c_seq = None
+         if 'pitch_c_seq' in x:
+             pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
+             pitch_c_seq = x['pitch_c_seq'].to(self.device)
+         else:
+             pitch_c_seq = None
+         if 'roll_c_seq' in x:
+             roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
+             roll_c_seq = x['roll_c_seq'].to(self.device)
+         else:
+             roll_c_seq = None
+
+         frame_num = x['frame_num']
+
+         predictions_video = make_animation(source_image, source_semantics, target_semantics,
+                                            self.generator, self.kp_extractor, self.he_estimator, self.mapping,
+                                            yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
+
+         predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
+         predictions_video = predictions_video[:frame_num]
+
+         video = []
+         for idx in range(predictions_video.shape[0]):
+             image = predictions_video[idx]
+             image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
+             video.append(image)
+         result = img_as_ubyte(video)
+
+         ### the generated video is 256x256, so we keep the aspect ratio,
+         original_size = crop_info[0]
+         if original_size:
+             result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
+
+         video_name = x['video_name'] + '.mp4'
+         path = os.path.join(video_save_dir, 'temp_'+video_name)
+
+         imageio.mimsave(path, result, fps=float(25))
+
+         av_path = os.path.join(video_save_dir, video_name)
+         return_path = av_path
+
+         audio_path = x['audio_path']
+         audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
+         new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
+         start_time = 0
+         # cog will not keep the .mp3 filename
+         sound = AudioSegment.from_file(audio_path)
+         frames = frame_num
+         end_time = start_time + frames*1/25*1000
+         word1=sound.set_frame_rate(16000)
+         word = word1[start_time:end_time]
+         word.export(new_audio_path, format="wav")
+
+         save_video_with_watermark(path, new_audio_path, av_path, watermark= False)
+         print(f'The generated video is named {video_save_dir}/{video_name}')
+
+         if 'full' in preprocess.lower():
+             # only add watermark to the full image.
+             video_name_full = x['video_name'] + '_full.mp4'
+             full_video_path = os.path.join(video_save_dir, video_name_full)
+             return_path = full_video_path
+             paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)
+             print(f'The generated video is named {video_save_dir}/{video_name_full}')
+         else:
+             full_video_path = av_path
+
+         #### paste back then enhancers
+         if enhancer:
+             video_name_enhancer = x['video_name'] + '_enhanced.mp4'
+             enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)
+             av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer)
+             return_path = av_path_enhancer
+
+             try:
+                 enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
+                 imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
+             except:
+                 enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
+                 imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
+
+             save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)
+             print(f'The generated video is named {video_save_dir}/{video_name_enhancer}')
+             os.remove(enhanced_path)
+
+         os.remove(path)
+         os.remove(new_audio_path)
+
+         return return_path
+
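
For context, a minimal, self-contained sketch of the tar-aware loading pattern this commit adds to load_cpk_mapping. The helper name inspect_legacy_checkpoint is hypothetical and not part of the repository; it only illustrates peeking into a legacy .tar checkpoint before handing it to torch.load.

import tarfile
import torch

def inspect_legacy_checkpoint(checkpoint_path, device="cpu"):
    # Hypothetical helper mirroring the check added above: if the checkpoint is a
    # legacy .tar archive, list its members and warn when no 'storages' entry exists.
    if checkpoint_path.endswith(".tar") and tarfile.is_tarfile(checkpoint_path):
        with tarfile.open(checkpoint_path, "r") as tar:
            names = tar.getnames()
            if not any(name.startswith("storages") for name in names):
                print("No 'storages' entry found in the .tar file; continuing anyway.")
    # torch.load accepts both legacy .tar archives and regular pickled checkpoints.
    return torch.load(checkpoint_path, map_location=torch.device(device))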