akin23 committed
Commit 8bfa9fb · verified · 1 Parent(s): 2e6ced0

Upload inference.py

Files changed (1)
  1. inference.py +145 -0
inference.py ADDED
@@ -0,0 +1,145 @@
+ from glob import glob
+ import shutil
+ import torch
+ from time import strftime
+ import os, sys, time
+ from argparse import ArgumentParser
+
+ from src.utils.preprocess import CropAndExtract
+ from src.test_audio2coeff import Audio2Coeff
+ from src.facerender.animate import AnimateFromCoeff
+ from src.generate_batch import get_data
+ from src.generate_facerender_batch import get_facerender_data
+ from src.utils.init_path import init_path
+
+ def main(args):
+     # torch.backends.cudnn.enabled = False
+
+     pic_path = args.source_image
+     audio_path = args.driven_audio
+     save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
+     os.makedirs(save_dir, exist_ok=True)
+     pose_style = args.pose_style
+     device = args.device
+     batch_size = args.batch_size
+     input_yaw_list = args.input_yaw
+     input_pitch_list = args.input_pitch
+     input_roll_list = args.input_roll
+     ref_eyeblink = args.ref_eyeblink
+     ref_pose = args.ref_pose
+
+     current_root_path = os.path.split(sys.argv[0])[0]
+
+     sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess)
+
+     # init model
+     preprocess_model = CropAndExtract(sadtalker_paths, device)
+
+     audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
+
+     animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)
+
+     # crop image and extract 3dmm from image
+     first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
+     os.makedirs(first_frame_dir, exist_ok=True)
+     print('3DMM Extraction for source image')
+     first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,
+                                                                            source_image_flag=True, pic_size=args.size)
+     if first_coeff_path is None:
+         print("Can't get the coeffs of the input")
+         return
+
+     if ref_eyeblink is not None:
+         ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
+         ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
+         os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
+         print('3DMM Extraction for the reference video providing eye blinking')
+         ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
+     else:
+         ref_eyeblink_coeff_path = None
+
+     if ref_pose is not None:
+         if ref_pose == ref_eyeblink:
+             ref_pose_coeff_path = ref_eyeblink_coeff_path
+         else:
+             ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
+             ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
+             os.makedirs(ref_pose_frame_dir, exist_ok=True)
+             print('3DMM Extraction for the reference video providing pose')
+             ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
+     else:
+         ref_pose_coeff_path = None
+
+     # audio to coefficients
+     batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
+     coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
+
+     # 3d face render
+     if args.face3dvis:
+         from src.face3d.visualize import gen_composed_video
+         gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))
+
+     # coeff to video
+     data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
+                                batch_size, input_yaw_list, input_pitch_list, input_roll_list,
+                                expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess, size=args.size)
+
+     result = animate_from_coeff.generate(data, save_dir, pic_path, crop_info,
+                                          enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess, img_size=args.size)
+
+     shutil.move(result, save_dir + '.mp4')
+     print('The generated video is named:', save_dir + '.mp4')
+
+     if not args.verbose:
+         shutil.rmtree(save_dir)
+
+
+ if __name__ == '__main__':
+
+     parser = ArgumentParser()
+     parser.set_defaults(still=True)  # ← add this
+     parser.add_argument("--driven_audio", default='./examples/driven_audio/output.wav', help="path to driven audio")
+     parser.add_argument("--source_image", default='./examples/source_image/Serdar_Ali2.png', help="path to source image")
+     parser.add_argument("--ref_eyeblink", default=None, help="path to reference video providing eye blinking")
+     parser.add_argument("--ref_pose", default=None, help="path to reference video providing pose")
+     parser.add_argument("--checkpoint_dir", default='./checkpoints', help="path to the checkpoints")
+     parser.add_argument("--result_dir", default='./results', help="path to output")
+     parser.add_argument("--pose_style", type=int, default=0, help="input pose style from [0, 46)")
+     parser.add_argument("--batch_size", type=int, default=2, help="the batch size of facerender")
+     parser.add_argument("--size", type=int, default=256, help="the image size of the facerender")
+     parser.add_argument("--expression_scale", type=float, default=1., help="the expression scale of the facerender")
+     parser.add_argument('--input_yaw', nargs='+', type=int, default=None, help="the input yaw degree of the user")
+     parser.add_argument('--input_pitch', nargs='+', type=int, default=None, help="the input pitch degree of the user")
+     parser.add_argument('--input_roll', nargs='+', type=int, default=None, help="the input roll degree of the user")
+     parser.add_argument("--enhancer", type=str, default='gfpgan', help="Face enhancer, [gfpgan, RestoreFormer]")
+     parser.add_argument('--background_enhancer', type=str, default=None, help="background enhancer, [realesrgan]")
+     parser.add_argument("--cpu", dest="cpu", action="store_true", help="run on CPU even if CUDA is available")
+     parser.add_argument("--face3dvis", action="store_true", help="generate 3d face and 3d landmarks")
+     parser.add_argument("--still", action="store_true", help="can crop back to the original image for full-body animation")
+     parser.add_argument("--preprocess", default='full', choices=['crop', 'extcrop', 'resize', 'full', 'extfull'], help="how to preprocess the images")
+     parser.add_argument("--verbose", action="store_true", help="whether to save the intermediate outputs")
+     parser.add_argument("--old_version", action="store_true", help="use the .pth checkpoints instead of the safetensors version")
+
+
+     # net structure and parameters
+     parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='unused')
+     parser.add_argument('--init_path', type=str, default=None, help='unused')
+     parser.add_argument('--use_last_fc', default=False, help='zero initialize the last fc')
+     parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/')
+     parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
+
+     # default renderer parameters
+     parser.add_argument('--focal', type=float, default=1015.)
+     parser.add_argument('--center', type=float, default=112.)
+     parser.add_argument('--camera_d', type=float, default=10.)
+     parser.add_argument('--z_near', type=float, default=5.)
+     parser.add_argument('--z_far', type=float, default=15.)
+
+     args = parser.parse_args()
+
+     if torch.cuda.is_available() and not args.cpu:
+         args.device = "cuda"
+     else:
+         args.device = "cpu"
+
+     main(args)
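
For reference, a minimal invocation sketch of the uploaded script. The flags come straight from the parser above, and the example paths are the script's own defaults, so they are assumptions about this particular checkout (checkpoints under ./checkpoints, examples under ./examples):

    python inference.py \
        --source_image ./examples/source_image/Serdar_Ali2.png \
        --driven_audio ./examples/driven_audio/output.wav \
        --result_dir ./results \
        --enhancer gfpgan \
        --preprocess full

Per the code above, the final video is moved next to the timestamped working folder as <result_dir>/<timestamp>.mp4, and the working folder is deleted unless --verbose is passed.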