import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from torchvision.transforms import InterpolationMode

from PIL import Image, ImageDraw

import json
import os.path as osp
import numpy as np


class CPDataset(data.Dataset):
    """
    Dataset for CP-VTON.

    Expects a VITON-HD-style layout under `<dataroot>/<datamode>/`:
    image/, cloth/, cloth-mask/, image-parse-v3/,
    image-parse-agnostic-v3.2/, openpose_img/, openpose_json/
    and image-densepose/.
    """

    def __init__(self, opt):
        super(CPDataset, self).__init__()

        self.opt = opt
        self.root = opt.dataroot
        self.datamode = opt.datamode          # 'train' or 'test'
        self.data_list = opt.data_list
        self.fine_height = opt.fine_height
        self.fine_width = opt.fine_width
        self.semantic_nc = opt.semantic_nc    # number of grouped parse channels
        self.data_path = osp.join(opt.dataroot, opt.datamode)

        # Cloth images get color jitter during training; everything else is
        # only converted to a tensor and normalized to [-1, 1].
        if self.datamode == 'train':
            self.transform_cloth = transforms.Compose([
                transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ])
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ])
        else:
            self.transform_cloth = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ])
            self.transform = self.transform_cloth
        # Defined for both modes: __getitem__ needs the raw [0, 1] DensePose tensor.
        self.transform_wo_normalize = transforms.Compose([transforms.ToTensor()])

        # Load the (person image, cloth image) pairs from the data list file.
        im_names = []
        c_names = []
        with open(osp.join(opt.dataroot, opt.data_list), 'r') as f:
            for line in f.readlines():
                im_name, c_name = line.strip().split()
                im_names.append(im_name)
                c_names.append(c_name)

        self.im_names = im_names
        self.c_names = dict()
        self.c_names['paired'] = im_names      # the person's own cloth
        self.c_names['unpaired'] = c_names     # the cloth listed for try-on

    def name(self):
        return "CPDataset"

    def make_grid(self, N, iH, iW):
        # Normalized coordinate grid of shape (N, iH, iW, 2) with values in
        # [0, 1]; channel 0 is x (width), channel 1 is y (height). For example,
        # make_grid(1, 2, 3)[0, :, :, 0] is [[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]].
        grid_x = torch.linspace(0, 1.0, iW).view(1, 1, iW, 1).expand(N, iH, -1, -1)
        grid_y = torch.linspace(0, 1.0, iH).view(1, iH, 1, 1).expand(N, -1, iW, -1)
        grid = torch.cat([grid_x, grid_y], 3)
        return grid

    def get_agnostic(self, im, im_parse, pose_data):
        # Build a cloth-agnostic person image: gray out the upper clothes and
        # arms using the pose keypoints, then paste back the head, the lower
        # body, and the arm pixels outside the clothing region.
        parse_array = np.array(im_parse)
        parse_head = ((parse_array == 4).astype(np.float32) +
                      (parse_array == 13).astype(np.float32))
        parse_lower = ((parse_array == 9).astype(np.float32) +
                       (parse_array == 12).astype(np.float32) +
                       (parse_array == 16).astype(np.float32) +
                       (parse_array == 17).astype(np.float32) +
                       (parse_array == 18).astype(np.float32) +
                       (parse_array == 19).astype(np.float32))

        agnostic = im.copy()
        agnostic_draw = ImageDraw.Draw(agnostic)

        # Rescale the hip keypoints (9, 12) so the hip width matches the
        # shoulder width (keypoints 2, 5); r is the brush radius.
        length_a = np.linalg.norm(pose_data[5] - pose_data[2])
        length_b = np.linalg.norm(pose_data[12] - pose_data[9])
        point = (pose_data[9] + pose_data[12]) / 2
        pose_data[9] = point + (pose_data[9] - point) / length_b * length_a
        pose_data[12] = point + (pose_data[12] - point) / length_b * length_a
        r = int(length_a / 16) + 1

        # Mask the torso.
        for i in [9, 12]:
            pointx, pointy = pose_data[i]
            agnostic_draw.ellipse((pointx-r*3, pointy-r*6, pointx+r*3, pointy+r*6), 'gray', 'gray')
        agnostic_draw.line([tuple(pose_data[i]) for i in [2, 9]], 'gray', width=r*6)
        agnostic_draw.line([tuple(pose_data[i]) for i in [5, 12]], 'gray', width=r*6)
        agnostic_draw.line([tuple(pose_data[i]) for i in [9, 12]], 'gray', width=r*12)
        agnostic_draw.polygon([tuple(pose_data[i]) for i in [2, 5, 12, 9]], 'gray', 'gray')

        # Mask the neck (keypoint 1).
        pointx, pointy = pose_data[1]
        agnostic_draw.rectangle((pointx-r*5, pointy-r*9, pointx+r*5, pointy), 'gray', 'gray')

        # Mask the arms: shoulders (2, 5), then elbows (3, 6) and wrists (4, 7).
        # A keypoint at (0, 0) means OpenPose did not detect it, so skip it.
        agnostic_draw.line([tuple(pose_data[i]) for i in [2, 5]], 'gray', width=r*12)
        for i in [2, 5]:
            pointx, pointy = pose_data[i]
            agnostic_draw.ellipse((pointx-r*5, pointy-r*6, pointx+r*5, pointy+r*6), 'gray', 'gray')
        for i in [3, 4, 6, 7]:
            if (pose_data[i-1, 0] == 0.0 and pose_data[i-1, 1] == 0.0) or (pose_data[i, 0] == 0.0 and pose_data[i, 1] == 0.0):
                continue
            agnostic_draw.line([tuple(pose_data[j]) for j in [i - 1, i]], 'gray', width=r*10)
            pointx, pointy = pose_data[i]
            agnostic_draw.ellipse((pointx-r*5, pointy-r*5, pointx+r*5, pointy+r*5), 'gray', 'gray')

        # Recover arm pixels that the parse labels as arm (14: left, 15: right);
        # the arm mask is drawn at the original 768x1024 resolution.
        for parse_id, pose_ids in [(14, [5, 6, 7]), (15, [2, 3, 4])]:
            mask_arm = Image.new('L', (768, 1024), 'white')
            mask_arm_draw = ImageDraw.Draw(mask_arm)
            pointx, pointy = pose_data[pose_ids[0]]
            mask_arm_draw.ellipse((pointx-r*5, pointy-r*6, pointx+r*5, pointy+r*6), 'black', 'black')
            for i in pose_ids[1:]:
                if (pose_data[i-1, 0] == 0.0 and pose_data[i-1, 1] == 0.0) or (pose_data[i, 0] == 0.0 and pose_data[i, 1] == 0.0):
                    continue
                mask_arm_draw.line([tuple(pose_data[j]) for j in [i - 1, i]], 'black', width=r*10)
                pointx, pointy = pose_data[i]
                if i != pose_ids[-1]:
                    mask_arm_draw.ellipse((pointx-r*5, pointy-r*5, pointx+r*5, pointy+r*5), 'black', 'black')
            mask_arm_draw.ellipse((pointx-r*4, pointy-r*4, pointx+r*4, pointy+r*4), 'black', 'black')

            parse_arm = (np.array(mask_arm) / 255) * (parse_array == parse_id).astype(np.float32)
            agnostic.paste(im, None, Image.fromarray(np.uint8(parse_arm * 255), 'L'))

        # Paste back the head and the lower body.
        agnostic.paste(im, None, Image.fromarray(np.uint8(parse_head * 255), 'L'))
        agnostic.paste(im, None, Image.fromarray(np.uint8(parse_lower * 255), 'L'))
        return agnostic

    def __getitem__(self, index):
        im_name = self.im_names[index]
        im_name = 'image/' + im_name

        # Cloth image and cloth mask (paired setting only).
        c_name = {}
        c = {}
        cm = {}
        for key in ['paired']:
            c_name[key] = self.c_names[key][index]
            c[key] = Image.open(osp.join(self.data_path, 'cloth', c_name[key])).convert('RGB')
            c[key] = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.BILINEAR)(c[key])
            cm[key] = Image.open(osp.join(self.data_path, 'cloth-mask', c_name[key]))
            cm[key] = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.NEAREST)(cm[key])

            c[key] = self.transform_cloth(c[key])    # [-1, 1]
            cm_array = np.array(cm[key])
            cm_array = (cm_array >= 128).astype(np.float32)  # binarize
            cm[key] = torch.from_numpy(cm_array)
            cm[key].unsqueeze_(0)                    # (1, H, W)

        # Person image.
        im_pil_big = Image.open(osp.join(self.data_path, im_name))
        im_pil = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.BILINEAR)(im_pil_big)
        im = self.transform(im_pil)  # [-1, 1]

        # Human parse map: raw label indices plus an RGB version.
        parse_name = im_name.replace('image', 'image-parse-v3').replace('.jpg', '.png')
        im_parse_pil_big = Image.open(osp.join(self.data_path, parse_name))
        im_parse_pil = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.NEAREST)(im_parse_pil_big)
        parse = torch.from_numpy(np.array(im_parse_pil)[None]).long()
        im_parse = self.transform(im_parse_pil.convert('RGB'))

        # Group the 20 raw parse labels into semantic_nc channels (13 groups here).
        labels = {
            0:  ['background',  [0, 10]],
            1:  ['hair',        [1, 2]],
            2:  ['face',        [4, 13]],
            3:  ['upper',       [5, 6, 7]],
            4:  ['bottom',      [9, 12]],
            5:  ['left_arm',    [14]],
            6:  ['right_arm',   [15]],
            7:  ['left_leg',    [16]],
            8:  ['right_leg',   [17]],
            9:  ['left_shoe',   [18]],
            10: ['right_shoe',  [19]],
            11: ['socks',       [8]],
            12: ['noise',       [3, 11]]
        }

        # One-hot encode the 20-label parse, then sum into grouped channels.
        parse_map = torch.FloatTensor(20, self.fine_height, self.fine_width).zero_()
        parse_map = parse_map.scatter_(0, parse, 1.0)
        new_parse_map = torch.FloatTensor(self.semantic_nc, self.fine_height, self.fine_width).zero_()
        for i in range(len(labels)):
            for label in labels[i][1]:
                new_parse_map[i] += parse_map[label]

        # Single-channel map holding the grouped label index per pixel.
        parse_onehot = torch.FloatTensor(1, self.fine_height, self.fine_width).zero_()
        for i in range(len(labels)):
            for label in labels[i][1]:
                parse_onehot[0] += parse_map[label] * i

        # Cloth-agnostic parse map, grouped the same way.
        image_parse_agnostic = Image.open(osp.join(self.data_path, parse_name.replace('image-parse-v3', 'image-parse-agnostic-v3.2')))
        image_parse_agnostic = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.NEAREST)(image_parse_agnostic)
        parse_agnostic = torch.from_numpy(np.array(image_parse_agnostic)[None]).long()
        image_parse_agnostic = self.transform(image_parse_agnostic.convert('RGB'))

        parse_agnostic_map = torch.FloatTensor(20, self.fine_height, self.fine_width).zero_()
        parse_agnostic_map = parse_agnostic_map.scatter_(0, parse_agnostic, 1.0)
        new_parse_agnostic_map = torch.FloatTensor(self.semantic_nc, self.fine_height, self.fine_width).zero_()
        for i in range(len(labels)):
            for label in labels[i][1]:
                new_parse_agnostic_map[i] += parse_agnostic_map[label]

        # Upper-clothes mask and the clothes cut out of the person image
        # (white everywhere else).
        pcm = new_parse_map[3:4]
        im_c = im * pcm + (1 - pcm)

        # Rendered OpenPose image.
        pose_name = im_name.replace('image', 'openpose_img').replace('.jpg', '_rendered.png')
        pose_map = Image.open(osp.join(self.data_path, pose_name))
        pose_map = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.BILINEAR)(pose_map)
        pose_map = self.transform(pose_map)

        # OpenPose keypoints; keep the (x, y) columns, drop the confidence.
        pose_name = im_name.replace('image', 'openpose_json').replace('.jpg', '_keypoints.json')
        with open(osp.join(self.data_path, pose_name), 'r') as f:
            pose_label = json.load(f)
            pose_data = pose_label['people'][0]['pose_keypoints_2d']
            pose_data = np.array(pose_data)
            pose_data = pose_data.reshape((-1, 3))[:, :2]

        # DensePose rendering.
        densepose_name = im_name.replace('image', 'image-densepose')
        densepose_map = Image.open(osp.join(self.data_path, densepose_name))
        densepose_map = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.BILINEAR)(densepose_map)
        densepose_map = self.transform(densepose_map)

        # Cloth-agnostic person image, built at full resolution, then resized.
        agnostic = self.get_agnostic(im_pil_big, im_parse_pil_big, pose_data)
        agnostic = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.BILINEAR)(agnostic)
        agnostic = self.transform(agnostic)

        lower_clothes_mask = new_parse_map[4:5, :, :]

        # Unnormalized DensePose tensor in [0, 1]; pixels whose green channel
        # equals 80/255 mark the bottom edge of the torso in the rendering.
        densepose_map_wo_normalize = Image.open(osp.join(self.data_path, densepose_name))
        densepose_map_wo_normalize = self.transform_wo_normalize(densepose_map_wo_normalize)
        densepose_end_of_torso_mask = (densepose_map_wo_normalize[1:2, :, :] == (80 / 255.)).float()
        densepose_end_of_torso_mask = transforms.Resize((self.fine_height, self.fine_width), interpolation=InterpolationMode.NEAREST)(densepose_end_of_torso_mask)

        # Find the lowest torso row (largest normalized y inside the mask);
        # every row above it belongs to the no-loss region.
        grid = self.make_grid(1, self.fine_height, self.fine_width).permute(0, 3, 1, 2)
        grid_x, grid_y = torch.split(grid, 1, dim=1)
        grid_y_max = torch.max(grid_y * densepose_end_of_torso_mask)
        grid_y_max_idx = int(grid_y_max * self.fine_height)

        clothes_no_loss_mask = torch.zeros_like(densepose_end_of_torso_mask)
        clothes_no_loss_mask[:, :grid_y_max_idx, :] = 1

        result = {
            'c_name': c_name,                          # cloth file names
            'im_name': im_name,                        # person image path
            'cloth': c,                                # cloth image, [-1, 1]
            'cloth_mask': cm,                          # binary cloth mask
            'parse_agnostic': new_parse_agnostic_map,  # cloth-agnostic parse
            'densepose': densepose_map,
            'pose': pose_map,
            'agnostic': agnostic,                      # cloth-agnostic person
            'parse_onehot': parse_onehot,              # grouped label indices
            'parse': new_parse_map,                    # grouped one-hot parse
            'pcm': pcm,                                # upper-clothes mask
            'parse_cloth': im_c,                       # clothes worn on person
            'image': im,                               # person image, [-1, 1]
            'lower_clothes_mask': lower_clothes_mask,
            'clothes_no_loss_mask': clothes_no_loss_mask
        }

        return result

    def __len__(self):
        return len(self.im_names)


class CPDataLoader(object):
    def __init__(self, opt, dataset):
        super(CPDataLoader, self).__init__()

        # RandomSampler already shuffles, so the loader itself must not;
        # without a sampler, keep the sequential order.
        if opt.shuffle:
            train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
        else:
            train_sampler = None

        self.data_loader = torch.utils.data.DataLoader(
            dataset, batch_size=opt.batch_size, shuffle=False,
            num_workers=opt.workers, pin_memory=True, drop_last=True,
            sampler=train_sampler)
        self.dataset = dataset
        self.data_iter = iter(self.data_loader)

    def next_batch(self):
        # Restart the iterator when an epoch ends.
        try:
            batch = next(self.data_iter)
        except StopIteration:
            self.data_iter = iter(self.data_loader)
            batch = next(self.data_iter)

        return batch
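

# Minimal usage sketch, not part of the original pipeline: the option names
# below (dataroot, datamode, data_list, fine_height, fine_width, semantic_nc,
# shuffle, batch_size, workers) mirror the attributes this file reads from
# `opt`; the default paths and sizes are assumptions for a VITON-HD-style
# dataset and may need adjusting.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', default='./data')
    parser.add_argument('--datamode', default='train')
    parser.add_argument('--data_list', default='train_pairs.txt')
    parser.add_argument('--fine_height', type=int, default=1024)
    parser.add_argument('--fine_width', type=int, default=768)
    parser.add_argument('--semantic_nc', type=int, default=13)
    parser.add_argument('--shuffle', action='store_true')
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--workers', type=int, default=4)
    opt = parser.parse_args()

    dataset = CPDataset(opt)
    loader = CPDataLoader(opt, dataset)
    batch = loader.next_batch()
    print('image:', batch['image'].shape)            # (B, 3, H, W)
    print('cloth:', batch['cloth']['paired'].shape)  # (B, 3, H, W)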