This commit is contained in:
chenxuanhong
2022-02-13 14:20:26 +08:00
parent 9429c6d7be
commit a148db410c
11 changed files with 200 additions and 145 deletions
+7 -6
View File
@@ -2,7 +2,7 @@
"GUI.py": 1644423287.9844918,
"test.py": 1643529962.5602193,
"train.py": 1643397924.974299,
"components\\Generator.py": 1642347735.351465,
"components\\Generator.py": 1644689001.9005148,
"components\\projected_discriminator.py": 1642348101.4661522,
"components\\pg_modules\\blocks.py": 1640773190.0,
"components\\pg_modules\\diffaug.py": 1640773190.0,
@@ -60,7 +60,7 @@
"face_crop.py": 1643789609.1834445,
"face_crop_video.py": 1643815024.5516832,
"similarity.py": 1643269705.1073737,
"train_multigpu.py": 1644509438.008675,
"train_multigpu.py": 1644689820.5085416,
"components\\arcface_decoder.py": 1643396144.2575414,
"components\\Generator_nobias.py": 1643179001.810856,
"data_tools\\data_loader_VGGFace2HQ_multigpu.py": 1644330414.9587426,
@@ -102,13 +102,14 @@
"wandb\\run-20220129_035624-3hmwgcgw\\files\\config.yaml": 1643426465.6088357,
"dnnlib\\util.py": 1640773190.0,
"dnnlib\\__init__.py": 1640773190.0,
"components\\Generator_ori.py": 1644229508.0031855,
"components\\Generator_ori.py": 1644689174.414655,
"losses\\cos.py": 1644229583.4023254,
"data_tools\\data_loader_VGGFace2HQ_multigpu1.py": 1644297868.397411,
"speed_test.py": 1644476745.605093,
"components\\DeConv_Invo.py": 1644426607.1588645,
"components\\Generator_reduce_up.py": 1644477248.9149294,
"components\\Generator_upsample.py": 1644426070.2325442,
"components\\Generator_reduce_up.py": 1644688655.2096283,
"components\\Generator_upsample.py": 1644689723.8293872,
"components\\misc\\Involution.py": 1644509321.5267963,
"train_yamls\\train_Invoup.yaml": 1644550037.4785244
"train_yamls\\train_Invoup.yaml": 1644689981.9794765,
"flops.py": 1644689428.618648
}
+1 -1
View File
@@ -18,7 +18,7 @@
***OR***
- wandb (pip install wandb)
- Do not need to install tensorboard and tensorboardX any more.
***Logger is an optional setting, which can be adjusted with train.py --logger [wandb, tensorboard, None]***
+15 -15
View File
@@ -5,7 +5,7 @@
# Created Date: Sunday January 16th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Sunday, 16th January 2022 11:42:14 pm
# Last Modified: Sunday, 13th February 2022 2:03:21 am
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
@@ -166,21 +166,21 @@ class Generator(nn.Module):
# if isinstance(layer,nn.Conv2d):
# nn.init.xavier_uniform_(layer.weight)
def forward(self, input, id):
x = input # 3*224*224
skip1 = self.first_layer(x)
skip2 = self.down1(skip1)
skip3 = self.down2(skip2)
skip4 = self.down3(skip3)
res = self.down4(skip4)
def forward(self, img, id):
# x = input # 3*224*224
res = self.first_layer(img)
res = self.down1(res)
res = self.down2(res)
res = self.down3(res)
res = self.down4(res)
for i in range(len(self.BottleNeck)):
x = self.BottleNeck[i](res, id)
res = self.BottleNeck[i](res, id)
x = self.up4(x)
x = self.up3(x)
x = self.up2(x)
x = self.up1(x)
x = self.last_layer(x)
res = self.up4(res)
res = self.up3(res)
res = self.up2(res)
res = self.up1(res)
res = self.last_layer(res)
return x
return res
@@ -5,16 +5,14 @@
# Created Date: Sunday January 16th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Wednesday, 26th January 2022 2:36:41 pm
# Last Modified: Sunday, 13th February 2022 3:03:05 am
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
from audioop import bias
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from components.DeConv_Invo import DeConv
class InstanceNorm(nn.Module):
def __init__(self, epsilon=1e-8):
@@ -61,7 +59,7 @@ class ResnetBlock_Adain(nn.Module):
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding = p, bias=False), InstanceNorm()]
conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding = p), InstanceNorm()]
self.conv1 = nn.Sequential(*conv1)
self.style1 = ApplyStyle(latent_size, dim)
self.act1 = activation
@@ -76,7 +74,7 @@ class ResnetBlock_Adain(nn.Module):
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=False), InstanceNorm()]
conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
self.conv2 = nn.Sequential(*conv2)
self.style2 = ApplyStyle(latent_size, dim)
@@ -101,59 +99,57 @@ class Generator(nn.Module):
chn = kwargs["g_conv_dim"]
k_size = kwargs["g_kernel_size"]
res_num = kwargs["res_num"]
in_channel = kwargs["in_channel"]
padding_size= int((k_size -1)/2)
padding_type= 'reflect'
activation = nn.ReLU(True)
self.first_layer = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64), activation)
self.first_layer = nn.Sequential(nn.Conv2d(3, in_channel, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(in_channel), activation)
### downsample
self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128), activation)
self.down1 = nn.Sequential(nn.Conv2d(in_channel, in_channel*2, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(in_channel*2), activation)
self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256), activation)
self.down2 = nn.Sequential(nn.Conv2d(in_channel*2, in_channel*4, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(in_channel*4), activation)
self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512), activation)
self.down3 = nn.Sequential(nn.Conv2d(in_channel*4, in_channel*8, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(in_channel*8), activation)
self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512), activation)
self.down4 = nn.Sequential(nn.Conv2d(in_channel*8, in_channel*8, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(in_channel*8), activation)
### resnet blocks
BN = []
for i in range(res_num):
for _ in range(res_num):
BN += [
ResnetBlock_Adain(512, latent_size=chn, padding_type=padding_type, activation=activation)]
ResnetBlock_Adain(in_channel*8, latent_size=chn,
padding_type=padding_type, activation=activation)]
self.BottleNeck = nn.Sequential(*BN)
self.up4 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512), activation
DeConv(in_channel*8,in_channel*8,3),
nn.BatchNorm2d(in_channel*8), activation
)
self.up3 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256), activation
DeConv(in_channel*8,in_channel*4,3),
nn.BatchNorm2d(in_channel*4), activation
)
self.up2 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128), activation
DeConv(in_channel*4,in_channel*2,3),
nn.BatchNorm2d(in_channel*2), activation
)
self.up1 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64), activation
DeConv(in_channel*2,in_channel,3),
nn.BatchNorm2d(in_channel), activation
)
self.last_layer = nn.Sequential(nn.Conv2d(64, 3, kernel_size=3, padding=1, bias=False))
self.last_layer = nn.Sequential(nn.Conv2d(in_channel, 3, kernel_size=3, padding=1))
# self.__weights_init__()
@@ -167,21 +163,21 @@ class Generator(nn.Module):
# if isinstance(layer,nn.Conv2d):
# nn.init.xavier_uniform_(layer.weight)
def forward(self, input, id):
x = input # 3*224*224
skip1 = self.first_layer(x)
skip2 = self.down1(skip1)
skip3 = self.down2(skip2)
skip4 = self.down3(skip3)
res = self.down4(skip4)
def forward(self, img, id):
# x = input # 3*224*224
res = self.first_layer(img)
res = self.down1(res)
res = self.down2(res)
res = self.down3(res)
res = self.down4(res)
for i in range(len(self.BottleNeck)):
x = self.BottleNeck[i](res, id)
res = self.BottleNeck[i](res, id)
x = self.up4(x)
x = self.up3(x)
x = self.up2(x)
x = self.up1(x)
x = self.last_layer(x)
res = self.up4(res)
res = self.up3(res)
res = self.up2(res)
res = self.up1(res)
res = self.last_layer(res)
return x
return res
@@ -5,7 +5,7 @@
# Created Date: Sunday January 16th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Thursday, 10th February 2022 3:14:08 pm
# Last Modified: Sunday, 13th February 2022 1:35:21 pm
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
@@ -116,7 +116,9 @@ class Generator(nn.Module):
activation = nn.ReLU(True)
self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(3, 64, kernel_size=7, padding=0, bias=False),
# self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(3, 64, kernel_size=7, padding=0, bias=False),
# nn.BatchNorm2d(64), activation)
self.first_layer = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64), activation)
### downsample
self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
@@ -157,9 +159,9 @@ class Generator(nn.Module):
DeConv(128,64,3),
nn.BatchNorm2d(64), activation
)
self.last_layer = nn.Sequential(nn.ReflectionPad2d(3),
nn.Conv2d(64, 3, kernel_size=7, padding=0))
self.last_layer = nn.Sequential(nn.Conv2d(64, 3, kernel_size=3, padding=1))
# self.last_layer = nn.Sequential(nn.ReflectionPad2d(3),
# nn.Conv2d(64, 3, kernel_size=7, padding=0))
# self.__weights_init__()
@@ -173,21 +175,20 @@ class Generator(nn.Module):
# if isinstance(layer,nn.Conv2d):
# nn.init.xavier_uniform_(layer.weight)
def forward(self, input, id):
x = input # 3*224*224
skip1 = self.first_layer(x)
skip2 = self.down1(skip1)
skip3 = self.down2(skip2)
skip4 = self.down3(skip3)
res = self.down4(skip4)
def forward(self, img, id):
res = self.first_layer(img)
res = self.down1(res)
res = self.down2(res)
res = self.down3(res)
res = self.down4(res)
for i in range(len(self.BottleNeck)):
x = self.BottleNeck[i](res, id)
res = self.BottleNeck[i](res, id)
x = self.up4(x)
x = self.up3(x)
x = self.up2(x)
x = self.up1(x)
x = self.last_layer(x)
res = self.up4(res)
res = self.up3(res)
res = self.up2(res)
res = self.up1(res)
res = self.last_layer(res)
return x
return res
+14 -15
View File
@@ -5,7 +5,7 @@
# Created Date: Sunday January 16th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Monday, 7th February 2022 6:25:07 pm
# Last Modified: Sunday, 13th February 2022 2:06:14 am
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
@@ -167,21 +167,20 @@ class Generator(nn.Module):
# if isinstance(layer,nn.Conv2d):
# nn.init.xavier_uniform_(layer.weight)
def forward(self, input, id):
x = input # 3*224*224
skip1 = self.first_layer(x)
skip2 = self.down1(skip1)
skip3 = self.down2(skip2)
skip4 = self.down3(skip3)
res = self.down4(skip4)
def forward(self, img, id):
res = self.first_layer(img)
res = self.down1(res)
res = self.down2(res)
res = self.down3(res)
res = self.down4(res)
for i in range(len(self.BottleNeck)):
x = self.BottleNeck[i](res, id)
res = self.BottleNeck[i](res, id)
x = self.up4(x)
x = self.up3(x)
x = self.up2(x)
x = self.up1(x)
x = self.last_layer(x)
res = self.up4(res)
res = self.up3(res)
res = self.up2(res)
res = self.up1(res)
res = self.last_layer(res)
return x
return res
+19 -20
View File
@@ -5,7 +5,7 @@
# Created Date: Sunday January 16th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Monday, 24th January 2022 6:47:22 pm
# Last Modified: Sunday, 13th February 2022 3:47:59 am
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
@@ -117,19 +117,19 @@ class Generator(nn.Module):
activation = nn.ReLU(True)
self.stem = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1),
self.first_layer = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64), activation)
### downsample
self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128), activation)
self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256), activation)
self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512), activation)
self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512), activation)
### resnet blocks
@@ -177,21 +177,20 @@ class Generator(nn.Module):
# if isinstance(layer,nn.Conv2d):
# nn.init.xavier_uniform_(layer.weight)
def forward(self, input, id):
x = input # 3*224*224
skip1 = self.stem(x)
skip2 = self.down1(skip1)
skip3 = self.down2(skip2)
skip4 = self.down3(skip3)
res = self.down4(skip4)
def forward(self, img, id):
res = self.first_layer(img)
res = self.down1(res)
res = self.down2(res)
res = self.down3(res)
res = self.down4(res)
for i in range(len(self.BottleNeck)):
x = self.BottleNeck[i](res, id)
res = self.BottleNeck[i](res, id)
x = self.up4(x)
x = self.up3(x)
x = self.up2(x)
x = self.up1(x)
x = self.last_layer(x)
res = self.up4(res)
res = self.up3(res)
res = self.up2(res)
res = self.up1(res)
res = self.last_layer(res)
return x
return res
+21 -19
View File
@@ -5,7 +5,7 @@
# Created Date: Sunday January 16th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Thursday, 10th February 2022 1:01:09 am
# Last Modified: Sunday, 13th February 2022 2:15:23 am
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
@@ -104,9 +104,11 @@ class Generator(nn.Module):
padding_type= 'reflect'
activation = nn.ReLU(True)
self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(3, 64, kernel_size=7, padding=0, bias=False),
self.first_layer = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64), activation)
# self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(3, 64, kernel_size=3, padding=0, bias=False),
# nn.BatchNorm2d(64), activation)
### downsample
self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128), activation)
@@ -146,8 +148,9 @@ class Generator(nn.Module):
DeConv(128,64,3),
nn.BatchNorm2d(64), activation
)
self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, 3, kernel_size=7, padding=0))
self.last_layer = nn.Sequential(nn.Conv2d(64, 3, kernel_size=3, padding=1))
# self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, 3, kernel_size=3, padding=0))
# self.__weights_init__()
@@ -161,21 +164,20 @@ class Generator(nn.Module):
# if isinstance(layer,nn.Conv2d):
# nn.init.xavier_uniform_(layer.weight)
def forward(self, input, id):
x = input # 3*224*224
skip1 = self.first_layer(x)
skip2 = self.down1(skip1)
skip3 = self.down2(skip2)
skip4 = self.down3(skip3)
res = self.down4(skip4)
def forward(self, img, id):
res = self.first_layer(img)
res = self.down1(res)
res = self.down2(res)
res = self.down3(res)
res = self.down4(res)
for i in range(len(self.BottleNeck)):
x = self.BottleNeck[i](res, id)
res = self.BottleNeck[i](res, id)
x = self.up4(x)
x = self.up3(x)
x = self.up2(x)
x = self.up1(x)
x = self.last_layer(x)
res = self.up4(res)
res = self.up3(res)
res = self.up2(res)
res = self.up1(res)
res = self.last_layer(res)
return x
return res
+56
View File
@@ -0,0 +1,56 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: flops.py
# Created Date: Sunday February 13th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Sunday, 13th February 2022 1:37:15 pm
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
import os
import torch
from thop import profile
from thop import clever_format
if __name__ == '__main__':
script = "Generator_config"
class_name = "Generator"
arcface_ckpt= "arcface_ckpt/arcface_checkpoint.tar"
model_config={
"g_conv_dim": 512,
"g_kernel_size": 3,
"in_channel":64,
"res_num": 9
}
os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
print("GPU used : ", os.environ['CUDA_VISIBLE_DEVICES'])
gscript_name = "components." + script
package = __import__(gscript_name, fromlist=True)
gen_class= getattr(package, class_name)
gen = gen_class(**model_config)
model = gen.cuda().eval().requires_grad_(False)
arcface1 = torch.load(arcface_ckpt, map_location=torch.device("cpu"))
arcface = arcface1['model'].module
arcface = arcface.cuda()
arcface.eval().requires_grad_(False)
attr_img = torch.rand((1,3,512,512)).cuda()
id_img = torch.rand((1,3,112,112)).cuda()
id_latent = torch.rand((1,512)).cuda()
macs, params = profile(model, inputs=(attr_img, id_latent))
macs, params = clever_format([macs, params], "%.3f")
print(macs)
print(params)
+3 -2
View File
@@ -5,7 +5,7 @@
# Created Date: Thursday February 10th 2022
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Thursday, 10th February 2022 3:05:44 pm
# Last Modified: Sunday, 13th February 2022 3:04:07 am
# Modified By: Chen Xuanhong
# Copyright (c) 2022 Shanghai Jiao Tong University
#############################################################
@@ -18,12 +18,13 @@ import torch
if __name__ == '__main__':
script = "Generator_reduce_up"
script = "Generator_config"
class_name = "Generator"
arcface_ckpt= "arcface_ckpt/arcface_checkpoint.tar"
model_config={
"g_conv_dim": 512,
"g_kernel_size": 3,
"in_channel":16,
"res_num": 9
}
+3 -3
View File
@@ -5,7 +5,7 @@
# Created Date: Tuesday April 28th 2020
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Friday, 11th February 2022 12:10:37 am
# Last Modified: Sunday, 13th February 2022 2:16:50 am
# Modified By: Chen Xuanhong
# Copyright (c) 2020 Shanghai Jiao Tong University
#############################################################
@@ -31,7 +31,7 @@ def getParameters():
parser = argparse.ArgumentParser()
# general settings
parser.add_argument('-v', '--version', type=str, default='invoup1',
parser.add_argument('-v', '--version', type=str, default='invoup2',
help="version name for train, test, finetune")
parser.add_argument('-t', '--tag', type=str, default='invo_upsample',
help="tag for current experiment")
@@ -46,7 +46,7 @@ def getParameters():
# training
parser.add_argument('--experiment_description', type=str,
default="使用involution作为上采样")
default="generator网络前向部分残差的赋值错误,现纠正,重新训练网络")
parser.add_argument('--train_yaml', type=str, default="train_Invoup.yaml")