update
@@ -0,0 +1,325 @@
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm


### single layers


def conv2d(*args, **kwargs):
    return spectral_norm(nn.Conv2d(*args, **kwargs))


def convTranspose2d(*args, **kwargs):
    return spectral_norm(nn.ConvTranspose2d(*args, **kwargs))


def embedding(*args, **kwargs):
    return spectral_norm(nn.Embedding(*args, **kwargs))


def linear(*args, **kwargs):
    return spectral_norm(nn.Linear(*args, **kwargs))


def NormLayer(c, mode='batch'):
    if mode == 'group':
        return nn.GroupNorm(c//2, c)
    elif mode == 'batch':
        return nn.BatchNorm2d(c)


### Activations


class GLU(nn.Module):
    def forward(self, x):
        nc = x.size(1)
        assert nc % 2 == 0, 'channels dont divide 2!'
        nc = int(nc/2)
        return x[:, :nc] * torch.sigmoid(x[:, nc:])


class Swish(nn.Module):
    def forward(self, feat):
        return feat * torch.sigmoid(feat)


### Upblocks


class InitLayer(nn.Module):
    def __init__(self, nz, channel, sz=4):
        super().__init__()

        self.init = nn.Sequential(
            convTranspose2d(nz, channel*2, sz, 1, 0, bias=False),
            NormLayer(channel*2),
            GLU(),
        )

    def forward(self, noise):
        noise = noise.view(noise.shape[0], -1, 1, 1)
        return self.init(noise)


def UpBlockSmall(in_planes, out_planes):
    block = nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False),
        NormLayer(out_planes*2), GLU())
    return block


class UpBlockSmallCond(nn.Module):
    def __init__(self, in_planes, out_planes, z_dim):
        super().__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.conv = conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False)

        which_bn = functools.partial(CCBN, which_linear=linear, input_size=z_dim)
        self.bn = which_bn(2*out_planes)
        self.act = GLU()

    def forward(self, x, c):
        x = self.up(x)
        x = self.conv(x)
        x = self.bn(x, c)
        x = self.act(x)
        return x


def UpBlockBig(in_planes, out_planes):
    block = nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False),
        NoiseInjection(),
        NormLayer(out_planes*2), GLU(),
        conv2d(out_planes, out_planes*2, 3, 1, 1, bias=False),
        NoiseInjection(),
        NormLayer(out_planes*2), GLU()
        )
    return block


class UpBlockBigCond(nn.Module):
    def __init__(self, in_planes, out_planes, z_dim):
        super().__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.conv1 = conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False)
        self.conv2 = conv2d(out_planes, out_planes*2, 3, 1, 1, bias=False)

        which_bn = functools.partial(CCBN, which_linear=linear, input_size=z_dim)
        self.bn1 = which_bn(2*out_planes)
        self.bn2 = which_bn(2*out_planes)
        self.act = GLU()
        self.noise = NoiseInjection()

    def forward(self, x, c):
        # block 1
        x = self.up(x)
        x = self.conv1(x)
        x = self.noise(x)
        x = self.bn1(x, c)
        x = self.act(x)

        # block 2
        x = self.conv2(x)
        x = self.noise(x)
        x = self.bn2(x, c)
        x = self.act(x)

        return x


class SEBlock(nn.Module):
    def __init__(self, ch_in, ch_out):
        super().__init__()
        self.main = nn.Sequential(
            nn.AdaptiveAvgPool2d(4),
            conv2d(ch_in, ch_out, 4, 1, 0, bias=False),
            Swish(),
            conv2d(ch_out, ch_out, 1, 1, 0, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, feat_small, feat_big):
        return feat_big * self.main(feat_small)


### Downblocks


class SeparableConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, bias=False):
        super(SeparableConv2d, self).__init__()
        self.depthwise = conv2d(in_channels, in_channels, kernel_size=kernel_size,
                                groups=in_channels, bias=bias, padding=1)
        self.pointwise = conv2d(in_channels, out_channels,
                                kernel_size=1, bias=bias)

    def forward(self, x):
        out = self.depthwise(x)
        out = self.pointwise(out)
        return out


class DownBlock(nn.Module):
    def __init__(self, in_planes, out_planes, separable=False):
        super().__init__()
        if not separable:
            self.main = nn.Sequential(
                conv2d(in_planes, out_planes, 4, 2, 1),
                NormLayer(out_planes),
                nn.LeakyReLU(0.2, inplace=True),
            )
        else:
            self.main = nn.Sequential(
                SeparableConv2d(in_planes, out_planes, 3),
                NormLayer(out_planes),
                nn.LeakyReLU(0.2, inplace=True),
                nn.AvgPool2d(2, 2),
            )

    def forward(self, feat):
        return self.main(feat)


class DownBlockPatch(nn.Module):
    def __init__(self, in_planes, out_planes, separable=False):
        super().__init__()
        self.main = nn.Sequential(
            DownBlock(in_planes, out_planes, separable),
            conv2d(out_planes, out_planes, 1, 1, 0, bias=False),
            NormLayer(out_planes),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, feat):
        return self.main(feat)


### CSM


class ResidualConvUnit(nn.Module):
    def __init__(self, cin, activation, bn):
        super().__init__()
        self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1, bias=True)
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        return self.skip_add.add(self.conv(x), x)


class FeatureFusionBlock(nn.Module):
    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, lowest=False):
        super().__init__()

        self.deconv = deconv
        self.align_corners = align_corners

        self.expand = expand
        out_features = features
        if self.expand == True:
            out_features = features//2

        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs):
        output = xs[0]

        if len(xs) == 2:
            output = self.skip_add.add(output, xs[1])

        output = nn.functional.interpolate(
            output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
        )

        output = self.out_conv(output)

        return output
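

# --- Illustrative shape sketch (not part of the original commit) ---
# A FeatureFusionBlock optionally adds a same-shaped lateral input, upsamples
# the result by 2x, and mixes channels with a 1x1 conv (halving them when
# expand=True). The tensor sizes below are assumptions for the example only.
def _demo_feature_fusion_block():
    ffb = FeatureFusionBlock(features=256, activation=nn.ReLU(False), expand=True)
    deep = torch.randn(1, 256, 16, 16)      # coarser feature map, already fused
    skip = torch.randn(1, 256, 16, 16)      # lateral feature map at the same scale
    fused = ffb(deep, skip)
    assert fused.shape == (1, 128, 32, 32)  # channels halved, resolution doubled
    return fused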


### Misc


class NoiseInjection(nn.Module):
    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1), requires_grad=True)

    def forward(self, feat, noise=None):
        if noise is None:
            batch, _, height, width = feat.shape
            noise = torch.randn(batch, 1, height, width).to(feat.device)

        return feat + self.weight * noise


class CCBN(nn.Module):
    ''' conditional batchnorm '''
    def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1):
        super().__init__()
        self.output_size, self.input_size = output_size, input_size

        # Prepare gain and bias layers
        self.gain = which_linear(input_size, output_size)
        self.bias = which_linear(input_size, output_size)

        # epsilon to avoid dividing by 0
        self.eps = eps
        # Momentum
        self.momentum = momentum

        self.register_buffer('stored_mean', torch.zeros(output_size))
        self.register_buffer('stored_var', torch.ones(output_size))

    def forward(self, x, y):
        # Calculate class-conditional gains and biases
        gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1)
        bias = self.bias(y).view(y.size(0), -1, 1, 1)
        out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,
                           self.training, self.momentum, self.eps)
        return out * gain + bias
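

# --- Illustrative sketch (not part of the original commit) ---
# CCBN normalizes with running statistics only and predicts a per-sample gain
# and bias from the conditioning vector y (class-conditional batchnorm in the
# BigGAN style). The sizes below are assumptions for the example only.
def _demo_ccbn():
    bn = CCBN(output_size=128, input_size=256, which_linear=linear)
    x = torch.randn(4, 128, 32, 32)     # feature maps to be normalized
    y = torch.randn(4, 256)             # conditioning embedding
    out = bn(x, y)
    assert out.shape == x.shape
    return out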


class Interpolate(nn.Module):
    """Interpolation module."""

    def __init__(self, size, mode='bilinear', align_corners=False):
        """Init.
        Args:
            size (int or tuple): target output size
            mode (str): interpolation mode
            align_corners (bool): passed to F.interpolate
        """
        super(Interpolate, self).__init__()

        self.interp = nn.functional.interpolate
        self.size = size
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Forward pass.
        Args:
            x (tensor): input
        Returns:
            tensor: interpolated data
        """

        x = self.interp(
            x,
            size=self.size,
            mode=self.mode,
            align_corners=self.align_corners,
        )

        return x
@@ -0,0 +1,76 @@
# Differentiable Augmentation for Data-Efficient GAN Training
# Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han
# https://arxiv.org/pdf/2006.10738

import torch
import torch.nn.functional as F


def DiffAugment(x, policy='', channels_first=True):
    if policy:
        if not channels_first:
            x = x.permute(0, 3, 1, 2)
        for p in policy.split(','):
            for f in AUGMENT_FNS[p]:
                x = f(x)
        if not channels_first:
            x = x.permute(0, 2, 3, 1)
        x = x.contiguous()
    return x


def rand_brightness(x):
    x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
    return x


def rand_saturation(x):
    x_mean = x.mean(dim=1, keepdim=True)
    x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
    return x


def rand_contrast(x):
    x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
    return x


def rand_translation(x, ratio=0.125):
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
    return x


def rand_cutout(x, ratio=0.2):
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)
    return x


AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
}
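

# --- Illustrative usage sketch (not part of the original commit) ---
# DiffAugment applies the same differentiable augmentations to real and fake
# batches before the discriminator, so gradients still flow back to the
# generator. The batch size and resolution below are example values only.
if __name__ == '__main__':
    imgs = (torch.rand(4, 3, 256, 256) * 2 - 1).requires_grad_(True)  # fake batch in [-1, 1]
    aug = DiffAugment(imgs, policy='color,translation,cutout')
    assert aug.shape == imgs.shape          # augmentations preserve the shape
    aug.sum().backward()                    # and stay differentiable w.r.t. the input
    assert imgs.grad is not None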
@@ -0,0 +1,186 @@
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from components.pg_modules.blocks import DownBlock, DownBlockPatch, conv2d
from components.pg_modules.projector import F_RandomProj
from components.pg_modules.diffaug import DiffAugment


class SingleDisc(nn.Module):
    def __init__(self, nc=None, ndf=None, start_sz=256, end_sz=8, head=None, separable=False, patch=False):
        super().__init__()
        channel_dict = {4: 512, 8: 512, 16: 256, 32: 128, 64: 64, 128: 64,
                        256: 32, 512: 16, 1024: 8}

        # interpolate for start sz that are not powers of two
        if start_sz not in channel_dict.keys():
            sizes = np.array(list(channel_dict.keys()))
            start_sz = sizes[np.argmin(abs(sizes - start_sz))]
        self.start_sz = start_sz

        # if given ndf, allocate all layers with the same ndf
        if ndf is None:
            nfc = channel_dict
        else:
            nfc = {k: ndf for k, v in channel_dict.items()}

        # for feature map discriminators with nfc not in channel_dict
        # this is the case for the pretrained backbone (midas.pretrained)
        if nc is not None and head is None:
            nfc[start_sz] = nc

        layers = []

        # Head if the initial input is the full modality
        if head:
            layers += [conv2d(nc, nfc[256], 3, 1, 1, bias=False),
                       nn.LeakyReLU(0.2, inplace=True)]

        # Down Blocks
        DB = partial(DownBlockPatch, separable=separable) if patch else partial(DownBlock, separable=separable)
        while start_sz > end_sz:
            layers.append(DB(nfc[start_sz], nfc[start_sz//2]))
            start_sz = start_sz // 2

        layers.append(conv2d(nfc[end_sz], 1, 4, 1, 0, bias=False))
        self.main = nn.Sequential(*layers)

    def forward(self, x, c):
        return self.main(x)


class SingleDiscCond(nn.Module):
    def __init__(self, nc=None, ndf=None, start_sz=256, end_sz=8, head=None, separable=False, patch=False, c_dim=1000, cmap_dim=64, embedding_dim=128):
        super().__init__()
        self.cmap_dim = cmap_dim

        # midas channels
        channel_dict = {4: 512, 8: 512, 16: 256, 32: 128, 64: 64, 128: 64,
                        256: 32, 512: 16, 1024: 8}

        # interpolate for start sz that are not powers of two
        if start_sz not in channel_dict.keys():
            sizes = np.array(list(channel_dict.keys()))
            start_sz = sizes[np.argmin(abs(sizes - start_sz))]
        self.start_sz = start_sz

        # if given ndf, allocate all layers with the same ndf
        if ndf is None:
            nfc = channel_dict
        else:
            nfc = {k: ndf for k, v in channel_dict.items()}

        # for feature map discriminators with nfc not in channel_dict
        # this is the case for the pretrained backbone (midas.pretrained)
        if nc is not None and head is None:
            nfc[start_sz] = nc

        layers = []

        # Head if the initial input is the full modality
        if head:
            layers += [conv2d(nc, nfc[256], 3, 1, 1, bias=False),
                       nn.LeakyReLU(0.2, inplace=True)]

        # Down Blocks
        DB = partial(DownBlockPatch, separable=separable) if patch else partial(DownBlock, separable=separable)
        while start_sz > end_sz:
            layers.append(DB(nfc[start_sz], nfc[start_sz//2]))
            start_sz = start_sz // 2
        self.main = nn.Sequential(*layers)

        # additions for conditioning on class information
        self.cls = conv2d(nfc[end_sz], self.cmap_dim, 4, 1, 0, bias=False)
        self.embed = nn.Embedding(num_embeddings=c_dim, embedding_dim=embedding_dim)
        self.embed_proj = nn.Sequential(
            nn.Linear(self.embed.embedding_dim, self.cmap_dim),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, x, c):
        h = self.main(x)
        out = self.cls(h)

        # conditioning via projection
        cmap = self.embed_proj(self.embed(c.argmax(1))).unsqueeze(-1).unsqueeze(-1)
        out = (out * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        return out


class MultiScaleD(nn.Module):
    def __init__(
        self,
        channels,
        resolutions,
        num_discs=1,
        proj_type=2,  # 0 = no projection, 1 = cross channel mixing, 2 = cross scale mixing
        cond=0,
        separable=False,
        patch=False,
        **kwargs,
    ):
        super().__init__()

        assert num_discs in [1, 2, 3, 4]

        # the first disc is on the lowest level of the backbone
        self.disc_in_channels = channels[:num_discs]
        self.disc_in_res = resolutions[:num_discs]
        Disc = SingleDiscCond if cond else SingleDisc

        mini_discs = []
        for i, (cin, res) in enumerate(zip(self.disc_in_channels, self.disc_in_res)):
            start_sz = res if not patch else 16
            mini_discs += [str(i), Disc(nc=cin, start_sz=start_sz, end_sz=8, separable=separable, patch=patch)],
        self.mini_discs = nn.ModuleDict(mini_discs)

    def forward(self, features, c):
        all_logits = []
        for k, disc in self.mini_discs.items():
            all_logits.append(disc(features[k], c).view(features[k].size(0), -1))

        all_logits = torch.cat(all_logits, dim=1)
        return all_logits


class ProjectedDiscriminator(torch.nn.Module):
    def __init__(
        self,
        diffaug=True,
        interp224=True,
        backbone_kwargs={},
        **kwargs
    ):
        super().__init__()
        self.diffaug = diffaug
        self.interp224 = interp224
        self.feature_network = F_RandomProj(**backbone_kwargs)
        self.discriminator = MultiScaleD(
            channels=self.feature_network.CHANNELS,
            resolutions=self.feature_network.RESOLUTIONS,
            **backbone_kwargs,
        )

    def train(self, mode=True):
        self.feature_network = self.feature_network.train(False)
        self.discriminator = self.discriminator.train(mode)
        return self

    def eval(self):
        return self.train(False)

    def forward(self, x, c):
        if self.diffaug:
            x = DiffAugment(x, policy='color,translation,cutout')

        if self.interp224:
            x = F.interpolate(x, 224, mode='bilinear', align_corners=False)

        features = self.feature_network(x)
        if isinstance(features, tuple):
            # with proj_type == 2 the projector also returns the raw backbone features
            features, _ = features
        logits = self.discriminator(features, c)

        return logits
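

# --- Illustrative usage sketch (not part of the original commit) ---
# The discriminator only sees backbone features; the per-scale logit maps are
# concatenated and typically fed into a hinge loss. The kwargs, batch size and
# the loss expression below are assumptions for the example only (and the timm
# backbone weights must be downloadable).
if __name__ == '__main__':
    D = ProjectedDiscriminator(backbone_kwargs={'num_discs': 4, 'cond': 0})
    fake = torch.randn(2, 3, 256, 256)
    c = torch.zeros(2, 0)                     # unused in the unconditional case
    logits = D(fake, c)                       # [2, total number of per-scale logits]
    g_loss = (-logits).mean()                 # generator side of a hinge loss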
@@ -0,0 +1,178 @@
# original implementation: https://github.com/odegeasslbc/FastGAN-pytorch/blob/main/models.py
#
# modified by Axel Sauer for "Projected GANs Converge Faster"
#
import torch.nn as nn
from components.pg_modules.blocks import (InitLayer, UpBlockBig, UpBlockBigCond, UpBlockSmall, UpBlockSmallCond, SEBlock, conv2d)


def normalize_second_moment(x, dim=1, eps=1e-8):
    return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()


class DummyMapping(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, z, c, **kwargs):
        return z.unsqueeze(1)  # to fit the StyleGAN API


class FastganSynthesis(nn.Module):
    def __init__(self, ngf=128, z_dim=256, nc=3, img_resolution=256, lite=False):
        super().__init__()
        self.img_resolution = img_resolution
        self.z_dim = z_dim

        # channel multiplier
        nfc_multi = {2: 16, 4: 16, 8: 8, 16: 4, 32: 2, 64: 2, 128: 1, 256: 0.5,
                     512: 0.25, 1024: 0.125}
        nfc = {}
        for k, v in nfc_multi.items():
            nfc[k] = int(v*ngf)

        # layers
        self.init = InitLayer(z_dim, channel=nfc[2], sz=4)

        UpBlock = UpBlockSmall if lite else UpBlockBig

        self.feat_8 = UpBlock(nfc[4], nfc[8])
        self.feat_16 = UpBlock(nfc[8], nfc[16])
        self.feat_32 = UpBlock(nfc[16], nfc[32])
        self.feat_64 = UpBlock(nfc[32], nfc[64])
        self.feat_128 = UpBlock(nfc[64], nfc[128])
        self.feat_256 = UpBlock(nfc[128], nfc[256])

        self.se_64 = SEBlock(nfc[4], nfc[64])
        self.se_128 = SEBlock(nfc[8], nfc[128])
        self.se_256 = SEBlock(nfc[16], nfc[256])

        self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)

        if img_resolution > 256:
            self.feat_512 = UpBlock(nfc[256], nfc[512])
            self.se_512 = SEBlock(nfc[32], nfc[512])
        if img_resolution > 512:
            self.feat_1024 = UpBlock(nfc[512], nfc[1024])

    def forward(self, input, c, **kwargs):
        # map noise to hypersphere as in "Progressive Growing of GANs"
        input = normalize_second_moment(input[:, 0])

        feat_4 = self.init(input)
        feat_8 = self.feat_8(feat_4)
        feat_16 = self.feat_16(feat_8)
        feat_32 = self.feat_32(feat_16)
        feat_64 = self.se_64(feat_4, self.feat_64(feat_32))
        feat_128 = self.se_128(feat_8, self.feat_128(feat_64))

        if self.img_resolution >= 128:
            feat_last = feat_128

        if self.img_resolution >= 256:
            feat_last = self.se_256(feat_16, self.feat_256(feat_last))

        if self.img_resolution >= 512:
            feat_last = self.se_512(feat_32, self.feat_512(feat_last))

        if self.img_resolution >= 1024:
            feat_last = self.feat_1024(feat_last)

        return self.to_big(feat_last)


class FastganSynthesisCond(nn.Module):
    def __init__(self, ngf=64, z_dim=256, nc=3, img_resolution=256, num_classes=1000, lite=False):
        super().__init__()

        self.z_dim = z_dim
        nfc_multi = {2: 16, 4: 16, 8: 8, 16: 4, 32: 2, 64: 2, 128: 1, 256: 0.5,
                     512: 0.25, 1024: 0.125, 2048: 0.125}
        nfc = {}
        for k, v in nfc_multi.items():
            nfc[k] = int(v*ngf)

        self.img_resolution = img_resolution

        self.init = InitLayer(z_dim, channel=nfc[2], sz=4)

        UpBlock = UpBlockSmallCond if lite else UpBlockBigCond

        self.feat_8 = UpBlock(nfc[4], nfc[8], z_dim)
        self.feat_16 = UpBlock(nfc[8], nfc[16], z_dim)
        self.feat_32 = UpBlock(nfc[16], nfc[32], z_dim)
        self.feat_64 = UpBlock(nfc[32], nfc[64], z_dim)
        self.feat_128 = UpBlock(nfc[64], nfc[128], z_dim)
        self.feat_256 = UpBlock(nfc[128], nfc[256], z_dim)

        self.se_64 = SEBlock(nfc[4], nfc[64])
        self.se_128 = SEBlock(nfc[8], nfc[128])
        self.se_256 = SEBlock(nfc[16], nfc[256])

        self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)

        if img_resolution > 256:
            self.feat_512 = UpBlock(nfc[256], nfc[512], z_dim)
            self.se_512 = SEBlock(nfc[32], nfc[512])
        if img_resolution > 512:
            self.feat_1024 = UpBlock(nfc[512], nfc[1024], z_dim)

        self.embed = nn.Embedding(num_classes, z_dim)

    def forward(self, input, c, update_emas=False):
        c = self.embed(c.argmax(1))

        # map noise to hypersphere as in "Progressive Growing of GANs"
        input = normalize_second_moment(input[:, 0])

        feat_4 = self.init(input)
        feat_8 = self.feat_8(feat_4, c)
        feat_16 = self.feat_16(feat_8, c)
        feat_32 = self.feat_32(feat_16, c)
        feat_64 = self.se_64(feat_4, self.feat_64(feat_32, c))
        feat_128 = self.se_128(feat_8, self.feat_128(feat_64, c))

        if self.img_resolution >= 128:
            feat_last = feat_128

        if self.img_resolution >= 256:
            feat_last = self.se_256(feat_16, self.feat_256(feat_last, c))

        if self.img_resolution >= 512:
            feat_last = self.se_512(feat_32, self.feat_512(feat_last, c))

        if self.img_resolution >= 1024:
            feat_last = self.feat_1024(feat_last, c)

        return self.to_big(feat_last)


class Generator(nn.Module):
    def __init__(
        self,
        z_dim=256,
        c_dim=0,
        w_dim=0,
        img_resolution=256,
        img_channels=3,
        ngf=128,
        cond=0,
        mapping_kwargs={},
        synthesis_kwargs={}
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels

        # Mapping and Synthesis Networks
        self.mapping = DummyMapping()  # to fit the StyleGAN API
        Synthesis = FastganSynthesisCond if cond else FastganSynthesis
        self.synthesis = Synthesis(ngf=ngf, z_dim=z_dim, nc=img_channels, img_resolution=img_resolution, **synthesis_kwargs)

    def forward(self, z, c, **kwargs):
        w = self.mapping(z, c)
        img = self.synthesis(w, c)
        return img
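

# --- Illustrative usage sketch (not part of the original commit) ---
# The FastGAN Generator keeps the StyleGAN calling convention (z plus a label
# tensor c), even though the unconditional synthesis ignores c. Batch size and
# resolution below are example values only.
if __name__ == '__main__':
    import torch
    G = Generator(z_dim=256, img_resolution=256, img_channels=3)
    z = torch.randn(2, G.z_dim)
    c = torch.zeros(2, 0)            # no class labels in the unconditional setup
    img = G(z, c)
    assert img.shape == (2, 3, 256, 256)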
@@ -0,0 +1,537 @@
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# modified by Axel Sauer for "Projected GANs Converge Faster"
#
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma


@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()


@misc.profiled_function
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
):
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw])  # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None])  # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels])  # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True))  # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True)  # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0)  # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1)  # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt()  # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)  # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings():  # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
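

# --- Illustrative reference sketch (not part of the original commit) ---
# A plain-PyTorch restatement of the fused path above, for readability only:
# modulate the weights per sample, demodulate them to unit norm, then run one
# grouped convolution over the flattened batch. It skips up/down-sampling,
# fp16 handling and the custom ops, so it is not a drop-in replacement.
def _modulated_conv2d_reference(x, weight, styles, demodulate=True, padding=1):
    import torch.nn.functional as F
    batch, in_ch = styles.shape
    out_ch, _, kh, kw = weight.shape
    w = weight.unsqueeze(0) * styles.reshape(batch, 1, in_ch, 1, 1)   # per-sample modulation
    if demodulate:
        d = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()            # per-sample, per-output-channel
        w = w * d.reshape(batch, out_ch, 1, 1, 1)
    x = x.reshape(1, batch * in_ch, *x.shape[2:])                     # fold the batch into channels
    w = w.reshape(batch * out_ch, in_ch, kh, kw)
    x = F.conv2d(x, w, padding=padding, groups=batch)                 # one grouped convolution
    return x.reshape(batch, out_ch, *x.shape[2:])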


@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        bias            = True,     # Apply additive bias before the activation function?
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 1,        # Learning rate multiplier.
        bias_init       = 0,        # Initial value for the additive bias.
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        w = self.weight.to(x.dtype) * self.weight_gain
        b = self.bias
        if b is not None:
            b = b.to(x.dtype)
            if self.bias_gain != 1:
                b = b * self.bias_gain

        if self.activation == 'linear' and b is not None:
            x = torch.addmm(b.unsqueeze(0), x, w.t())
        else:
            x = x.matmul(w.t())
            x = bias_act.bias_act(x, b, act=self.activation)
        return x

    def extra_repr(self):
        return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'


@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        kernel_size,                    # Width and height of the convolution kernel.
        bias            = True,         # Apply additive bias before the activation function?
        activation      = 'linear',     # Activation function: 'relu', 'lrelu', etc.
        up              = 1,            # Integer upsampling factor.
        down            = 1,            # Integer downsampling factor.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output to +-X, None = disable clamping.
        channels_last   = False,        # Expect the input to have memory_format=channels_last?
        trainable       = True,         # Update the weights of this layer during training?
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            self.register_buffer('weight', weight)
            if bias is not None:
                self.register_buffer('bias', bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1)  # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
            f'up={self.up}, down={self.down}'])


@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.998,    # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W.
        if update_emas and self.w_avg_beta is not None:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x

    def extra_repr(self):
        return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'


@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = True,         # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution])
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == 'random':
            noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
        if self.use_noise and noise_mode == 'const':
            noise = self.noise_const * self.noise_strength

        flip_weight = (self.up == 1)  # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
        return x

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
            f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])


@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        styles = self.affine(w) * self.weight_gain
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
        x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
        return x

    def extra_repr(self):
        return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'


@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    def __init__(self,
        in_channels,                            # Number of input channels, 0 = first block.
        out_channels,                           # Number of output channels.
        w_dim,                                  # Intermediate latent (W) dimensionality.
        resolution,                             # Resolution of this block.
        img_channels,                           # Number of output color channels.
        is_last,                                # Is this the last block?
        architecture            = 'skip',       # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter         = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp              = 256,          # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16                = False,        # Use FP16 for this block?
        fp16_channels_last      = False,        # Use channels-last memory format with FP16?
        fused_modconv_default   = True,         # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
        **layer_kwargs,                         # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.fused_modconv_default = fused_modconv_default
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_conv = 0
        self.num_torgb = 0

        if in_channels == 0:
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))

        if in_channels != 0:
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1

        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1

        if is_last or architecture == 'skip':
            self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1

        if in_channels != 0 and architecture == 'resnet':
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
        _ = update_emas  # unused
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        if ws.device.type != 'cuda':
            force_fp32 = True
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            fused_modconv = self.fused_modconv_default
        if fused_modconv == 'inference_only':
            fused_modconv = (not self.training)

        # Input.
        if self.in_channels == 0:
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB.
        if img is not None:
            misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img

    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'


@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 4,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.num_fp16_res = num_fp16_res
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.num_ws = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)

    def forward(self, ws, c=None, **block_kwargs):
        block_ws = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
                w_idx += block.num_conv

        x = img = None
        for res, cur_ws in zip(self.block_resolutions, block_ws):
            block = getattr(self, f'b{res}')
            x, img = block(x, img, cur_ws, **block_kwargs)
        return img

    def extra_repr(self):
        return ' '.join([
            f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
            f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
            f'num_fp16_res={self.num_fp16_res:d}'])


@persistence.persistent_class
class Generator(torch.nn.Module):
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs  = {},       # Arguments for MappingNetwork.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        return img
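

# --- Illustrative usage sketch (not part of the original commit) ---
# Mapping produces num_ws copies of w, synthesis consumes one w per conv/torgb
# layer, and truncation_psi < 1 pulls w towards the tracked average for a
# fidelity/diversity trade-off. Requires the custom torch_utils ops to be
# importable; the dimensions below are example values only.
def _demo_stylegan2_generator():
    G = Generator(z_dim=512, c_dim=0, w_dim=512, img_resolution=256, img_channels=3)
    z = torch.randn(2, G.z_dim)
    c = torch.zeros(2, G.c_dim)
    img = G(z, c, truncation_psi=0.7)   # [2, 3, 256, 256]
    return img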
@@ -0,0 +1,158 @@
import torch
import torch.nn as nn
import timm
from components.pg_modules.blocks import FeatureFusionBlock


def _make_scratch_ccm(scratch, in_channels, cout, expand=False):
    # shapes
    out_channels = [cout, cout*2, cout*4, cout*8] if expand else [cout]*4

    scratch.layer0_ccm = nn.Conv2d(in_channels[0], out_channels[0], kernel_size=1, stride=1, padding=0, bias=True)
    scratch.layer1_ccm = nn.Conv2d(in_channels[1], out_channels[1], kernel_size=1, stride=1, padding=0, bias=True)
    scratch.layer2_ccm = nn.Conv2d(in_channels[2], out_channels[2], kernel_size=1, stride=1, padding=0, bias=True)
    scratch.layer3_ccm = nn.Conv2d(in_channels[3], out_channels[3], kernel_size=1, stride=1, padding=0, bias=True)

    scratch.CHANNELS = out_channels

    return scratch


def _make_scratch_csm(scratch, in_channels, cout, expand):
    scratch.layer3_csm = FeatureFusionBlock(in_channels[3], nn.ReLU(False), expand=expand, lowest=True)
    scratch.layer2_csm = FeatureFusionBlock(in_channels[2], nn.ReLU(False), expand=expand)
    scratch.layer1_csm = FeatureFusionBlock(in_channels[1], nn.ReLU(False), expand=expand)
    scratch.layer0_csm = FeatureFusionBlock(in_channels[0], nn.ReLU(False))

    # last refinenet does not expand to save channels in higher dimensions
    scratch.CHANNELS = [cout, cout, cout*2, cout*4] if expand else [cout]*4

    return scratch


def _make_efficientnet(model):
    pretrained = nn.Module()
    pretrained.layer0 = nn.Sequential(model.conv_stem, model.bn1, model.act1, *model.blocks[0:2])
    pretrained.layer1 = nn.Sequential(*model.blocks[2:3])
    pretrained.layer2 = nn.Sequential(*model.blocks[3:5])
    pretrained.layer3 = nn.Sequential(*model.blocks[5:9])
    return pretrained


def calc_channels(pretrained, inp_res=224):
    channels = []
    tmp = torch.zeros(1, 3, inp_res, inp_res)

    # forward pass
    tmp = pretrained.layer0(tmp)
    channels.append(tmp.shape[1])
    tmp = pretrained.layer1(tmp)
    channels.append(tmp.shape[1])
    tmp = pretrained.layer2(tmp)
    channels.append(tmp.shape[1])
    tmp = pretrained.layer3(tmp)
    channels.append(tmp.shape[1])

    return channels


def _make_projector(im_res, cout, proj_type, expand=False):
    assert proj_type in [0, 1, 2], "Invalid projection type"

    ### Build pretrained feature network
    model = timm.create_model('tf_efficientnet_lite0', pretrained=True)
    pretrained = _make_efficientnet(model)

    # determine resolution of feature maps, this is later used to calculate the number
    # of down blocks in the discriminators. Interestingly, the best results are achieved
    # by fixing this to 256, i.e., we use the same number of down blocks per discriminator
    # independent of the dataset resolution
    im_res = 256
    pretrained.RESOLUTIONS = [im_res//4, im_res//8, im_res//16, im_res//32]
    pretrained.CHANNELS = calc_channels(pretrained)

    if proj_type == 0: return pretrained, None

    ### Build CCM
    scratch = nn.Module()
    scratch = _make_scratch_ccm(scratch, in_channels=pretrained.CHANNELS, cout=cout, expand=expand)
    pretrained.CHANNELS = scratch.CHANNELS

    if proj_type == 1: return pretrained, scratch

    ### Build CSM
    scratch = _make_scratch_csm(scratch, in_channels=scratch.CHANNELS, cout=cout, expand=expand)

    # CSM upsamples x2 so the feature map resolution doubles
    pretrained.RESOLUTIONS = [res*2 for res in pretrained.RESOLUTIONS]
    pretrained.CHANNELS = scratch.CHANNELS

    return pretrained, scratch


class F_RandomProj(nn.Module):
    def __init__(
        self,
        im_res=256,
        cout=64,
        expand=True,
        proj_type=2,  # 0 = no projection, 1 = cross channel mixing, 2 = cross scale mixing
        **kwargs,
    ):
        super().__init__()
        self.proj_type = proj_type
        self.cout = cout
        self.expand = expand

        # build pretrained feature network and random decoder (scratch)
        self.pretrained, self.scratch = _make_projector(im_res=im_res, cout=self.cout, proj_type=self.proj_type, expand=self.expand)
        self.CHANNELS = self.pretrained.CHANNELS
        self.RESOLUTIONS = self.pretrained.RESOLUTIONS

    def forward(self, x, get_features=False):
        # predict feature maps
        out0 = self.pretrained.layer0(x)
        out1 = self.pretrained.layer1(out0)
        out2 = self.pretrained.layer2(out1)
        out3 = self.pretrained.layer3(out2)

        # start enumerating at the lowest layer (this is where we put the first discriminator)
        backbone_features = {
            '0': out0,
            '1': out1,
            '2': out2,
            '3': out3,
        }
        if get_features:
            return backbone_features

        if self.proj_type == 0: return backbone_features

        out0_channel_mixed = self.scratch.layer0_ccm(backbone_features['0'])
        out1_channel_mixed = self.scratch.layer1_ccm(backbone_features['1'])
        out2_channel_mixed = self.scratch.layer2_ccm(backbone_features['2'])
        out3_channel_mixed = self.scratch.layer3_ccm(backbone_features['3'])

        out = {
            '0': out0_channel_mixed,
            '1': out1_channel_mixed,
            '2': out2_channel_mixed,
            '3': out3_channel_mixed,
        }

        if self.proj_type == 1: return out

        # from bottom to top
        out3_scale_mixed = self.scratch.layer3_csm(out3_channel_mixed)
        out2_scale_mixed = self.scratch.layer2_csm(out3_scale_mixed, out2_channel_mixed)
        out1_scale_mixed = self.scratch.layer1_csm(out2_scale_mixed, out1_channel_mixed)
        out0_scale_mixed = self.scratch.layer0_csm(out1_scale_mixed, out0_channel_mixed)

        out = {
            '0': out0_scale_mixed,
            '1': out1_scale_mixed,
            '2': out2_scale_mixed,
            '3': out3_scale_mixed,
        }

        return out, backbone_features
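

# --- Illustrative usage sketch (not part of the original commit) ---
# With the default proj_type=2, the forward pass returns the cross-scale-mixed
# feature pyramid plus the raw EfficientNet features; the CSM path doubles each
# backbone resolution. A 256x256 input and an available timm checkpoint are
# assumptions for the example only.
if __name__ == '__main__':
    proj = F_RandomProj()
    x = torch.randn(1, 3, 256, 256)
    feats, backbone_feats = proj(x)
    for k in ['0', '1', '2', '3']:
        print(k, feats[k].shape, backbone_feats[k].shape)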