import math
import random

import torch
from torch import nn
from torch.nn import functional as F

from unet.unet_model_encoder import UNetEncoder
from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d, conv2d_gradfix
from swintransformer.swin_transformerv2 import SwinTransformerV2

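# The building blocks below (PixelNorm, equalized-lr layers, blur-based
# up/downsampling, modulated convolution) follow the standard StyleGAN2
# reference implementation; the project-specific parts start at ForwardConvs.
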
class PixelNorm(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)

    if k.ndim == 1:
        k = k[None, :] * k[:, None]

    k /= k.sum()

    return k


class Upsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer("kernel", kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)

        return out


class Downsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel)
        self.register_buffer("kernel", kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)

        return out


class Blur(nn.Module):
    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()

        kernel = make_kernel(kernel)

        if upsample_factor > 1:
            kernel = kernel * (upsample_factor ** 2)

        self.register_buffer("kernel", kernel)

        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)

        return out


class EqualConv2d(nn.Module):
    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()

        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)

        self.stride = stride
        self.padding = padding

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))

        else:
            self.bias = None

    def forward(self, input):
        out = conv2d_gradfix.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

        return out

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
            f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
        )


class EqualLinear(nn.Module):
    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()

        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))

        else:
            self.bias = None

        self.activation = activation

        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(
                input, self.weight * self.scale, bias=self.bias * self.lr_mul
            )

        return out

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
        )


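# ModulatedConv2d implements StyleGAN2 weight (de)modulation: the style vector
# scales the input channels of the convolution weight, and demodulation then
# renormalizes each output filter to roughly unit norm. The fused path builds
# per-sample weights and runs a single grouped convolution; the non-fused path
# scales the activations instead.
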
class ModulatedConv2d(nn.Module):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        fused=True,
    ):
        super().__init__()

        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample

        if upsample:
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1

            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            self.blur = Blur(blur_kernel, pad=(pad0, pad1))

        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2

        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )

        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)

        self.demodulate = demodulate
        self.fused = fused

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
            f"upsample={self.upsample}, downsample={self.downsample})"
        )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape

        if not self.fused:
            weight = self.scale * self.weight.squeeze(0)
            style = self.modulation(style)

            if self.demodulate:
                w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1)
                dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt()

            input = input * style.reshape(batch, in_channel, 1, 1)

            if self.upsample:
                weight = weight.transpose(0, 1)
                out = conv2d_gradfix.conv_transpose2d(
                    input, weight, padding=0, stride=2
                )
                out = self.blur(out)

            elif self.downsample:
                input = self.blur(input)
                out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2)

            else:
                out = conv2d_gradfix.conv2d(input, weight, padding=self.padding)

            if self.demodulate:
                out = out * dcoefs.view(batch, -1, 1, 1)

            return out

        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style

        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)

        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )

        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = conv2d_gradfix.conv_transpose2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)

        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        else:
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=self.padding, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        return out


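# NoiseInjection adds per-pixel Gaussian noise scaled by a learned per-layer
# weight; ConstantInput is the learned 4x4 tensor that starts the synthesis
# path, broadcast across the batch.
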
class NoiseInjection(nn.Module):
    def __init__(self):
        super().__init__()

        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()

        return image + self.weight * noise


class ConstantInput(nn.Module):
    def __init__(self, channel, size=4):
        super().__init__()

        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        batch = input.shape[0]
        out = self.input.repeat(batch, 1, 1, 1)

        return out


class StyledConv(nn.Module):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        self.activate = FusedLeakyReLU(out_channel)

    def forward(self, input, style, noise=None):
        out = self.conv(input, style)
        out = self.noise(out, noise=noise)
        # out = out + self.bias
        out = self.activate(out)

        return out


class ToRGB(nn.Module):
    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        if upsample:
            self.upsample = Upsample(blur_kernel)

        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        out = self.conv(input, style)
        out = out + self.bias

        if skip is not None:
            skip = self.upsample(skip)

            out = out + skip

        return out


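# ForwardConvs stacks `num_layers` extra StyledConv layers (no up/downsampling)
# between the usual upsample/conv pair of each resolution. Each extra layer
# consumes its own latent slice and noise entry, which is what makes
# Generator.n_latent below grow quadratically with log2(size).
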
class ForwardConvs(nn.Module):
    def __init__(
        self,
        input_channel,
        out_channel,
        kernel_size,
        num_layers,
        style_dim,
        blur_kernel=[1, 3, 3, 1],
        upsample=False,
        downsample=False,
    ):
        super().__init__()

        self.convs = nn.ModuleList()
        for _ in range(num_layers):
            self.convs.append(
                StyledConv(
                    input_channel, out_channel, kernel_size, style_dim, blur_kernel=blur_kernel
                )
            )

    def forward(self, x, latent, noise):
        out = x
        for i in range(len(self.convs)):
            out = self.convs[i](out, latent[:, i], noise=noise[i])
        return out


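# Generator: the StyleGAN2 synthesis network extended with an extra 4x4 layer
# (conv1_1) and a ForwardConvs stack of 2*(i-2) layers at each resolution 2**i,
# so n_latent grows quadratically in log2(size) instead of linearly.
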
class Generator(nn.Module):
    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()

        self.size = size

        self.style_dim = style_dim

        layers = [PixelNorm()]

        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
                )
            )

        self.style = nn.Sequential(*layers)

        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.conv1_1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        # self.conv1_2 = StyledConv(
        #     self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        # )
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)

        self.log_size = int(math.log(size, 2))
        # self.num_layers = (self.log_size - 2) * 3 + 2
        self.num_layers = (self.log_size - 2) * 2 + 2
        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()
        self.forward_convs = nn.ModuleList()
        in_channel = self.channels[4]

        for layer_idx in range(self.num_layers):
            # conv1 / conv1_1 run at 4x4; every following pair of layers doubles the resolution
            res = layer_idx // 2 + 2
            shape = [1, 1, 2 ** res, 2 ** res]
            self.noises.register_buffer(f"noise_{layer_idx}", torch.randn(*shape))

        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]

            self.convs.append(
                StyledConv(
                    in_channel,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )

            self.forward_convs.append(
                ForwardConvs(
                    input_channel=out_channel,
                    out_channel=out_channel,
                    kernel_size=3,
                    num_layers=2 * (i - 2),
                    style_dim=style_dim,
                    blur_kernel=blur_kernel,
                )
            )
            self.convs.append(
                StyledConv(
                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )
            self.to_rgbs.append(ToRGB(out_channel, style_dim))

            in_channel = out_channel

        self.n_latent = self.log_size * 2 - 2 + 1 + (self.log_size - 1) * (self.log_size - 2)

    def make_noise(self):
        device = self.input.input.device

        # conv1 and conv1_1 both operate at 4x4, then each resolution uses two noise maps,
        # matching the noise list consumed in forward()
        noises = [
            torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device),
            torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device),
        ]

        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))

        return noises

    def mean_latent(self, n_latent):
        latent_in = torch.randn(
            n_latent, self.style_dim, device=self.input.input.device
        )
        latent = self.style(latent_in).mean(0, keepdim=True)

        return latent

    def get_latent(self, input):
        return self.style(input)

    def forward(
        self,
        styles,
        return_latents=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        noise=None,
        randomize_noise=True,
    ):
        if not input_is_latent:
            styles = [self.style(s) for s in styles]

        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers
            else:
                noise = [
                    getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
                ]

        if truncation < 1:
            style_t = []

            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )

            styles = style_t

        if len(styles) < 2:
            inject_index = self.n_latent

            if styles[0].ndim < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)

            else:
                latent = styles[0]

        else:
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)

            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)

            latent = torch.cat([latent, latent2], 1)

        forward_noise = [None] * ((self.log_size - 1) * (self.log_size - 2))
        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])
        out = self.conv1_1(out, latent[:, 1], noise=noise[1])
        # out = self.conv1_2(out, latent[:, 2], noise=noise[2])
        skip = self.to_rgb1(out, latent[:, 2])

        i = 2
        j = 0
        pre_idx = 0
        for conv1, conv2, noise1, noise2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], noise[2::2], noise[3::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = self.forward_convs[j](
                out,
                latent[:, i + 1 : i + 1 + (j + 1) * 2],
                noise=forward_noise[pre_idx : pre_idx + (j + 1) * 2],
            )
            out = conv2(out, latent[:, i + 1 + (j + 1) * 2], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2 + (j + 1) * 2], skip)
            i += 2 + (j + 1) * 2
            pre_idx += (j + 1) * 2
            j += 1

        image = skip

        if return_latents:
            return image, latent

        else:
            return image, None


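# Minimal usage sketch for the generator (illustrative values, not the original
# training config; assumes the fused CUDA ops in `op` are built):
#
#   g = Generator(size=256, style_dim=512, n_mlp=8)
#   z = torch.randn(4, 512)
#   img, _ = g([z])                 # styles are passed as a list of latent codes
#   img.shape                       # torch.Size([4, 3, 256, 256])
#   w = g.get_latent(z)             # map z -> w with the style MLP
#   img, ws = g([w], input_is_latent=True, return_latents=True)
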
class ConvLayer(nn.Sequential):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )

        if activate:
            layers.append(FusedLeakyReLU(out_channel, bias=bias))

        super().__init__(*layers)


class ResBlock(nn.Module):
    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)

        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
        )

    def forward(self, input):
        out = self.conv1(input)
        out = self.conv2(out)

        skip = self.skip(input)
        out = (out + skip) / math.sqrt(2)

        return out


class DiscHead_old(nn.Module):
    def __init__(self, in_chann, out_chann, feat_size):
        """
        in_chann: discriminator feat channel + unet_encoder feat channel + transformer feat channel, an integer
        out_chann: an integer
        feat_size: the feature map's size (height and width), an integer

        cat(disc_out, stddev, unet_encoder_out, transformer_out) -> 1x1 -> 3x3 -> residual add -> 3x3 -> Linear*2
        """
        super().__init__()
        self.dim_red = nn.Sequential(
            ConvLayer(in_chann + 1, 512, 1),
            nn.BatchNorm2d(512),
            # FusedLeakyReLU(512, bias=False),
            nn.LeakyReLU(inplace=True),
        )

        self.layer2 = nn.Sequential(
            ConvLayer(512, 512, 3),
            nn.BatchNorm2d(512),
            # FusedLeakyReLU(512, bias=False),
            nn.LeakyReLU(inplace=True),
        )

        self.stddev_group = 4
        self.stddev_feat = 1

        self.final_conv = ConvLayer(512, out_chann, 3)
        self.final_linear = nn.Sequential(
            EqualLinear(out_chann * feat_size * feat_size, out_chann, activation="fused_lrelu"),
            EqualLinear(out_chann, 1),
        )

    def forward(self, disc_out, unet_encoder_out, transformer_out):
        batch, channel, height, width = disc_out.shape
        group = min(batch, self.stddev_group)
        stddev = disc_out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        input = torch.cat([disc_out, stddev, unet_encoder_out, transformer_out], dim=1)
        out_red = self.dim_red(input)
        out = self.layer2(out_red)
        out = out + out_red
        out = self.final_conv(out)

        out = out.view(batch, -1)
        out = self.final_linear(out)
        return out


class DiscHead(nn.Module):
    def __init__(self, in_chann, out_chann, feat_size):
        """
        in_chann: channel count of the fused feature map fed to this head, an integer
        out_chann: an integer
        feat_size: the feature map's size (height and width), an integer

        cat(input, stddev) -> 3x3 -> Linear*2
        """
        super().__init__()

        self.stddev_group = 4
        self.stddev_feat = 1

        self.final_conv = ConvLayer(in_chann + 1, out_chann, 3)
        self.final_linear = nn.Sequential(
            EqualLinear(out_chann * feat_size * feat_size, out_chann, activation="fused_lrelu"),
            EqualLinear(out_chann, 1),
        )

    def forward(self, input):
        out = input
        batch, channel, height, width = out.shape
        group = min(batch, self.stddev_group)
        stddev = out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        out = torch.cat([out, stddev], 1)

        out = self.final_conv(out)

        out = out.view(batch, -1)
        out = self.final_linear(out)

        return out


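# DiscEncoder: the StyleGAN2 discriminator trunk (1x1 stem plus residual
# downsampling blocks). Instead of producing a single logit it returns the whole
# feature pyramid, from the input resolution down to 4x4, for fusion with the
# other encoders below.
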
class DiscEncoder(nn.Module):
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        # 3, 128 * channel_multiplier
        self.conv = ConvLayer(3, channels[size], 1)
        self.down1 = ResBlock(channels[size], channels[size // 2], blur_kernel)
        self.down2 = ResBlock(channels[size // 2], channels[size // 4], blur_kernel)
        if size >= 32:
            self.down3 = ResBlock(channels[size // 4], channels[size // 8], blur_kernel)
        else:
            self.down3 = None
        if size >= 64:
            self.down4 = ResBlock(channels[size // 8], channels[size // 16], blur_kernel)
        else:
            self.down4 = None
        if size >= 128:
            self.down5 = ResBlock(channels[size // 16], channels[size // 32], blur_kernel)
        else:
            self.down5 = None
        if size >= 256:
            self.down6 = ResBlock(channels[size // 32], channels[size // 64], blur_kernel)
        else:
            self.down6 = None
        if size >= 512:
            self.down7 = ResBlock(channels[size // 64], channels[size // 128], blur_kernel)
        else:
            self.down7 = None
        if size >= 1024:
            self.down8 = ResBlock(channels[size // 128], channels[size // 256], blur_kernel)
        else:
            self.down8 = None
        self.channels = channels

    def forward(self, input):
        x = self.conv(input)
        x1 = self.down1(x)
        x2 = self.down2(x1)
        if self.down3:
            x3 = self.down3(x2)
        else:
            return [x, x1, x2]

        if self.down4:
            x4 = self.down4(x3)
        else:
            return [x, x1, x2, x3]
        if self.down5:
            x5 = self.down5(x4)
        else:
            return [x, x1, x2, x3, x4]
        if self.down6:
            x6 = self.down6(x5)
        else:
            return [x, x1, x2, x3, x4, x5]
        if self.down7:
            x7 = self.down7(x6)
        else:
            return [x, x1, x2, x3, x4, x5, x6]
        if self.down8:
            x8 = self.down8(x7)
        else:
            return [x, x1, x2, x3, x4, x5, x6, x7]

        return [x, x1, x2, x3, x4, x5, x6, x7, x8]


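# Discriminator fuses three feature pyramids per image -- the DiscEncoder trunk,
# a UNet encoder, and a SwinTransformerV2 encoder -- at the four coarsest scales
# (32x32 down to 4x4). The concatenated maps are channel-mixed with 1x1 convs
# (_make_scratch_ccm), then scale-mixed bottom-up with FeatureFusionBlocks
# (_make_scratch_csm, each block upsampling by 2x), and each fused scale is
# scored by its own DiscHead, giving four logit tensors.
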
class Discriminator(nn.Module):
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.disc_encoder = DiscEncoder(size, channel_multiplier, blur_kernel)
        disc_out_chann = self.disc_encoder.channels

        self.unet_enconder = UNetEncoder(3, disc_out_chann[4], size)
        unet_encoder_out_chann = self.unet_enconder.out_chann
        self.transformer_enconder = SwinTransformerV2(img_size=size, in_chans=3)
        transformer_out_chann = self.transformer_enconder.out_chann
        # self.transformer_enconder_2 = SwinTransformerV2(img_size=size, in_chans=3)

        # 4:512 8:512 16:512 32:512 64:512 128:256 256:128 512:64 1024:32
        # 64,128,256,512,1024,1024
        head_in_chann_32 = disc_out_chann[32] + unet_encoder_out_chann[-4] + transformer_out_chann[-4]
        head_in_chann_16 = disc_out_chann[16] + unet_encoder_out_chann[-3] + transformer_out_chann[-3]
        head_in_chann_8 = disc_out_chann[8] + unet_encoder_out_chann[-2] + transformer_out_chann[-2]
        head_in_chann_4 = disc_out_chann[4] + unet_encoder_out_chann[-1] + transformer_out_chann[-1]

        self.size = size

        scratch = nn.Module()
        feature_channels = [head_in_chann_32, head_in_chann_16, head_in_chann_8, head_in_chann_4]
        scratch = _make_scratch_ccm(scratch, in_channels=feature_channels, cout=64, expand=False)
        feature_channels = scratch.CHANNELS
        scratch = _make_scratch_csm(scratch, in_channels=scratch.CHANNELS, cout=64, expand=False)
        self.head_32 = DiscHead(feature_channels[0], 32, 64)
        self.head_16 = DiscHead(feature_channels[1], 64, 32)
        self.head_8 = DiscHead(feature_channels[2], 128, 16)
        self.head_4 = DiscHead(feature_channels[3], 256, 8)
        self.scratch = scratch

    def forward(self, input):
        # [x1, x2, x3, x4, x5, x6, logits]
        unet_encoder_outs = self.unet_enconder(input)
        # [x, x1, x2, x3, x4, x5, x6, x7, x8]
        disc_outs = self.disc_encoder(input)
        _, transformer_outs = self.transformer_enconder(input)

        out = {
            '0': torch.cat([disc_outs[-4], unet_encoder_outs[-5], transformer_outs[-4]], dim=1),
            '1': torch.cat([disc_outs[-3], unet_encoder_outs[-4], transformer_outs[-3]], dim=1),
            '2': torch.cat([disc_outs[-2], unet_encoder_outs[-3], transformer_outs[-2]], dim=1),
            '3': torch.cat([disc_outs[-1], unet_encoder_outs[-2], transformer_outs[-1]], dim=1),
        }
        out0_channel_mixed = self.scratch.layer0_ccm(out['0'])
        out1_channel_mixed = self.scratch.layer1_ccm(out['1'])
        out2_channel_mixed = self.scratch.layer2_ccm(out['2'])
        out3_channel_mixed = self.scratch.layer3_ccm(out['3'])
        out = {
            '0': out0_channel_mixed,
            '1': out1_channel_mixed,
            '2': out2_channel_mixed,
            '3': out3_channel_mixed,
        }

        # from bottom to top
        out3_scale_mixed = self.scratch.layer3_csm(out3_channel_mixed)
        out2_scale_mixed = self.scratch.layer2_csm(out3_scale_mixed, out2_channel_mixed)
        out1_scale_mixed = self.scratch.layer1_csm(out2_scale_mixed, out1_channel_mixed)
        out0_scale_mixed = self.scratch.layer0_csm(out1_scale_mixed, out0_channel_mixed)

        out = {
            '0': out0_scale_mixed,
            '1': out1_scale_mixed,
            '2': out2_scale_mixed,
            '3': out3_scale_mixed,
        }

        out_32 = self.head_32(out['0'])
        out_16 = self.head_16(out['1'])
        out_8 = self.head_8(out['2'])
        out_4 = self.head_4(out['3'])

        return [out_32, out_16, out_8, out_4]


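# FeatureFusionBlock adds an optional skip connection, upsamples the result by
# 2x (bilinear), and applies a 1x1 projection. The activation/deconv/bn/lowest
# arguments are accepted for interface compatibility but are not used here.
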
class FeatureFusionBlock(nn.Module):
    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, lowest=False):
        super().__init__()

        self.deconv = deconv
        self.align_corners = align_corners

        self.expand = expand
        out_features = features
        if self.expand:
            out_features = features // 2

        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs):
        output = xs[0]

        if len(xs) == 2:
            output = self.skip_add.add(output, xs[1])

        output = nn.functional.interpolate(
            output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
        )

        output = self.out_conv(output)

        return output


def _make_scratch_ccm(scratch, in_channels, cout, expand=False):
    # shapes
    out_channels = [cout, cout * 2, cout * 4, cout * 8] if expand else [cout] * 4

    scratch.layer0_ccm = nn.Conv2d(in_channels[0], out_channels[0], kernel_size=1, stride=1, padding=0, bias=True)
    scratch.layer1_ccm = nn.Conv2d(in_channels[1], out_channels[1], kernel_size=1, stride=1, padding=0, bias=True)
    scratch.layer2_ccm = nn.Conv2d(in_channels[2], out_channels[2], kernel_size=1, stride=1, padding=0, bias=True)
    scratch.layer3_ccm = nn.Conv2d(in_channels[3], out_channels[3], kernel_size=1, stride=1, padding=0, bias=True)

    scratch.CHANNELS = out_channels

    return scratch


def _make_scratch_csm(scratch, in_channels, cout, expand):
    scratch.layer3_csm = FeatureFusionBlock(in_channels[3], nn.ReLU(False), expand=expand, lowest=True)
    scratch.layer2_csm = FeatureFusionBlock(in_channels[2], nn.ReLU(False), expand=expand)
    scratch.layer1_csm = FeatureFusionBlock(in_channels[1], nn.ReLU(False), expand=expand)
    scratch.layer0_csm = FeatureFusionBlock(in_channels[0], nn.ReLU(False))

    # the last fusion block does not expand, to save channels at the higher resolutions
    scratch.CHANNELS = [cout, cout, cout * 2, cout * 4] if expand else [cout] * 4

    return scratch