# Open-Sora/opensora/models/vae/model_utils.py
import functools  # referenced by the commented-out get_norm_layer below

from collections import namedtuple

import numpy as np
import torch
import torch.nn as nn
from torchvision import models

# from taming.modules.losses.lpips import LPIPS  # needs https://github.com/CompVis/taming-transformers installed
from taming.modules.discriminator.model import NLayerDiscriminator, weights_init  # used by the commented-out discriminator wiring below
from taming.util import get_ckpt_path

# The LPIPS implementation below is a stripped version of
# https://github.com/richzhang/PerceptualSimilarity/tree/master/models
class LPIPS(nn.Module):
    """Learned Perceptual Image Patch Similarity (LPIPS) metric."""

    def __init__(self, use_dropout=True):
        super().__init__()
        self.scaling_layer = ScalingLayer()
        self.chns = [64, 128, 256, 512, 512]  # vgg16 feature channels
        self.net = vgg16(pretrained=True, requires_grad=False)
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.load_from_pretrained()
for param in self.parameters():
param.requires_grad = False
def load_from_pretrained(self, name="vgg_lpips"):
ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips")
self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
print("loaded pretrained LPIPS loss from {}".format(ckpt))
@classmethod
def from_pretrained(cls, name="vgg_lpips"):
if name != "vgg_lpips":
raise NotImplementedError
model = cls()
ckpt = get_ckpt_path(name)
model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
return model
def forward(self, input, target):
in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
outs0, outs1 = self.net(in0_input), self.net(in1_input)
feats0, feats1, diffs = {}, {}, {}
lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
for kk in range(len(self.chns)):
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
        res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
        return sum(res)
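
# Hedged usage sketch (illustrative, not part of the original file): LPIPS
# expects image batches scaled to [-1, 1] with shape [N, 3, H, W]; constructing
# it downloads the VGG16 and LPIPS checkpoints; all tensor names are hypothetical.
#
#     lpips = LPIPS().eval()
#     x = torch.rand(4, 3, 64, 64) * 2 - 1  # fake "real" frames
#     y = torch.rand(4, 3, 64, 64) * 2 - 1  # fake reconstructions
#     with torch.no_grad():
#         d = lpips(x, y)  # per-sample distance, shape [4, 1, 1, 1]
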
# SCH: TODO: this channel shift & scale may need to be changed
class ScalingLayer(nn.Module):
    """Normalizes [-1, 1] RGB inputs with the channel statistics used by the original LPIPS code."""

    def __init__(self):
        super().__init__()
        self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
        self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])

    def forward(self, inp):
        return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
    """A single linear layer which does a 1x1 conv."""

    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super().__init__()
        layers = [nn.Dropout()] if use_dropout else []
        layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False)]
        self.model = nn.Sequential(*layers)
class vgg16(torch.nn.Module):
    """VGG16 backbone sliced into the five ReLU stages whose activations LPIPS compares."""

    def __init__(self, requires_grad=False, pretrained=True):
        super().__init__()
        vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
return out
def normalize_tensor(x, eps=1e-10):
    # Normalize each spatial location's channel vector to unit L2 norm.
    norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
    return x / (norm_factor + eps)


def spatial_average(x, keepdim=True):
    # Mean over the spatial dims H and W.
    return x.mean([2, 3], keepdim=keepdim)
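
# Hedged sketch (illustrative): normalize_tensor makes each pixel's channel
# vector unit-length, so the squared differences in LPIPS.forward compare
# feature directions rather than magnitudes; names below are hypothetical.
#
#     f = torch.randn(1, 64, 8, 8)      # hypothetical feature map
#     u = normalize_tensor(f)
#     norms = torch.sum(u ** 2, dim=1)  # ~1.0 at every spatial location
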
## NOTE: not used since we only have 'GN'
# def get_norm_layer(norm_type, dtype):
# if norm_type == 'LN':
# # supply a few args with partial function and pass the rest of the args when this norm_fn is called
# norm_fn = functools.partial(nn.LayerNorm, dtype=dtype)
# elif norm_type == 'GN': #
# norm_fn = functools.partial(nn.GroupNorm, dtype=dtype)
# elif norm_type is None:
# norm_fn = lambda: (lambda x: x)
# else:
# raise NotImplementedError(f'norm_type: {norm_type}')
# return norm_fn
class DiagonalGaussianDistribution:
    """Diagonal Gaussian posterior parameterized by concatenated mean and log-variance."""

    def __init__(
        self,
        parameters,
        deterministic=False,
    ):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)  # SCH: channels dim
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device, dtype=self.mean.dtype)

    def sample(self):
        # Reparameterization trick: x = mean + std * eps, with eps ~ N(0, I).
        x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device, dtype=self.mean.dtype)
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.])
        if other is None:
            # Closed-form KL(N(mean, var) || N(0, I)), summed per sample.
            return 0.5 * torch.sum(torch.pow(self.mean, 2)
                                   + self.var - 1.0 - self.logvar,
                                   dim=[1, 2, 3, 4])  # TODO: check dimensions
        # Closed-form KL between two diagonal Gaussians.
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=[1, 2, 3, 4])  # TODO: check dimensions

    def nll(self, sample, dims=[1, 2, 3, 4]):  # TODO: check dimensions
        if self.deterministic:
            return torch.Tensor([0.])
        # Gaussian negative log-likelihood of `sample` under this distribution.
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean
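
# Hedged usage sketch (illustrative): an encoder producing 2 * latent_dim
# channels can be wrapped as below; shapes and names are hypothetical.
#
#     moments = torch.randn(2, 8, 4, 16, 16)  # [B, 2*C, T, H, W] from a 3D encoder
#     posterior = DiagonalGaussianDistribution(moments)
#     z = posterior.sample()                  # [2, 4, 4, 16, 16]
#     kl = posterior.kl()                     # per-sample KL to N(0, I), shape [2]
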
class VEA3DLoss(nn.Module):
    """Reconstruction + perceptual + KL loss for the 3D VAE (discriminator terms are stubbed out below)."""

    def __init__(
        self,
        # disc_start,
        logvar_init=0.0,
        kl_weight=1.0,
        pixelloss_weight=1.0,
        disc_num_layers=3,
        disc_in_channels=3,
        disc_factor=1.0,
        disc_weight=1.0,
        perceptual_weight=1.0,
        use_actnorm=False,
        disc_conditional=False,
        disc_loss="hinge",
    ):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.kl_weight = kl_weight
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        # Learnable output log-variance for the NLL reconstruction term.
        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
# self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
# n_layers=disc_num_layers,
# use_actnorm=use_actnorm
# ).apply(weights_init)
# self.discriminator_iter_start = disc_start
# self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
# self.disc_factor = disc_factor
# self.discriminator_weight = disc_weight
# self.disc_conditional = disc_conditional
# TODO: for discriminator
# def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
# if last_layer is not None:
# nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
# g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
# else:
# nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
# g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
# d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
# d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
# d_weight = d_weight * self.discriminator_weight
# return d_weight
def forward(
self,
inputs,
reconstructions,
posteriors,
# optimizer_idx,
# global_step,
last_layer=None,
cond=None,
split="train",
weights=None,
):
        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        if self.perceptual_weight > 0:
            # SCH: transform to [(B*T), C, H, W] shape for perceptual loss over each frame
            permuted_input = torch.permute(inputs, (0, 2, 1, 3, 4))  # [B, C, T, H, W] --> [B, T, C, H, W]
            permuted_rec = torch.permute(reconstructions, (0, 2, 1, 3, 4))
            data_shape = permuted_input.size()
            p_loss = self.perceptual_loss(
                permuted_input.reshape(-1, data_shape[-3], data_shape[-2], data_shape[-1]).contiguous(),
                permuted_rec.reshape(-1, data_shape[-3], data_shape[-2], data_shape[-1]).contiguous()
            )
            # SCH: reshape p_loss back to [B, 1, T, 1, 1] so it broadcasts against rec_loss
            permuted_p_loss = torch.permute(p_loss.reshape(data_shape[0], data_shape[1], 1, 1, 1), (0, 2, 1, 3, 4))
            rec_loss = rec_loss + self.perceptual_weight * permuted_p_loss
        nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
        weighted_nll_loss = nll_loss
        if weights is not None:
            weighted_nll_loss = weights * nll_loss
        weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
        nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
        kl_loss = posteriors.kl()
        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
        loss = weighted_nll_loss + self.kl_weight * kl_loss  # TODO: add discriminator loss later
# log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
# "{}/logvar".format(split): self.logvar.detach(),
# "{}/kl_loss".format(split): kl_loss.detach().mean(),
# "{}/nll_loss".format(split): nll_loss.detach().mean(),
# "{}/rec_loss".format(split): rec_loss.detach().mean(),
# # "{}/d_weight".format(split): d_weight.detach(),
# # "{}/disc_factor".format(split): torch.tensor(disc_factor),
# # "{}/g_loss".format(split): g_loss.detach().mean(),
# }
return loss
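

# Hedged usage sketch (illustrative, not part of the original file): wiring the
# loss to dummy video tensors. Shapes and names are hypothetical, and the VGG16
# and LPIPS checkpoints must be downloadable for VEA3DLoss() to construct.
if __name__ == "__main__":
    loss_fn = VEA3DLoss(kl_weight=1e-6, perceptual_weight=1.0)
    x = torch.rand(2, 3, 4, 32, 32) * 2 - 1  # fake clip, [B, C, T, H, W] in [-1, 1]
    moments = torch.randn(2, 6, 4, 32, 32)   # pretend encoder output: mean + logvar
    posterior = DiagonalGaussianDistribution(moments)
    recon = torch.tanh(torch.randn_like(x))  # pretend decoder output
    print(loss_fn(x, recon, posterior))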