# Open-Sora/opensora/models/stdit/stdit3.py

import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from timm.models.layers import DropPath
from timm.models.vision_transformer import Mlp
from opensora.acceleration.checkpoint import auto_grad_checkpoint
from opensora.models.layers.blocks import (
Attention,
CaptionEmbedder,
MultiHeadCrossAttention,
PatchEmbed3D,
PositionEmbedding2D,
    SizeEmbedder,
T2IFinalLayer,
TimestepEmbedder,
approx_gelu,
get_layernorm,
t2i_modulate,
)
from opensora.registry import MODELS
from opensora.utils.ckpt_utils import load_checkpoint


class STDiT3Block(nn.Module):
def __init__(
self,
hidden_size,
num_heads,
mlp_ratio=4.0,
drop_path=0.0,
rope=None,
qk_norm=False,
temporal=False,
enable_flashattn=False,
enable_layernorm_kernel=False,
enable_sequence_parallelism=False,
):
super().__init__()
self.temporal = temporal
self.hidden_size = hidden_size
self.enable_flashattn = enable_flashattn
self._enable_sequence_parallelism = enable_sequence_parallelism
assert not enable_sequence_parallelism, "Sequence parallelism is not supported in STDiT3Block"
attn_cls = Attention
mha_cls = MultiHeadCrossAttention
self.norm1 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel)
self.attn = attn_cls(
hidden_size,
num_heads=num_heads,
qkv_bias=True,
qk_norm=qk_norm,
rope=rope,
enable_flashattn=enable_flashattn,
)
self.cross_attn = mha_cls(hidden_size, num_heads)
self.norm2 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel)
self.mlp = Mlp(
in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size**0.5)

    def t_mask_select(self, x_mask, x, masked_x, T, S):
        # x: [B, (T, S), C]
        # masked_x: [B, (T, S), C]
        # x_mask: [B, T]
x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
masked_x = rearrange(masked_x, "B (T S) C -> B T S C", T=T, S=S)
x = torch.where(x_mask[:, :, None, None], x, masked_x)
x = rearrange(x, "B T S C -> B (T S) C")
return x
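
    # Illustrative shape note: t_mask_select operates per frame. With B=1,
    # T=2, S=3 and x_mask = [[True, False]], the three tokens of frame 0 are
    # taken from `x` and the three tokens of frame 1 from `masked_x`. In
    # forward() below, `masked_x` is the branch modulated with the t=0
    # embedding, so frames where x_mask is False receive the zero-timestep
    # modulation.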

    def forward(
self,
x,
y,
t,
mask=None, # text mask
x_mask=None, # temporal mask
        t0=None,  # t with timestep=0
T=None, # number of frames
S=None, # number of pixel patches
):
# prepare modulate parameters
B, N, C = x.shape
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
self.scale_shift_table[None] + t.reshape(B, 6, -1)
).chunk(6, dim=1)
if x_mask is not None:
shift_msa_zero, scale_msa_zero, gate_msa_zero, shift_mlp_zero, scale_mlp_zero, gate_mlp_zero = (
self.scale_shift_table[None] + t0.reshape(B, 6, -1)
).chunk(6, dim=1)
# modulate (attention)
x_m = t2i_modulate(self.norm1(x), shift_msa, scale_msa)
if x_mask is not None:
x_m_zero = t2i_modulate(self.norm1(x), shift_msa_zero, scale_msa_zero)
x_m = self.t_mask_select(x_mask, x_m, x_m_zero, T, S)
# attention
if self.temporal:
x_m = rearrange(x_m, "B (T S) C -> (B S) T C", T=T, S=S)
x_m = self.attn(x_m)
x_m = rearrange(x_m, "(B S) T C -> B (T S) C", T=T, S=S)
else:
x_m = rearrange(x_m, "B (T S) C -> (B T) S C", T=T, S=S)
x_m = self.attn(x_m)
x_m = rearrange(x_m, "(B T) S C -> B (T S) C", T=T, S=S)
# modulate (attention)
x_m_s = gate_msa * x_m
if x_mask is not None:
x_m_s_zero = gate_msa_zero * x_m
x_m_s = self.t_mask_select(x_mask, x_m_s, x_m_s_zero, T, S)
# residual
x = x + self.drop_path(x_m_s)
# cross attention
x = x + self.cross_attn(x, y, mask)
# modulate (MLP)
x_m = t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)
if x_mask is not None:
x_m_zero = t2i_modulate(self.norm2(x), shift_mlp_zero, scale_mlp_zero)
x_m = self.t_mask_select(x_mask, x_m, x_m_zero, T, S)
# MLP
x_m = self.mlp(x_m)
# modulate (MLP)
x_m_s = gate_mlp * x_m
if x_mask is not None:
x_m_s_zero = gate_mlp_zero * x_m
x_m_s = self.t_mask_select(x_mask, x_m_s, x_m_s_zero, T, S)
# residual
x = x + self.drop_path(x_m_s)
return x


class STDiT3(nn.Module):
def __init__(
self,
input_size=(None, None, None),
input_sq_size=512,
in_channels=4,
patch_size=(1, 2, 2),
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
pred_sigma=True,
drop_path=0.0,
caption_channels=4096,
model_max_length=300,
dtype=torch.float32,
qk_norm=False,
enable_flashattn=False,
enable_layernorm_kernel=False,
enable_sequence_parallelism=False,
):
super().__init__()
self.pred_sigma = pred_sigma
self.in_channels = in_channels
self.out_channels = in_channels * 2 if pred_sigma else in_channels
# model size related
self.depth = depth
self.mlp_ratio = mlp_ratio
self.hidden_size = hidden_size
self.num_heads = num_heads
# computation related
self.drop_path = drop_path
self.dtype = dtype
self.enable_flashattn = enable_flashattn
self.enable_layernorm_kernel = enable_layernorm_kernel
# input size related
self.patch_size = patch_size
self.input_sq_size = input_sq_size
self.pos_embed = PositionEmbedding2D(hidden_size)
        # self.rope = RotaryEmbedding(dim=self.hidden_size // self.num_heads)
# embedding
self.x_embedder = PatchEmbed3D(patch_size, in_channels, hidden_size)
self.t_embedder = TimestepEmbedder(hidden_size)
        self.fps_embedder = SizeEmbedder(self.hidden_size)
self.t_block = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size, 6 * hidden_size, bias=True),
)
self.y_embedder = CaptionEmbedder(
in_channels=caption_channels,
hidden_size=hidden_size,
uncond_prob=class_dropout_prob,
act_layer=approx_gelu,
token_num=model_max_length,
)
# spatial blocks
drop_path = [x.item() for x in torch.linspace(0, self.drop_path, depth)]
self.spatial_blocks = nn.ModuleList(
[
STDiT3Block(
hidden_size=hidden_size,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
drop_path=drop_path[i],
qk_norm=qk_norm,
enable_flashattn=enable_flashattn,
enable_layernorm_kernel=enable_layernorm_kernel,
enable_sequence_parallelism=enable_sequence_parallelism,
)
for i in range(depth)
]
)
# temporal blocks
# drop_path = [x.item() for x in torch.linspace(0, self.drop_path, depth)]
# self.temporal_blocks = nn.ModuleList(
# [
# STDiT3Block(
# hidden_size=hidden_size,
# num_heads=num_heads,
# mlp_ratio=mlp_ratio,
# drop_path=drop_path[i],
# qk_norm=qk_norm,
# enable_flashattn=enable_flashattn,
# enable_layernorm_kernel=enable_layernorm_kernel,
# enable_sequence_parallelism=enable_sequence_parallelism,
# # temporal
# temporal=True,
# rope=self.rope.rotate_queries_or_keys,
# )
# for i in range(depth)
# ]
# )
# final layer
self.final_layer = T2IFinalLayer(hidden_size, np.prod(self.patch_size), self.out_channels)

        self.initialize_weights()

    def initialize_weights(self):
        # Initialize fps_embedder: normal init for the first MLP layer, zeros for the last
        nn.init.normal_(self.fps_embedder.mlp[0].weight, std=0.02)
        nn.init.constant_(self.fps_embedder.mlp[0].bias, 0)
        nn.init.constant_(self.fps_embedder.mlp[2].weight, 0)
        nn.init.constant_(self.fps_embedder.mlp[2].bias, 0)

    def get_dynamic_size(self, x):
_, _, T, H, W = x.size()
if T % self.patch_size[0] != 0:
T += self.patch_size[0] - T % self.patch_size[0]
if H % self.patch_size[1] != 0:
H += self.patch_size[1] - H % self.patch_size[1]
if W % self.patch_size[2] != 0:
W += self.patch_size[2] - W % self.patch_size[2]
T = T // self.patch_size[0]
H = H // self.patch_size[1]
W = W // self.patch_size[2]
return (T, H, W)
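
    # Illustrative example: with patch_size=(1, 2, 2) and a latent of size
    # T=16, H=30, W=45, the width is first rounded up to 46 so it divides
    # the patch size, giving a token grid of (T, H, W) = (16, 15, 23).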

    def forward(self, x, timestep, y, mask=None, x_mask=None, fps=None, height=None, width=None, **kwargs):
        B = x.size(0)
x = x.to(self.dtype)
timestep = timestep.to(self.dtype)
y = y.to(self.dtype)
# === get pos embed ===
_, _, Tx, Hx, Wx = x.size()
T, H, W = self.get_dynamic_size(x)
S = H * W
base_size = round(S**0.5)
resolution_sq = (height[0].item() * width[0].item()) ** 0.5
scale = resolution_sq / self.input_sq_size
pos_emb = self.pos_embed(x, H, W, scale=scale, base_size=base_size)
# === get timestep embed ===
t = self.t_embedder(timestep, dtype=x.dtype) # [B, C]
        fps = self.fps_embedder(fps.unsqueeze(1), B)
        t = t + fps
t_mlp = self.t_block(t)
t0 = t0_mlp = None
if x_mask is not None:
t0_timestep = torch.zeros_like(timestep)
t0 = self.t_embedder(t0_timestep, dtype=x.dtype)
t0 = t0 + fps
t0_mlp = self.t_block(t0)
# === get y embed ===
y = self.y_embedder(y, self.training) # [B, 1, N_token, C]
if mask is not None:
if mask.shape[0] != y.shape[0]:
mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
mask = mask.squeeze(1).squeeze(1)
y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, self.hidden_size)
y_lens = mask.sum(dim=1).tolist()
else:
y_lens = [y.shape[2]] * y.shape[0]
y = y.squeeze(1).view(1, -1, self.hidden_size)
# === get x embed ===
x = self.x_embedder(x) # [B, N, C]
x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
x = x + pos_emb
x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)
# === blocks ===
        # for spatial_block, temporal_block in zip(self.spatial_blocks, self.temporal_blocks):
        for spatial_block in self.spatial_blocks:
            x = auto_grad_checkpoint(spatial_block, x, y, t_mlp, y_lens, x_mask, t0_mlp, T, S)
            # x = auto_grad_checkpoint(temporal_block, x, y, t_mlp, y_lens, x_mask, t0_mlp, T, S)
# === final layer ===
x = self.final_layer(x, t, x_mask, t0, T, S)
x = self.unpatchify(x, T, H, W, Tx, Hx, Wx)
# cast to float32 for better accuracy
x = x.to(torch.float32)
return x

    def unpatchify(self, x, N_t, N_h, N_w, R_t, R_h, R_w):
"""
Args:
x (torch.Tensor): of shape [B, N, C]
Return:
x (torch.Tensor): of shape [B, C_out, T, H, W]
"""
# N_t, N_h, N_w = [self.input_size[i] // self.patch_size[i] for i in range(3)]
T_p, H_p, W_p = self.patch_size
x = rearrange(
x,
"B (N_t N_h N_w) (T_p H_p W_p C_out) -> B C_out (N_t T_p) (N_h H_p) (N_w W_p)",
N_t=N_t,
N_h=N_h,
N_w=N_w,
T_p=T_p,
H_p=H_p,
W_p=W_p,
C_out=self.out_channels,
)
# unpad
x = x[:, :, :R_t, :R_h, :R_w]
return x


@MODELS.register_module("STDiT3-XL/2")
def STDiT3_XL_2(from_pretrained=None, **kwargs):
model = STDiT3(depth=28, hidden_size=1152, patch_size=(1, 2, 2), num_heads=16, **kwargs)
if from_pretrained is not None:
load_checkpoint(model, from_pretrained)
return model


@MODELS.register_module("STDiT3-3B/2")
def STDiT3_3B_2(from_pretrained=None, **kwargs):
model = STDiT3(depth=28, hidden_size=1872, patch_size=(1, 2, 2), num_heads=26, **kwargs)
if from_pretrained is not None:
load_checkpoint(model, from_pretrained)
return model
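

if __name__ == "__main__":
    # Minimal smoke-test sketch, illustrative only. It builds a deliberately
    # tiny STDiT3 rather than a released configuration, and the tensor shapes
    # below are assumptions read off the forward() signature above: x is a
    # latent [B, C_in, T, H, W], y a caption embedding of shape
    # [B, 1, N_token, caption_channels], and fps/height/width are per-sample
    # scalars. Assumes a CUDA device and the Open-Sora attention dependencies
    # (e.g. xformers) are installed, since the cross-attention uses fused kernels.
    device = "cuda"
    model = STDiT3(depth=2, hidden_size=64, num_heads=4).to(device).eval()

    B, T, H, W = 1, 8, 16, 16
    x = torch.randn(B, 4, T, H, W, device=device)      # noisy video latent
    timestep = torch.randint(0, 1000, (B,), device=device)
    y = torch.randn(B, 1, 16, 4096, device=device)     # caption embedding, 16 tokens
    fps = torch.full((B,), 24.0, device=device)
    height = torch.full((B,), 256.0, device=device)    # original frame height in pixels
    width = torch.full((B,), 256.0, device=device)     # original frame width in pixels

    with torch.no_grad():
        out = model(x, timestep, y, fps=fps, height=height, width=width)
    # pred_sigma=True doubles the channel dimension of the prediction
    print(out.shape)  # expected: torch.Size([1, 8, 8, 16, 16])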