import os

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding
from timm.models.layers import DropPath
from timm.models.vision_transformer import Mlp
from transformers import PretrainedConfig, PreTrainedModel

from opensora.acceleration.checkpoint import auto_grad_checkpoint
from opensora.acceleration.communications import gather_forward_split_backward, split_forward_gather_backward
from opensora.acceleration.parallel_states import get_sequence_parallel_group
from opensora.models.layers.blocks import (
    Attention,
    CaptionEmbedder,
    MultiHeadCrossAttention,
    PatchEmbed3D,
    PositionEmbedding2D,
    SeqParallelAttention,
    SeqParallelMultiHeadCrossAttention,
    SizeEmbedder,
    T2IFinalLayer,
    TimestepEmbedder,
    approx_gelu,
    get_layernorm,
    t2i_modulate,
)
from opensora.registry import MODELS
from opensora.utils.ckpt_utils import load_checkpoint


class STDiT3Block(nn.Module):
    def __init__(
        self,
        hidden_size,
        num_heads,
        mlp_ratio=4.0,
        drop_path=0.0,
        rope=None,
        qk_norm=False,
        temporal=False,
        enable_flash_attn=False,
        enable_layernorm_kernel=False,
        enable_sequence_parallelism=False,
    ):
        super().__init__()
        self.temporal = temporal
        self.hidden_size = hidden_size
        self.enable_flash_attn = enable_flash_attn
        self.enable_sequence_parallelism = enable_sequence_parallelism

        if self.enable_sequence_parallelism and not temporal:
            attn_cls = SeqParallelAttention
            mha_cls = SeqParallelMultiHeadCrossAttention
        else:
            attn_cls = Attention
            mha_cls = MultiHeadCrossAttention

        self.norm1 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel)
        self.attn = attn_cls(
            hidden_size,
            num_heads=num_heads,
            qkv_bias=True,
            qk_norm=qk_norm,
            rope=rope,
            enable_flash_attn=enable_flash_attn,
        )
        self.cross_attn = mha_cls(hidden_size, num_heads)
        self.norm2 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel)
        self.mlp = Mlp(
            in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size**0.5)

    def t_mask_select(self, x_mask, x, masked_x, T, S):
        # x: [B, (T, S), C]
        # masked_x: [B, (T, S), C]
        # x_mask: [B, T]
        x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
        masked_x = rearrange(masked_x, "B (T S) C -> B T S C", T=T, S=S)
        x = torch.where(x_mask[:, :, None, None], x, masked_x)
        x = rearrange(x, "B T S C -> B (T S) C")
        return x

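    # Descriptive note (not part of the original file): forward() below follows the
    # PixArt-style adaLN-single scheme. A single learned table of six vectors is added
    # to the per-sample conditioning t and chunked into shift/scale/gate triples for
    # the attention and MLP branches; when x_mask is given, a parallel set is derived
    # from t0 (the timestep-zero embedding) and t_mask_select picks between the two
    # per frame. Assuming the usual definition of t2i_modulate in
    # opensora.models.layers.blocks, the modulation is roughly x * (1 + scale) + shift,
    # with the gates rescaling each branch output before its residual addition.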
    def forward(
        self,
        x,
        y,
        t,
        mask=None,  # text mask
        x_mask=None,  # temporal mask
        t0=None,  # t with timestep=0
        T=None,  # number of frames
        S=None,  # number of pixel patches
    ):
        # prepare modulate parameters
        B, N, C = x.shape
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
            self.scale_shift_table[None] + t.reshape(B, 6, -1)
        ).chunk(6, dim=1)
        if x_mask is not None:
            shift_msa_zero, scale_msa_zero, gate_msa_zero, shift_mlp_zero, scale_mlp_zero, gate_mlp_zero = (
                self.scale_shift_table[None] + t0.reshape(B, 6, -1)
            ).chunk(6, dim=1)

        # modulate (attention)
        x_m = t2i_modulate(self.norm1(x), shift_msa, scale_msa)
        if x_mask is not None:
            x_m_zero = t2i_modulate(self.norm1(x), shift_msa_zero, scale_msa_zero)
            x_m = self.t_mask_select(x_mask, x_m, x_m_zero, T, S)

        # attention
        if self.temporal:
            x_m = rearrange(x_m, "B (T S) C -> (B S) T C", T=T, S=S)
            x_m = self.attn(x_m)
            x_m = rearrange(x_m, "(B S) T C -> B (T S) C", T=T, S=S)
        else:
            x_m = rearrange(x_m, "B (T S) C -> (B T) S C", T=T, S=S)
            x_m = self.attn(x_m)
            x_m = rearrange(x_m, "(B T) S C -> B (T S) C", T=T, S=S)

        # modulate (attention)
        x_m_s = gate_msa * x_m
        if x_mask is not None:
            x_m_s_zero = gate_msa_zero * x_m
            x_m_s = self.t_mask_select(x_mask, x_m_s, x_m_s_zero, T, S)

        # residual
        x = x + self.drop_path(x_m_s)

        # cross attention
        x = x + self.cross_attn(x, y, mask)

        # modulate (MLP)
        x_m = t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)
        if x_mask is not None:
            x_m_zero = t2i_modulate(self.norm2(x), shift_mlp_zero, scale_mlp_zero)
            x_m = self.t_mask_select(x_mask, x_m, x_m_zero, T, S)

        # MLP
        x_m = self.mlp(x_m)

        # modulate (MLP)
        x_m_s = gate_mlp * x_m
        if x_mask is not None:
            x_m_s_zero = gate_mlp_zero * x_m
            x_m_s = self.t_mask_select(x_mask, x_m_s, x_m_s_zero, T, S)

        # residual
        x = x + self.drop_path(x_m_s)

        return x


class STDiT3Config(PretrainedConfig):
    model_type = "STDiT3"

    def __init__(
        self,
        input_size=(None, None, None),
        input_sq_size=512,
        in_channels=4,
        patch_size=(1, 2, 2),
        hidden_size=1152,
        depth=28,
        num_heads=16,
        mlp_ratio=4.0,
        class_dropout_prob=0.1,
        pred_sigma=True,
        drop_path=0.0,
        caption_channels=4096,
        model_max_length=300,
        qk_norm=True,
        enable_flash_attn=False,
        enable_layernorm_kernel=False,
        enable_sequence_parallelism=False,
        only_train_temporal=False,
        freeze_y_embedder=False,
        skip_y_embedder=False,
        **kwargs,
    ):
        self.input_size = input_size
        self.input_sq_size = input_sq_size
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.hidden_size = hidden_size
        self.depth = depth
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.class_dropout_prob = class_dropout_prob
        self.pred_sigma = pred_sigma
        self.drop_path = drop_path
        self.caption_channels = caption_channels
        self.model_max_length = model_max_length
        self.qk_norm = qk_norm
        self.enable_flash_attn = enable_flash_attn
        self.enable_layernorm_kernel = enable_layernorm_kernel
        self.enable_sequence_parallelism = enable_sequence_parallelism
        self.only_train_temporal = only_train_temporal
        self.freeze_y_embedder = freeze_y_embedder
        self.skip_y_embedder = skip_y_embedder
        super().__init__(**kwargs)


class STDiT3(PreTrainedModel):
    config_class = STDiT3Config

    def __init__(self, config):
        super().__init__(config)
        self.pred_sigma = config.pred_sigma
        self.in_channels = config.in_channels
        self.out_channels = config.in_channels * 2 if config.pred_sigma else config.in_channels

        # model size related
        self.depth = config.depth
        self.mlp_ratio = config.mlp_ratio
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_heads

        # computation related
        self.drop_path = config.drop_path
        self.enable_flash_attn = config.enable_flash_attn
        self.enable_layernorm_kernel = config.enable_layernorm_kernel
        self.enable_sequence_parallelism = config.enable_sequence_parallelism

        # input size related
        self.patch_size = config.patch_size
        self.input_sq_size = config.input_sq_size
        self.pos_embed = PositionEmbedding2D(config.hidden_size)
        self.rope = RotaryEmbedding(dim=self.hidden_size // self.num_heads)

        # embedding
        self.x_embedder = PatchEmbed3D(config.patch_size, config.in_channels, config.hidden_size)
        self.t_embedder = TimestepEmbedder(config.hidden_size)
        self.fps_embedder = SizeEmbedder(self.hidden_size)
        self.t_block = nn.Sequential(
            nn.SiLU(),
            nn.Linear(config.hidden_size, 6 * config.hidden_size, bias=True),
        )
        self.y_embedder = CaptionEmbedder(
            in_channels=config.caption_channels,
            hidden_size=config.hidden_size,
            uncond_prob=config.class_dropout_prob,
            act_layer=approx_gelu,
            token_num=config.model_max_length,
        )

        # spatial blocks
        drop_path = [x.item() for x in torch.linspace(0, self.drop_path, config.depth)]
        self.spatial_blocks = nn.ModuleList(
            [
                STDiT3Block(
                    hidden_size=config.hidden_size,
                    num_heads=config.num_heads,
                    mlp_ratio=config.mlp_ratio,
                    drop_path=drop_path[i],
                    qk_norm=config.qk_norm,
                    enable_flash_attn=config.enable_flash_attn,
                    enable_layernorm_kernel=config.enable_layernorm_kernel,
                    enable_sequence_parallelism=config.enable_sequence_parallelism,
                )
                for i in range(config.depth)
            ]
        )

        # temporal blocks
        drop_path = [x.item() for x in torch.linspace(0, self.drop_path, config.depth)]
        self.temporal_blocks = nn.ModuleList(
            [
                STDiT3Block(
                    hidden_size=config.hidden_size,
                    num_heads=config.num_heads,
                    mlp_ratio=config.mlp_ratio,
                    drop_path=drop_path[i],
                    qk_norm=config.qk_norm,
                    enable_flash_attn=config.enable_flash_attn,
                    enable_layernorm_kernel=config.enable_layernorm_kernel,
                    enable_sequence_parallelism=config.enable_sequence_parallelism,
                    # temporal
                    temporal=True,
                    rope=self.rope.rotate_queries_or_keys,
                )
                for i in range(config.depth)
            ]
        )

        # final layer
        self.final_layer = T2IFinalLayer(config.hidden_size, np.prod(self.patch_size), self.out_channels)

        self.initialize_weights()
        if config.only_train_temporal:
            for param in self.parameters():
                param.requires_grad = False
            for block in self.temporal_blocks:
                for param in block.parameters():
                    param.requires_grad = True

        if config.freeze_y_embedder:
            for param in self.y_embedder.parameters():
                param.requires_grad = False

    def initialize_weights(self):
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

        self.apply(_basic_init)

        # Initialize fps_embedder
        nn.init.normal_(self.fps_embedder.mlp[0].weight, std=0.02)
        nn.init.constant_(self.fps_embedder.mlp[0].bias, 0)
        nn.init.constant_(self.fps_embedder.mlp[2].weight, 0)
        nn.init.constant_(self.fps_embedder.mlp[2].bias, 0)

        # Initialize temporal blocks
        for block in self.temporal_blocks:
            nn.init.constant_(block.attn.proj.weight, 0)
            nn.init.constant_(block.cross_attn.proj.weight, 0)
            nn.init.constant_(block.mlp.fc2.weight, 0)

    def get_dynamic_size(self, x):
        _, _, T, H, W = x.size()
        if T % self.patch_size[0] != 0:
            T += self.patch_size[0] - T % self.patch_size[0]
        if H % self.patch_size[1] != 0:
            H += self.patch_size[1] - H % self.patch_size[1]
        if W % self.patch_size[2] != 0:
            W += self.patch_size[2] - W % self.patch_size[2]
        T = T // self.patch_size[0]
        H = H // self.patch_size[1]
        W = W // self.patch_size[2]
        return (T, H, W)
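    # Descriptive note (not part of the original file): get_dynamic_size() rounds each
    # axis up to a multiple of the patch size before dividing. With the default
    # patch_size=(1, 2, 2), an input of T=16, H=90, W=160 gives (16, 45, 80), while an
    # odd H such as 45 is first rounded up to 46 and then gives 23 latent rows.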
    def encode_text(self, y, mask=None):
        y = self.y_embedder(y, self.training)  # [B, 1, N_token, C]
        if mask is not None:
            if mask.shape[0] != y.shape[0]:
                mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
            mask = mask.squeeze(1).squeeze(1)
            y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, self.hidden_size)
            y_lens = mask.sum(dim=1).tolist()
        else:
            y_lens = [y.shape[2]] * y.shape[0]
            y = y.squeeze(1).view(1, -1, self.hidden_size)
        return y, y_lens

    def forward(self, x, timestep, y, mask=None, x_mask=None, fps=None, height=None, width=None, **kwargs):
        dtype = self.x_embedder.proj.weight.dtype
        B = x.size(0)
        x = x.to(dtype)
        timestep = timestep.to(dtype)
        y = y.to(dtype)

        # === get pos embed ===
        _, _, Tx, Hx, Wx = x.size()
        T, H, W = self.get_dynamic_size(x)

        # adjust for sequence parallelism
        # we need to ensure H * W is divisible by sequence parallel size
        # for simplicity, we can adjust the height to make it divisible
        if self.enable_sequence_parallelism:
            sp_size = dist.get_world_size(get_sequence_parallel_group())
            h_pad_size = sp_size - H % sp_size
            hx_pad_size = h_pad_size * self.patch_size[1]

            # pad x along the H dimension
            H += h_pad_size
            x = F.pad(x, (0, 0, 0, hx_pad_size))

        S = H * W
        base_size = round(S**0.5)
        resolution_sq = (height[0].item() * width[0].item()) ** 0.5
        scale = resolution_sq / self.input_sq_size
        pos_emb = self.pos_embed(x, H, W, scale=scale, base_size=base_size)

        # === get timestep embed ===
        t = self.t_embedder(timestep, dtype=x.dtype)  # [B, C]
        fps = self.fps_embedder(fps.unsqueeze(1), B)
        t = t + fps
        t_mlp = self.t_block(t)
        t0 = t0_mlp = None
        if x_mask is not None:
            t0_timestep = torch.zeros_like(timestep)
            t0 = self.t_embedder(t0_timestep, dtype=x.dtype)
            t0 = t0 + fps
            t0_mlp = self.t_block(t0)

        # === get y embed ===
        if self.config.skip_y_embedder:
            y_lens = mask
            if isinstance(y_lens, torch.Tensor):
                y_lens = y_lens.long().tolist()
        else:
            y, y_lens = self.encode_text(y, mask)

        # === get x embed ===
        x = self.x_embedder(x)  # [B, N, C]
        x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
        x = x + pos_emb

        # shard over the sequence dim if sp is enabled
        if self.enable_sequence_parallelism:
            x = split_forward_gather_backward(x, get_sequence_parallel_group(), dim=2, grad_scale="down")
            S = S // dist.get_world_size(get_sequence_parallel_group())

        x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)

        # === blocks ===
        for spatial_block, temporal_block in zip(self.spatial_blocks, self.temporal_blocks):
            x = auto_grad_checkpoint(spatial_block, x, y, t_mlp, y_lens, x_mask, t0_mlp, T, S)
            x = auto_grad_checkpoint(temporal_block, x, y, t_mlp, y_lens, x_mask, t0_mlp, T, S)

        if self.enable_sequence_parallelism:
            x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
            x = gather_forward_split_backward(x, get_sequence_parallel_group(), dim=2, grad_scale="up")
            S = S * dist.get_world_size(get_sequence_parallel_group())
            x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)

        # === final layer ===
        x = self.final_layer(x, t, x_mask, t0, T, S)
        x = self.unpatchify(x, T, H, W, Tx, Hx, Wx)

        # cast to float32 for better accuracy
        x = x.to(torch.float32)
        return x

    def unpatchify(self, x, N_t, N_h, N_w, R_t, R_h, R_w):
        """
        Args:
            x (torch.Tensor): of shape [B, N, C]

        Return:
            x (torch.Tensor): of shape [B, C_out, T, H, W]
        """

        # N_t, N_h, N_w = [self.input_size[i] // self.patch_size[i] for i in range(3)]
        T_p, H_p, W_p = self.patch_size
        x = rearrange(
            x,
            "B (N_t N_h N_w) (T_p H_p W_p C_out) -> B C_out (N_t T_p) (N_h H_p) (N_w W_p)",
            N_t=N_t,
            N_h=N_h,
            N_w=N_w,
            T_p=T_p,
            H_p=H_p,
            W_p=W_p,
            C_out=self.out_channels,
        )
        # unpad
        x = x[:, :, :R_t, :R_h, :R_w]
        return x


@MODELS.register_module("STDiT3-XL/2")
def STDiT3_XL_2(from_pretrained=None, **kwargs):
    force_huggingface = kwargs.pop("force_huggingface", False)
    if force_huggingface or (from_pretrained is not None and not os.path.exists(from_pretrained)):
        model = STDiT3.from_pretrained(from_pretrained, **kwargs)
    else:
        config = STDiT3Config(depth=28, hidden_size=1152, patch_size=(1, 2, 2), num_heads=16, **kwargs)
        model = STDiT3(config)
        if from_pretrained is not None:
            load_checkpoint(model, from_pretrained)
    return model


@MODELS.register_module("STDiT3-3B/2")
def STDiT3_3B_2(from_pretrained=None, **kwargs):
    force_huggingface = kwargs.pop("force_huggingface", False)
    if force_huggingface or (from_pretrained is not None and not os.path.exists(from_pretrained)):
        model = STDiT3.from_pretrained(from_pretrained, **kwargs)
    else:
        config = STDiT3Config(depth=28, hidden_size=1872, patch_size=(1, 2, 2), num_heads=26, **kwargs)
        model = STDiT3(config)
        if from_pretrained is not None:
            load_checkpoint(model, from_pretrained)
    return model
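

# Minimal smoke-test sketch (not part of the original module). It assumes the opensora
# package and the attention backends it relies on are installed and working in your
# environment, and only checks that a tiny, randomly initialized STDiT3 produces an
# output of the expected shape; the sizes below are illustrative and far smaller than
# any real training configuration.
if __name__ == "__main__":
    config = STDiT3Config(
        depth=2,
        hidden_size=64,
        num_heads=4,
        caption_channels=32,
        model_max_length=8,
    )
    model = STDiT3(config).eval()

    B, C, T, H, W = 1, 4, 8, 32, 32
    x = torch.randn(B, C, T, H, W)  # latent video
    timestep = torch.randint(0, 1000, (B,))  # diffusion timestep
    y = torch.randn(B, 1, config.model_max_length, config.caption_channels)  # text embedding
    fps = torch.tensor([24.0])
    height = torch.tensor([float(H * 8)])  # pixel-space size hints used for pos-embed scaling
    width = torch.tensor([float(W * 8)])

    with torch.no_grad():
        out = model(x, timestep, y, fps=fps, height=height, width=width)
    # pred_sigma=True doubles the channel dim, so the expected shape is [B, 2 * C, T, H, W]
    print(out.shape)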