* fix #210

* fix #209
This commit is contained in:
Zheng Zangwei (Alex Zheng) 2024-03-25 13:19:11 +08:00 committed by GitHub
parent 7abee23c4c
commit b454751f8f
14 changed files with 16 additions and 16 deletions

View file

@ -30,7 +30,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -31,7 +31,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -30,7 +30,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -30,7 +30,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -15,7 +15,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
)
scheduler = dict(

View file

@ -17,7 +17,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
)
scheduler = dict(

View file

@ -16,7 +16,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
)
scheduler = dict(

View file

@ -16,7 +16,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
)
scheduler = dict(

View file

@ -29,7 +29,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -30,7 +30,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -30,7 +30,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True,
)

View file

@ -14,7 +14,7 @@ As shown in the figure, we insert a temporal attention right after each spatial
To focus on video generation, we hope to train the model based on a powerful image generation model. [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha) is an efficiently trained, high-quality image generation model with a T5-conditioned DiT structure. We initialize our model with PixArt-α and initialize the projection layer of the inserted temporal attention to zero. This initialization preserves the model's image generation ability at the beginning, whereas Latte's architecture cannot. The inserted attention increases the number of parameters from 580M to 724M.
![Architecture](https://i0.imgs.ovh/2024/03/16/erC1d.png)
![Architecture](https://image.jiqizhixin.com/uploads/editor/ff49eaba-6b19-43d7-b65d-ad2ecdb9d555/640.jpeg)
Drawing from the success of PixArt-α and Stable Video Diffusion, we also adopt a progressive training strategy: 16x256x256 on 366K pretraining datasets, and then 16x256x256, 16x512x512, and 64x512x512 on 20K datasets. With scaled position embedding, this strategy greatly reduces the computational cost.

View file

@ -100,7 +100,7 @@ vae = dict(
)
text_encoder = dict(
type="t5", # Select text encoder type (t5, clip)
from_pretrained="./pretrained_models/t5_ckpts", # Load from pretrained text encoder
from_pretrained="DeepFloyd/t5-v1_1-xxl", # Load from pretrained text encoder
model_max_length=120, # Maximum length of input text
)
scheduler = dict(
@ -153,7 +153,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True, # Enable shardformer for T5 acceleration
)

View file

@ -100,7 +100,7 @@ vae = dict(
)
text_encoder = dict(
type="t5", # Select text encoder type (t5, clip)
from_pretrained="./pretrained_models/t5_ckpts", # Load from pretrained text encoder
from_pretrained="DeepFloyd/t5-v1_1-xxl", # Load from pretrained text encoder
model_max_length=120, # Maximum length of input text
)
scheduler = dict(
@ -153,7 +153,7 @@ vae = dict(
)
text_encoder = dict(
type="t5",
from_pretrained="./pretrained_models/t5_ckpts",
from_pretrained="DeepFloyd/t5-v1_1-xxl",
model_max_length=120,
shardformer=True, # Enable shardformer for T5 acceleration
)