Open-Sora/scripts/inference.py

168 lines
6.4 KiB
Python
Raw Normal View History

2024-03-15 14:49:38 +01:00
import os
2024-03-17 04:00:23 +01:00
import colossalai
2024-03-23 13:28:34 +01:00
import torch
2024-03-17 04:00:23 +01:00
import torch.distributed as dist
2024-03-23 13:28:34 +01:00
from colossalai.cluster import DistCoordinator
2024-03-15 14:49:38 +01:00
from mmengine.runner import set_random_seed
2024-03-23 13:28:34 +01:00
from opensora.acceleration.parallel_states import set_sequence_parallel_group
2024-04-19 09:21:00 +02:00
from opensora.datasets import IMG_FPS, save_sample
2024-04-15 07:44:30 +02:00
from opensora.models.text_encoder.t5 import text_preprocessing
2024-03-15 14:49:38 +01:00
from opensora.registry import MODELS, SCHEDULERS, build_module
from opensora.utils.config_utils import parse_configs
from opensora.utils.misc import to_torch_dtype
def _init_distributed():
    """Initialize the distributed environment when launched via torchrun.

    Returns:
        tuple: ``(use_dist, coordinator, enable_sequence_parallelism)`` where
        ``coordinator`` is ``None`` in the single-process case.
    """
    # WORLD_SIZE is set by torchrun/torch.distributed launchers; its absence
    # means plain single-process execution.
    if os.environ.get("WORLD_SIZE", None):
        colossalai.launch_from_torch({})
        coordinator = DistCoordinator()
        if coordinator.world_size > 1:
            # Sequence parallelism shards the sequence dimension across the
            # whole world group.
            set_sequence_parallel_group(dist.group.WORLD)
            return True, coordinator, True
        return True, coordinator, False
    return False, None, False


def _build_models(cfg, device, dtype, enable_sequence_parallelism):
    """Build VAE, text encoder, diffusion model and scheduler from the config.

    Returns:
        tuple: ``(vae, text_encoder, model, scheduler, latent_size)``.
    """
    input_size = (cfg.num_frames, *cfg.image_size)
    vae = build_module(cfg.vae, MODELS)
    latent_size = vae.get_latent_size(input_size)
    text_encoder = build_module(cfg.text_encoder, MODELS, device=device)  # T5 must be fp32
    model = build_module(
        cfg.model,
        MODELS,
        input_size=latent_size,
        in_channels=vae.out_channels,
        caption_channels=text_encoder.output_dim,
        model_max_length=text_encoder.model_max_length,
        dtype=dtype,
        enable_sequence_parallelism=enable_sequence_parallelism,
    )
    text_encoder.y_embedder = model.y_embedder  # hack for classifier-free guidance

    # Move to device & eval mode; inference only, no training state needed.
    vae = vae.to(device, dtype).eval()
    model = model.to(device, dtype).eval()

    scheduler = build_module(cfg.scheduler, SCHEDULERS)
    return vae, text_encoder, model, scheduler, latent_size


def _multi_resolution_args(cfg, device, dtype):
    """Build the extra conditioning tensors required by multi-resolution models.

    Supports the ``PixArtMS`` (hw/ar ``data_info``) and ``STDiT2``
    (height/width/num_frames/ar/fps) conventions; returns an empty dict for
    any other setting.

    NOTE: for STDiT2 with ``num_frames == 1`` this mutates ``cfg.fps`` to the
    ``IMG_FPS`` sentinel (single frame == still image), which the caller also
    relies on when saving.
    """
    model_args = dict()
    if cfg.multi_resolution == "PixArtMS":
        image_size = cfg.image_size
        hw = torch.tensor([image_size], device=device, dtype=dtype).repeat(cfg.batch_size, 1)
        ar = torch.tensor([[image_size[0] / image_size[1]]], device=device, dtype=dtype).repeat(cfg.batch_size, 1)
        model_args["data_info"] = dict(ar=ar, hw=hw)
    elif cfg.multi_resolution == "STDiT2":
        image_size = cfg.image_size
        height = torch.tensor([image_size[0]], device=device, dtype=dtype).repeat(cfg.batch_size)
        width = torch.tensor([image_size[1]], device=device, dtype=dtype).repeat(cfg.batch_size)
        num_frames = torch.tensor([cfg.num_frames], device=device, dtype=dtype).repeat(cfg.batch_size)
        ar = torch.tensor([image_size[0] / image_size[1]], device=device, dtype=dtype).repeat(cfg.batch_size)
        if cfg.num_frames == 1:
            cfg.fps = IMG_FPS
        fps = torch.tensor([cfg.fps], device=device, dtype=dtype).repeat(cfg.batch_size)
        model_args["height"] = height
        model_args["width"] = width
        model_args["num_frames"] = num_frames
        model_args["ar"] = ar
        model_args["fps"] = fps
    return model_args


def main():
    """Run text-to-video inference: parse config, build models, sample and save.

    Reads all settings from the parsed config (prompts, batch size, sampling
    scheduler, output directory) and writes one video file per generated
    sample. Supports multi-GPU runs launched via torchrun; only the master
    rank writes output files in that case.
    """
    # ======================================================
    # 1. cfg and init distributed env
    # ======================================================
    cfg = parse_configs(training=False)
    print(cfg)

    use_dist, coordinator, enable_sequence_parallelism = _init_distributed()

    # ======================================================
    # 2. runtime variables
    # ======================================================
    torch.set_grad_enabled(False)
    # TF32 trades a little matmul precision for speed on Ampere+ GPUs.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = to_torch_dtype(cfg.dtype)
    set_random_seed(seed=cfg.seed)

    prompts = cfg.prompt
    if isinstance(prompts, str):
        # A bare string would otherwise be batched character by character.
        prompts = [prompts]

    # ======================================================
    # 3. build model & load weights
    # ======================================================
    vae, text_encoder, model, scheduler, latent_size = _build_models(
        cfg, device, dtype, enable_sequence_parallelism
    )

    # 3.4. support for multi-resolution
    model_args = _multi_resolution_args(cfg, device, dtype)

    # ======================================================
    # 4. inference
    # ======================================================
    sample_idx = 0
    if cfg.sample_name is not None:
        sample_name = cfg.sample_name
    elif cfg.prompt_as_path:
        # The prompt text itself becomes the filename, with no prefix.
        sample_name = ""
    else:
        sample_name = "sample"
    save_dir = cfg.save_dir
    os.makedirs(save_dir, exist_ok=True)

    # 4.1. batch generation
    for i in range(0, len(prompts), cfg.batch_size):
        # 4.2 sample in hidden space
        batch_prompts_raw = prompts[i : i + cfg.batch_size]
        batch_prompts = [text_preprocessing(prompt) for prompt in batch_prompts_raw]
        z = torch.randn(len(batch_prompts), vae.out_channels, *latent_size, device=device, dtype=dtype)

        # 4.3. diffusion sampling
        old_sample_idx = sample_idx
        # generate multiple samples for each prompt
        for k in range(cfg.num_sample):
            sample_idx = old_sample_idx

            # Skip if the sample already exists.
            # This is useful for resuming sampling VBench.
            if cfg.prompt_as_path:
                skip = True
                for batch_prompt in batch_prompts_raw:
                    path = os.path.join(save_dir, f"{sample_name}{batch_prompt}")
                    if cfg.num_sample != 1:
                        path = f"{path}-{k}"
                    path = f"{path}.mp4"
                    if not os.path.exists(path):
                        skip = False
                        break
                if skip:
                    continue

            # sampling
            samples = scheduler.sample(
                model,
                text_encoder,
                z=z,
                prompts=batch_prompts,
                device=device,
                additional_args=model_args,
            )
            samples = vae.decode(samples.to(dtype))

            # 4.4. save samples (master rank only when distributed)
            if not use_dist or coordinator.is_master():
                for idx, sample in enumerate(samples):
                    print(f"Prompt: {batch_prompts_raw[idx]}")
                    if cfg.prompt_as_path:
                        sample_name_suffix = batch_prompts_raw[idx]
                    else:
                        sample_name_suffix = f"_{sample_idx}"
                    save_path = os.path.join(save_dir, f"{sample_name}{sample_name_suffix}")
                    if cfg.num_sample != 1:
                        save_path = f"{save_path}-{k}"
                    save_sample(sample, fps=cfg.fps // cfg.frame_interval, save_path=save_path)
                    sample_idx += 1
2024-03-15 14:49:38 +01:00
# Script entry point: run inference when executed directly.
if __name__ == "__main__":
    main()