# Modified from: # DiT: https://github.com/facebookresearch/DiT/blob/main/sample_ddp.py import torch torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True import torch.nn.functional as F import torch.distributed as dist from tqdm import tqdm import os from PIL import Image import numpy as np import math import argparse from tokenizer.tokenizer_image.vq_model import VQ_models from autoregressive.models.gpt import GPT_models from autoregressive.models.generate import generate def create_npz_from_sample_folder(sample_dir, num=50_000): """ Builds a single .npz file from a folder of .png samples. """ samples = [] for i in tqdm(range(num), desc="Building .npz file from samples"): sample_pil = Image.open(f"{sample_dir}/{i:06d}.png") sample_np = np.asarray(sample_pil).astype(np.uint8) samples.append(sample_np) samples = np.stack(samples) assert samples.shape == (num, samples.shape[1], samples.shape[2], 3) npz_path = f"{sample_dir}.npz" np.savez(npz_path, arr_0=samples) print(f"Saved .npz file to {npz_path} [shape={samples.shape}].") return npz_path def main(args): # Setup PyTorch: assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU. sample.py supports CPU-only usage" torch.set_grad_enabled(False) # Setup DDP: dist.init_process_group("nccl") rank = dist.get_rank() device = rank % torch.cuda.device_count() seed = args.global_seed * dist.get_world_size() + rank torch.manual_seed(seed) torch.cuda.set_device(device) print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.") # create and load model vq_model = VQ_models[args.vq_model]( codebook_size=args.codebook_size, codebook_embed_dim=args.codebook_embed_dim) vq_model.to(device) vq_model.eval() checkpoint = torch.load(args.vq_ckpt, map_location="cpu") vq_model.load_state_dict(checkpoint["model"]) del checkpoint # create and load gpt model precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision] latent_size = args.image_size // args.downsample_size gpt_model = GPT_models[args.gpt_model]( vocab_size=args.codebook_size, block_size=latent_size ** 2, num_classes=args.num_classes, cls_token_num=args.cls_token_num, model_type=args.gpt_type, ).to(device=device, dtype=precision) checkpoint = torch.load(args.gpt_ckpt, map_location="cpu") if args.from_fsdp: # fsdp model_weight = checkpoint elif "model" in checkpoint: # ddp model_weight = checkpoint["model"] elif "module" in checkpoint: # deepspeed model_weight = checkpoint["module"] elif "state_dict" in checkpoint: model_weight = checkpoint["state_dict"] else: raise Exception("please check model weight, maybe add --from-fsdp to run command") # if 'freqs_cis' in model_weight: # model_weight.pop('freqs_cis') gpt_model.load_state_dict(model_weight, strict=False) gpt_model.eval() del checkpoint if args.compile: print(f"compiling the model...") gpt_model = torch.compile( gpt_model, mode="reduce-overhead", fullgraph=True ) # requires PyTorch 2.0 (optional) else: print(f"no model compile") # Create folder to save samples: model_string_name = args.gpt_model.replace("/", "-") if args.from_fsdp: ckpt_string_name = args.gpt_ckpt.split('/')[-2] else: ckpt_string_name = os.path.basename(args.gpt_ckpt).replace(".pth", "").replace(".pt", "") folder_name = f"{model_string_name}-{ckpt_string_name}-size-{args.image_size}-size-{args.image_size_eval}-{args.vq_model}-" \ f"topk-{args.top_k}-topp-{args.top_p}-temperature-{args.temperature}-" \ f"cfg-{args.cfg_scale}-seed-{args.global_seed}" sample_folder_dir = f"{args.sample_dir}/{folder_name}" if rank == 0: os.makedirs(sample_folder_dir, exist_ok=True) print(f"Saving .png samples at {sample_folder_dir}") dist.barrier() # Figure out how many samples we need to generate on each GPU and how many iterations we need to run: n = args.per_proc_batch_size global_batch_size = n * dist.get_world_size() # To make things evenly-divisible, we'll sample a bit more than we need and then discard the extra samples: total_samples = int(math.ceil(args.num_fid_samples / global_batch_size) * global_batch_size) if rank == 0: print(f"Total number of images that will be sampled: {total_samples}") assert total_samples % dist.get_world_size() == 0, "total_samples must be divisible by world_size" samples_needed_this_gpu = int(total_samples // dist.get_world_size()) assert samples_needed_this_gpu % n == 0, "samples_needed_this_gpu must be divisible by the per-GPU batch size" iterations = int(samples_needed_this_gpu // n) pbar = range(iterations) pbar = tqdm(pbar) if rank == 0 else pbar total = 0 for _ in pbar: # Sample inputs: c_indices = torch.randint(0, args.num_classes, (n,), device=device) qzshape = [len(c_indices), args.codebook_embed_dim, latent_size, latent_size] index_sample = generate( gpt_model, c_indices, latent_size ** 2, cfg_scale=args.cfg_scale, cfg_interval=args.cfg_interval, temperature=args.temperature, top_k=args.top_k, top_p=args.top_p, sample_logits=True, ) samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1] if args.image_size_eval != args.image_size: samples = F.interpolate(samples, size=(args.image_size_eval, args.image_size_eval), mode='bicubic') samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy() # Save samples to disk as individual .png files for i, sample in enumerate(samples): index = i * dist.get_world_size() + rank + total Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png") total += global_batch_size # Make sure all processes have finished saving their samples before attempting to convert to .npz dist.barrier() if rank == 0: create_npz_from_sample_folder(sample_folder_dir, args.num_fid_samples) print("Done.") dist.barrier() dist.destroy_process_group() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B") parser.add_argument("--gpt-ckpt", type=str, default=None) parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional") parser.add_argument("--from-fsdp", action='store_true') parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input") parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"]) parser.add_argument("--compile", action='store_true', default=True) parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16") parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model") parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization") parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization") parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384) parser.add_argument("--image-size-eval", type=int, choices=[256, 384, 512], default=256) parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16) parser.add_argument("--num-classes", type=int, default=1000) parser.add_argument("--cfg-scale", type=float, default=1.5) parser.add_argument("--cfg-interval", type=float, default=-1) parser.add_argument("--sample-dir", type=str, default="samples") parser.add_argument("--per-proc-batch-size", type=int, default=32) parser.add_argument("--num-fid-samples", type=int, default=5000) parser.add_argument("--global-seed", type=int, default=0) parser.add_argument("--top-k", type=int, default=0,help="top-k value to sample with") parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with") parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with") args = parser.parse_args() main(args)