sayakpaul (HF staff) committed
Commit 7a15650
Parent: 08e3e4c

Upload folder using huggingface_hub

vae-compilation/benchmark_pixart.py ADDED
@@ -0,0 +1,187 @@
+ import torch
+
+ torch.set_float32_matmul_precision("high")
+
+ import torch.utils.benchmark as benchmark
+ from diffusers import DiffusionPipeline
+ import gc
+
+ from torchao.quantization import (
+     int4_weight_only,
+     int8_weight_only,
+     int8_dynamic_activation_int8_weight,
+     quantize_,
+     autoquant,
+ )
+ from torchao.float8.inference import ActivationCasting, QuantConfig, quantize_to_float8
+ from torchao.prototype.quant_llm import fp6_llm_weight_only
+ from torchao.sparsity import sparsify_, int8_dynamic_activation_int8_semi_sparse_weight
+ from tabulate import tabulate
+ import argparse
+ import json
+
+
+ PROMPT = "Eiffel Tower was Made up of more than 2 million translucent straws to look like a cloud, with the bell tower at the top of the building, Michel installed huge foam-making machines in the forest to blow huge amounts of unpredictable wet clouds in the building's classic architecture."
+ PREFIXES = {
+     "stabilityai/stable-diffusion-3-medium-diffusers": "sd3",
+     "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "pixart",
+     "fal/AuraFlow": "auraflow",
+ }
+
+
+ def flush():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def bytes_to_giga_bytes(bytes):
+     return f"{(bytes / 1024 / 1024 / 1024):.3f}"
+
+
+ def benchmark_fn(f, *args, **kwargs):
+     t0 = benchmark.Timer(
+         stmt="f(*args, **kwargs)",
+         globals={"args": args, "kwargs": kwargs, "f": f},
+         num_threads=torch.get_num_threads(),
+     )
+     return f"{(t0.blocked_autorange().mean):.3f}"
+
+
+ def load_pipeline(
+     ckpt_id: str,
+     fuse_attn_projections: bool,
+     compile: bool,
+     quantization: str,
+     sparsify: bool,
+ ) -> DiffusionPipeline:
+     pipeline = DiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=torch.bfloat16).to("cuda")
+
+     if fuse_attn_projections:
+         pipeline.transformer.fuse_qkv_projections()
+         pipeline.vae.fuse_qkv_projections()
+
+     if quantization == "autoquant" and compile:
+         pipeline.transformer.to(memory_format=torch.channels_last)
+         pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
+         pipeline.vae.to(memory_format=torch.channels_last)
+         pipeline.vae.decode = torch.compile(pipeline.vae.decode, mode="max-autotune", fullgraph=True)
+
+     if not sparsify:
+         if quantization == "int8dq":
+             quantize_(pipeline.transformer, int8_dynamic_activation_int8_weight())
+             quantize_(pipeline.vae, int8_dynamic_activation_int8_weight())
+         elif quantization == "int8wo":
+             quantize_(pipeline.transformer, int8_weight_only())
+             quantize_(pipeline.vae, int8_weight_only())
+         elif quantization == "int4wo":
+             quantize_(pipeline.transformer, int4_weight_only())
+             quantize_(pipeline.vae, int4_weight_only())
+         elif quantization == "fp6":
+             quantize_(pipeline.transformer, fp6_llm_weight_only())
+             quantize_(pipeline.vae, fp6_llm_weight_only())
+         elif quantization == "fp8":
+             pipeline.transformer = quantize_to_float8(pipeline.transformer, QuantConfig(ActivationCasting.DYNAMIC))
+             pipeline.vae = quantize_to_float8(pipeline.vae, QuantConfig(ActivationCasting.DYNAMIC))
+         elif quantization == "autoquant":
+             pipeline.transformer = autoquant(pipeline.transformer)
+             pipeline.vae = autoquant(pipeline.vae)
+
+     if sparsify:
+         sparsify_(pipeline.transformer, int8_dynamic_activation_int8_semi_sparse_weight())
+         sparsify_(pipeline.vae, int8_dynamic_activation_int8_semi_sparse_weight())
+
+     if quantization != "autoquant" and compile:
+         pipeline.transformer.to(memory_format=torch.channels_last)
+         pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
+         pipeline.vae.to(memory_format=torch.channels_last)
+         pipeline.vae.decode = torch.compile(pipeline.vae.decode, mode="max-autotune", fullgraph=True)
+
+     pipeline.set_progress_bar_config(disable=True)
+     return pipeline
+
+
+ def run_inference(pipe, batch_size):
+     _ = pipe(
+         prompt=PROMPT,
+         num_images_per_prompt=batch_size,
+         generator=torch.manual_seed(2024),
+     )
+
+
+ def pretty_print_results(results, precision: int = 6):
+     def format_value(value):
+         if isinstance(value, float):
+             return f"{value:.{precision}f}"
+         return value
+
+     filtered_table = {k: format_value(v) for k, v in results.items()}
+     print(tabulate([filtered_table], headers="keys", tablefmt="pipe", stralign="center"))
+
+
+ def run_benchmark(pipeline, args):
+     for _ in range(5):
+         run_inference(pipeline, batch_size=args.batch_size)
+
+     time = benchmark_fn(run_inference, pipeline, args.batch_size)
+     torch.cuda.empty_cache()
+     memory = bytes_to_giga_bytes(torch.cuda.memory_allocated())  # in GBs.
+
+     info = dict(
+         ckpt_id=args.ckpt_id,
+         batch_size=args.batch_size,
+         fuse=args.fuse_attn_projections,
+         compile=args.compile,
+         quantization=args.quantization,
+         sparsify=args.sparsify,
+         memory=memory,
+         time=time,
+     )
+
+     pretty_print_results(info)
+     return info
+
+
+ def serialize_artifacts(info: dict, pipeline, args):
+     ckpt_id = PREFIXES[args.ckpt_id]
+     prefix = f"ckpt@{ckpt_id}-bs@{args.batch_size}-fuse@{args.fuse_attn_projections}-compile@{args.compile}-quant@{args.quantization}-sparsify@{args.sparsify}"
+     info_file = f"{prefix}_info.json"
+     with open(info_file, "w") as f:
+         json.dump(info, f)
+
+     image = pipeline(
+         prompt=PROMPT,
+         num_images_per_prompt=args.batch_size,
+         generator=torch.manual_seed(0),
+     ).images[0]
+     image.save(f"{prefix}.png")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--ckpt_id", default="PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", type=str)
+     parser.add_argument("--fuse_attn_projections", action="store_true")
+     parser.add_argument("--compile", action="store_true")
+     parser.add_argument(
+         "--quantization",
+         default="None",
+         choices=["int8dq", "int8wo", "int4wo", "autoquant", "fp6", "fp8", "None"],
+         help="Which quantization technique to apply",
+     )
+     parser.add_argument("--sparsify", action="store_true")
+     parser.add_argument("--batch_size", default=1, type=int, choices=[1, 4, 8])
+     args = parser.parse_args()
+
+     flush()
+
+     pipeline = load_pipeline(
+         ckpt_id=args.ckpt_id,
+         fuse_attn_projections=args.fuse_attn_projections,
+         compile=args.compile,
+         quantization=args.quantization,
+         sparsify=args.sparsify,
+     )
+
+     info = run_benchmark(pipeline, args)
+     serialize_artifacts(info, pipeline, args)
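
For reference, the benchmark above can also be run for a single configuration without the sweep scripts below; a minimal invocation (using only flags defined in the script's argparse, with illustrative values) looks like:

    python3 benchmark_pixart.py \
        --ckpt_id "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS" \
        --compile \
        --quantization int8wo \
        --batch_size 4

Each run prints a one-row results table and writes a *_info.json with the measured latency and memory, plus a sample PNG, under the same ckpt@...-quant@... prefix.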
vae-compilation/exp.slurm ADDED
@@ -0,0 +1,27 @@
+ #!/bin/bash
+ #SBATCH --job-name=torchao
+ #SBATCH --nodes=1
+ # Set the QoS
+ #SBATCH --qos=high
+ # set 5h for job wall time limit
+ #SBATCH --time=05:00:00
+ # activate the requeue option
+ #SBATCH --requeue
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --gres=gpu:8
+ #SBATCH --mem=1999G
+ #SBATCH --exclusive
+ #SBATCH --partition=hopper-prod
+ #SBATCH -o /fsx/sayak/logs/%x-%j.out
+
+ set -xe
+
+ module load cuda/12.1
+
+ export NCCL_ASYNC_ERROR_HANDLING=1
+ export LARGE_SCALE_TRAINING=1
+
+ srun --wait=60 --kill-on-bad-exit=1 bash launch.sh
+
+ echo "END TIME: $(date)"
vae-compilation/exp_autoquant.slurm ADDED
@@ -0,0 +1,28 @@
+ #!/bin/bash
+ #SBATCH --job-name=torchao
+ #SBATCH --nodes=1
+ # Set the QoS
+ #SBATCH --qos=prod
+ # set 5h for job wall time limit
+ #SBATCH --time=05:00:00
+ # activate the requeue option
+ #SBATCH --requeue
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --gres=gpu:8
+ #SBATCH --mem=1999G
+ #SBATCH --exclusive
+ #SBATCH --partition=hopper-prod
+ #SBATCH -o /fsx/sayak/logs/benchmark-%x-%j.out
+
+ set -xe
+
+ module load cuda/12.1
+
+ export NCCL_ASYNC_ERROR_HANDLING=1
+ export LARGE_SCALE_TRAINING=1
+ export CUDA_VISIBLE_DEVICES=0
+
+ srun --wait=60 --kill-on-bad-exit=1 bash launch_autoquant.sh
+
+ echo "END TIME: $(date)"
vae-compilation/exp_fp8.slurm ADDED
@@ -0,0 +1,28 @@
+ #!/bin/bash
+ #SBATCH --job-name=torchao
+ #SBATCH --nodes=1
+ # Set the QoS
+ #SBATCH --qos=prod
+ # set 5h for job wall time limit
+ #SBATCH --time=05:00:00
+ # activate the requeue option
+ #SBATCH --requeue
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --gres=gpu:8
+ #SBATCH --mem=1999G
+ #SBATCH --exclusive
+ #SBATCH --partition=hopper-prod
+ #SBATCH -o /fsx/sayak/logs/benchmark-%x-%j.out
+
+ set -xe
+
+ module load cuda/12.1
+
+ export NCCL_ASYNC_ERROR_HANDLING=1
+ export LARGE_SCALE_TRAINING=1
+ export CUDA_VISIBLE_DEVICES=0
+
+ srun --wait=60 --kill-on-bad-exit=1 bash launch_fp8.sh
+
+ echo "END TIME: $(date)"
vae-compilation/exp_profile.slurm ADDED
@@ -0,0 +1,28 @@
+ #!/bin/bash
+ #SBATCH --job-name=torchao
+ #SBATCH --nodes=1
+ # Set the QoS
+ #SBATCH --qos=prod
+ # set 5h for job wall time limit
+ #SBATCH --time=05:00:00
+ # activate the requeue option
+ #SBATCH --requeue
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --gres=gpu:8
+ #SBATCH --mem=1999G
+ #SBATCH --exclusive
+ #SBATCH --partition=hopper-prod
+ #SBATCH -o /fsx/sayak/logs/profile-%x-%j.out
+
+ set -xe
+
+ module load cuda/12.1
+
+ export NCCL_ASYNC_ERROR_HANDLING=1
+ export LARGE_SCALE_TRAINING=1
+ export CUDA_VISIBLE_DEVICES=0
+
+ srun --wait=60 --kill-on-bad-exit=1 bash launch_profile.sh
+
+ echo "END TIME: $(date)"
vae-compilation/launch.sh ADDED
@@ -0,0 +1,41 @@
+ #!/bin/bash
+
+ # Possible values for each argument
+ ckpt_ids=("PixArt-alpha/PixArt-Sigma-XL-2-1024-MS")
+ fuse_attn_projections_flags=("" "--fuse_attn_projections")
+ compile_flags=("" "--compile")
+ # quantizations=("int8dq" "int8wo" "int4wo" "autoquant" "None" "fp6" "fp8")
+ quantizations=("int8dq" "int8wo" "int4wo" "None" "fp6")
+ sparsify_flags=("" "--sparsify")
+ batch_sizes=(1 4 8)
+
+ # Loop over all combinations
+ for ckpt_id in "${ckpt_ids[@]}"; do
+   for quantization in "${quantizations[@]}"; do
+     # Determine the valid flags based on quantization value
+     if [ "$quantization" == "None" ]; then
+       sparsify_flags=("" "--sparsify") # Enable sparsify
+       fuse_attn_projections_flags=("") # Disable fuse_attn_projections
+     else
+       sparsify_flags=("") # Disable sparsify
+       fuse_attn_projections_flags=("" "--fuse_attn_projections") # Enable fuse_attn_projections
+     fi
+
+     for fuse_attn_projections in "${fuse_attn_projections_flags[@]}"; do
+       for compile in "${compile_flags[@]}"; do
+         for sparsify in "${sparsify_flags[@]}"; do
+           for batch_size in "${batch_sizes[@]}"; do
+             # Construct the command
+             cmd="python3 benchmark_pixart.py --ckpt_id \"$ckpt_id\" $fuse_attn_projections $compile --quantization \"$quantization\" $sparsify --batch_size \"$batch_size\""
+
+             # Echo the command
+             echo "Running command: $cmd"
+
+             # Run the command
+             eval $cmd
+           done
+         done
+       done
+     done
+   done
+ done
vae-compilation/launch_autoquant.sh ADDED
@@ -0,0 +1,7 @@
+ #!/bin/bash
+
+ export TORCHAO_AUTOTUNER_ENABLE=1
+
+ python benchmark_pixart.py --compile --quantization=autoquant
+ python benchmark_pixart.py --compile --quantization=autoquant --batch_size=4
+ python benchmark_pixart.py --compile --quantization=autoquant --batch_size=8
vae-compilation/launch_fp8.sh ADDED
@@ -0,0 +1,5 @@
+ #!/bin/bash
+
+ python benchmark_pixart.py --compile --quantization=fp8
+ python benchmark_pixart.py --compile --quantization=fp8 --batch_size=4
+ python benchmark_pixart.py --compile --quantization=fp8 --batch_size=8
vae-compilation/launch_profile.sh ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+
+ export TORCH_LOGS='output_code,graph_breaks,recompiles'
+
+ TORCH_LOGS_OUT=regular.txt python3 profile_pixart.py --compile 2>&1 | grep "Output code written to: " | awk -F" " '{print $NF}'
+ TORCH_LOGS_OUT=int8wo.txt python3 profile_pixart.py --compile --quantization=int8wo 2>&1 | grep "Output code written to: " | awk -F" " '{print $NF}'
+ TORCH_LOGS_OUT=int8dq.txt python3 profile_pixart.py --compile --quantization=int8dq 2>&1 | grep "Output code written to: " | awk -F" " '{print $NF}'
+ TORCH_LOGS_OUT=int4wo.txt python3 profile_pixart.py --compile --quantization=int4wo 2>&1 | grep "Output code written to: " | awk -F" " '{print $NF}'
vae-compilation/profile_pixart.py ADDED
@@ -0,0 +1,137 @@
+ import torch
+
+ torch.set_float32_matmul_precision("high")
+
+ from torch._inductor import config as inductorconfig
+
+ inductorconfig.triton.unique_kernel_names = True
+
+ import functools
+ from diffusers import DiffusionPipeline
+ import gc
+
+ from torchao.quantization import (
+     int4_weight_only,
+     int8_weight_only,
+     int8_dynamic_activation_int8_weight,
+     quantize_,
+     autoquant,
+ )
+ from torchao.prototype.quant_llm import fp6_llm_weight_only
+ from torchao.sparsity import sparsify_, int8_dynamic_activation_int8_semi_sparse_weight
+ import argparse
+
+
+ PROMPT = "Eiffel Tower was Made up of more than 2 million translucent straws to look like a cloud, with the bell tower at the top of the building, Michel installed huge foam-making machines in the forest to blow huge amounts of unpredictable wet clouds in the building's classic architecture."
+ PREFIXES = {
+     "stabilityai/stable-diffusion-3-medium-diffusers": "sd3",
+     "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "pixart",
+     "fal/AuraFlow": "auraflow",
+ }
+
+
+ def flush():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def profiler_runner(path, fn, *args, **kwargs):
+     with torch.profiler.profile(
+         activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], record_shapes=True
+     ) as prof:
+         result = fn(*args, **kwargs)
+     prof.export_chrome_trace(path)
+     return result
+
+
+ def load_pipeline(
+     ckpt_id: str,
+     fuse_attn_projections: bool,
+     compile: bool,
+     quantization: str,
+     sparsify: bool,
+ ) -> DiffusionPipeline:
+     pipeline = DiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=torch.bfloat16).to("cuda")
+
+     if fuse_attn_projections:
+         pipeline.transformer.fuse_qkv_projections()
+
+     if quantization == "autoquant" and compile:
+         pipeline.transformer.to(memory_format=torch.channels_last)
+         pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
+
+     if not sparsify:
+         if quantization == "int8dq":
+             quantize_(pipeline.transformer, int8_dynamic_activation_int8_weight())
+         elif quantization == "int8wo":
+             quantize_(pipeline.transformer, int8_weight_only())
+         elif quantization == "int4wo":
+             quantize_(pipeline.transformer, int4_weight_only())
+         elif quantization == "fp6":
+             quantize_(pipeline.transformer, fp6_llm_weight_only())
+         elif quantization == "autoquant":
+             pipeline.transformer = autoquant(pipeline.transformer)
+
+     if sparsify:
+         sparsify_(pipeline.transformer, int8_dynamic_activation_int8_semi_sparse_weight())
+
+     if quantization != "autoquant" and compile:
+         pipeline.transformer.to(memory_format=torch.channels_last)
+         pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
+
+     pipeline.set_progress_bar_config(disable=True)
+     return pipeline
+
+
+ def run_inference(pipe, batch_size):
+     _ = pipe(
+         prompt=PROMPT,
+         num_images_per_prompt=batch_size,
+         generator=torch.manual_seed(2024),
+     )
+
+
+ def run_profile(pipeline, args):
+     for _ in range(5):
+         run_inference(pipeline, batch_size=args.batch_size)
+
+     ckpt_id = PREFIXES[args.ckpt_id]
+     prefix = f"ckpt@{ckpt_id}-bs@{args.batch_size}-fuse@{args.fuse_attn_projections}-compile@{args.compile}-quant@{args.quantization}-sparsify@{args.sparsify}"
+     trace_path = f"{prefix}_trace.json"
+
+     runner = functools.partial(profiler_runner, trace_path)
+     with torch.autograd.profiler.record_function("diffusers-torchao"):
+         runner(run_inference, pipeline, args.batch_size)
+
+     return trace_path
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--ckpt_id", default="PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", type=str)
+     parser.add_argument("--fuse_attn_projections", action="store_true")
+     parser.add_argument("--compile", action="store_true")
+     parser.add_argument(
+         "--quantization",
+         default="None",
+         choices=["int8dq", "int8wo", "int4wo", "autoquant", "fp6", "None"],
+         help="Which quantization technique to apply",
+     )
+     parser.add_argument("--sparsify", action="store_true")
+     parser.add_argument("--batch_size", default=1, type=int, choices=[1, 4, 8])
+     args = parser.parse_args()
+
+     flush()
+
+     pipeline = load_pipeline(
+         ckpt_id=args.ckpt_id,
+         fuse_attn_projections=args.fuse_attn_projections,
+         compile=args.compile,
+         quantization=args.quantization,
+         sparsify=args.sparsify,
+     )
+
+     trace_path = run_profile(pipeline, args)
+     print(f"Trace path generated at: {trace_path}")
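
The exported trace is a standard Chrome trace JSON, so it can be inspected in chrome://tracing or Perfetto. When a quick textual summary is enough, a hypothetical variant of profiler_runner that also prints the profiler's own op table could look like this sketch (same torch.profiler setup as above, plus key_averages()):

    import torch

    def profiler_runner_with_summary(path, fn, *args, **kwargs):
        # Same profiling setup as profiler_runner, but also prints the hottest ops.
        with torch.profiler.profile(
            activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
            record_shapes=True,
        ) as prof:
            result = fn(*args, **kwargs)
        prof.export_chrome_trace(path)
        # Rank ops by total CUDA time to see what dominates a denoising step.
        print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=20))
        return result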