{ "_class_name": "UNet2DConditionModel", "_commit_hash": null, "_diffusers_version": "0.27.2", "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--hf-internal-testing--tiny-stable-diffusion-torch/snapshots/a88cdfbd91f96ec7f61eb7484b652ff0f4ee701d/unet", "_use_default_values": [ "class_embeddings_concat", "transformer_layers_per_block", "num_attention_heads", "mid_block_only_cross_attention", "resnet_time_scale_shift", "use_linear_projection", "encoder_hid_dim", "resnet_out_scale_factor", "upcast_attention", "dual_cross_attention", "time_cond_proj_dim", "num_class_embeds", "dropout", "conv_out_kernel", "encoder_hid_dim_type", "time_embedding_dim", "mid_block_type", "class_embed_type", "attention_type", "timestep_post_act", "addition_embed_type_num_heads", "conv_in_kernel", "addition_time_embed_dim", "reverse_transformer_layers_per_block", "projection_class_embeddings_input_dim", "time_embedding_act_fn", "cross_attention_norm", "only_cross_attention", "resnet_skip_time_act", "addition_embed_type", "time_embedding_type" ], "act_fn": "silu", "addition_embed_type": null, "addition_embed_type_num_heads": 64, "addition_time_embed_dim": null, "attention_head_dim": 8, "attention_type": "default", "block_out_channels": [ 32, 64 ], "center_input_sample": false, "class_embed_type": null, "class_embeddings_concat": false, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 32, "cross_attention_norm": null, "down_block_types": [ "DownBlock2D", "CrossAttnDownBlock2D" ], "downsample_padding": 1, "dropout": 0.0, "dual_cross_attention": false, "encoder_hid_dim": null, "encoder_hid_dim_type": null, "flip_sin_to_cos": true, "freq_shift": 0, "in_channels": 4, "layers_per_block": 2, "mid_block_only_cross_attention": null, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DCrossAttn", "neuron": { "auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "dynamic_batch_size": false, "inline_weights_to_neff": false, "input_names": [ "sample", "timestep", "encoder_hidden_states" ], "model_type": "unet", "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "output_names": [ "sample" ], "static_batch_size": 64, "static_height": 32, "static_num_channels": 4, "static_sequence_length": 77, "static_width": 32 }, "norm_eps": 1e-05, "norm_num_groups": 32, "num_attention_heads": null, "num_class_embeds": null, "only_cross_attention": false, "out_channels": 4, "projection_class_embeddings_input_dim": null, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": false, "resnet_time_scale_shift": "default", "reverse_transformer_layers_per_block": null, "sample_size": 64, "task": "semantic-segmentation", "time_cond_proj_dim": null, "time_embedding_act_fn": null, "time_embedding_dim": null, "time_embedding_type": "positional", "timestep_post_act": null, "transformer_layers_per_block": 1, "transformers_version": null, "up_block_types": [ "CrossAttnUpBlock2D", "UpBlock2D" ], "upcast_attention": false, "use_linear_projection": false }