# flux_8step.py — fuse the Hyper-SD 8-step LoRA into a Flux base model
# and save the resulting transformer weights.
import os
import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download
# Ensure the output folder exists, creating it if necessary.
output_folder = './output/flux_8step'
os.makedirs(output_folder, exist_ok=True)

# Base Flux model and the Hyper-SD 8-step acceleration LoRA checkpoint.
base_model_id = "trongg/Flux-Dev2Pro_nsfw_fluxtastic-v3"
repo_name = "ByteDance/Hyper-SD"
ckpt_name = "Hyper-FLUX.1-dev-8steps-lora.safetensors"

# Load the base pipeline in bfloat16: without torch_dtype the pipeline is
# materialized in fp32, roughly quadrupling memory versus the bf16 weights
# Flux is distributed in. (This also makes use of the `torch` import, which
# was previously unused.)
pipe = FluxPipeline.from_pretrained(base_model_id, torch_dtype=torch.bfloat16)

# Download the LoRA from the Hub and bake it into the base weights.
# lora_scale=0.125 is the scale the Hyper-SD authors recommend for the
# 8-step FLUX LoRA — TODO confirm against the current model card.
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
pipe.fuse_lora(lora_scale=0.125)

# Drop the adapter bookkeeping; the fused weights stay in the transformer,
# so the model can now be used/saved without any LoRA machinery attached.
pipe.unload_lora_weights()

# Persist only the (fused) transformer so it can be reloaded standalone.
model = pipe.transformer
model.save_pretrained(output_folder)