from diffusers import StableDiffusionPipeline
from lora_diffusion import monkeypatch_lora, tune_lora_scale
import torch
import os, shutil
import gradio as gr

MODEL_NAME = "stabilityai/stable-diffusion-2-1-base"
INSTANCE_DIR = "./data_example"
OUTPUT_DIR = "./output_example"

# Load the base Stable Diffusion pipeline once at startup; the fine-tuned LoRA
# weights are monkey-patched into it on demand.
pipe = StableDiffusionPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16).to("cuda")
torch.manual_seed(1)

# Global flag: the LoRA weights must be patched into the pipeline exactly once;
# later calls only re-tune the scales.
counter = 0

# Patch the fine-tuned LoRA weights into the pipeline and run inference
def monkeypatching(unet_alpha, texten_alpha, in_prompt, wts):
    print("****** inside monkeypatching *******")
    print(f"in_prompt is - {str(in_prompt)}")
    global counter
    # The last two .pt files returned by training are the UNet and the
    # text-encoder LoRA weights, in that order
    unet_wt = wts[-2]
    texten_wt = wts[-1]
    print(f"UNET weight is = {unet_wt}, Text-encoder weight is = {texten_wt}")
    if counter == 0:
        # First call: patch both sets of LoRA weights into the pipeline
        monkeypatch_lora(pipe.unet, torch.load(unet_wt))
        monkeypatch_lora(pipe.text_encoder, torch.load(texten_wt), target_replace_module=["CLIPAttention"])
        tune_lora_scale(pipe.unet, unet_alpha)
        tune_lora_scale(pipe.text_encoder, texten_alpha)
        counter += 1
    else:
        # Already patched: only adjust the LoRA scales
        tune_lora_scale(pipe.unet, unet_alpha)
        tune_lora_scale(pipe.text_encoder, texten_alpha)
    prompt = str(in_prompt)
    image = pipe(prompt, num_inference_steps=50, guidance_scale=7).images[0]
    image.save("./illust_lora.jpg")
    return image
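
# A minimal usage sketch of the function above (hypothetical weight paths;
# the filenames assume training below has finished and written both files):
#
#   weights = ["./output_example/lora_weight.pt",                # UNet LoRA
#              "./output_example/lora_weight.text_encoder.pt"]   # text-encoder LoRA
#   monkeypatching(1.0, 1.0, "style of sks, baby lion", weights)  # patches, then infers
#   monkeypatching(0.5, 0.5, "style of sks, baby lion", weights)  # only re-tunes the scales
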
# Fine-tune the model with LoRA DreamBooth by shelling out to the training script
def accelerate_train_lora(steps, images, in_prompt):
    print("*********** inside accelerate_train_lora ***********")
    print(f"images are -- {images}")
    # The temp-file path is file_obj.name; the original upload name is file_obj.orig_name
    os.makedirs(INSTANCE_DIR, exist_ok=True)
    for file in images:
        print(f"file passed -- {file.name}")
        shutil.copy(file.name, INSTANCE_DIR)
    os.system(
        f"accelerate launch ./train_lora_dreambooth.py "
        f"--pretrained_model_name_or_path={MODEL_NAME} "
        f"--instance_data_dir={INSTANCE_DIR} "
        f"--output_dir={OUTPUT_DIR} "
        f"--instance_prompt='{in_prompt}' "
        f"--train_text_encoder "
        f"--resolution=512 "
        f"--train_batch_size=1 "
        f"--gradient_accumulation_steps=1 "
        f"--learning_rate='1e-4' "
        f"--learning_rate_text='5e-5' "
        f"--color_jitter "
        f"--lr_scheduler='constant' "
        f"--lr_warmup_steps=0 "
        f"--max_train_steps={int(steps)}")
    print("*********** completing accelerate_train_lora ***********")
    print(f"files in output_dir -- {os.listdir(OUTPUT_DIR)}")
    # Collect the trained .pt weight files; sorting keeps the UNet weight
    # (lora_weight.pt) ahead of the text-encoder weight (lora_weight.text_encoder.pt),
    # which is the order monkeypatching() expects
    file_list = []
    for file in sorted(os.listdir(OUTPUT_DIR)):
        if file.endswith(".pt"):
            print("weight files are -- ", os.path.join(OUTPUT_DIR, file))
            file_list.append(os.path.join(OUTPUT_DIR, file))
    return file_list
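
# For reference, the os.system call above expands to roughly the following
# shell command (illustrative values: the prompt and step count come from the UI):
#
#   accelerate launch ./train_lora_dreambooth.py \
#     --pretrained_model_name_or_path=stabilityai/stable-diffusion-2-1-base \
#     --instance_data_dir=./data_example --output_dir=./output_example \
#     --instance_prompt='style of sks' --train_text_encoder --resolution=512 \
#     --train_batch_size=1 --gradient_accumulation_steps=1 \
#     --learning_rate='1e-4' --learning_rate_text='5e-5' --color_jitter \
#     --lr_scheduler='constant' --lr_warmup_steps=0 --max_train_steps=2000
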
") #gr.Markdown("""NEW!! : I have fine-tuned the SD model for 15,000 steps using 100 PlaygroundAI images and LORA. You can load this trained model using the example component. Load the weight and start using the Space with the Inference button. Feel free to toggle the Alpha value.""") gr.Markdown( """**Main Features**