from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import torch
from transformers import pipeline
import gradio as gr
from PIL import Image
from diffusers.utils import load_image
import os, random, gc, re, json, time, shutil, glob
import tqdm
from accelerate import Accelerator
from huggingface_hub import HfApi, InferenceClient, ModelCard, RepoCard, upload_folder, hf_hub_download, HfFileSystem
hf_api = HfApi()
HF_TOKEN = os.getenv("HF_TOKEN")
# Opt out of Hub telemetry and disable hf_transfer via environment variables.
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["DO_NOT_TRACK"] = "1"
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"
accelerator = Accelerator(cpu=True)
inference_client = InferenceClient()
apol=[]
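# The generation and upload code below writes images and JSON metadata into ./tmpo;
# creating it up front is an assumption added here in case the folder is missing.
os.makedirs("./tmpo", exist_ok=True)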
# Load the SDXL checkpoint from a single .safetensors file, switch to an Euler
# Ancestral scheduler, and fuse a film-photography style LoRA into the UNet (CPU only).
pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/lllyasviel/fav_models/fav/realisticStockPhoto_v10.safetensors", torch_dtype=torch.bfloat16, variant=None, use_safetensors=True, safety_checker=None)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("./", weight_name="SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors", local_files_only=True)
pipe.fuse_lora(fuse_unet=True, fuse_text_encoder=False)
pipe.unet.to(memory_format=torch.channels_last)
pipe = accelerator.prepare(pipe.to("cpu"))
def chdr(apol, prompt, modil, stips, fnamo, gaul):
    try:
        img_type = "rlstcStckPht_v10"
        los = ""
        tre = './tmpo/' + fnamo + '.json'
        tra = './tmpo/' + fnamo + '_0.png'
        trm = './tmpo/' + fnamo + '_1.png'
        # Blocked terms are stored reversed and un-reversed before building the regex.
        flng = ["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
        flng = [itm[::-1] for itm in flng]
        ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
        if re.search(ptn, prompt, re.IGNORECASE):
            # Prompt matched the block list: skip the community image upload.
            print("onon buddy")
        else:
            dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': img_type}
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/hf_community_images", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
        # Second upload keeps the request headers alongside the metadata (images removed first).
        dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': img_type, 'haed': gaul}
        try:
            for pxn in glob.glob('./tmpo/*.png'):
                os.remove(pxn)
        except OSError:
            print("lou")
        with open(tre, 'w') as f:
            json.dump(dobj, f)
        hf_api.upload_folder(repo_id="JoPmt/Tst_datast_imgs", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
        try:
            # Clean up the temporary files once both uploads have finished.
            for pgn in glob.glob('./tmpo/*.png'):
                os.remove(pgn)
            for jgn in glob.glob('./tmpo/*.json'):
                os.remove(jgn)
            del tre, tra, trm
        except OSError:
            print("cant")
    except Exception:
        print("failed to make obj")
def plax(gaul, req: gr.Request):
    # Replace the hidden textbox value with the caller's request headers for logging.
    gaul = str(req.headers)
    return gaul
def plex(prompt, neg_prompt, stips, nut, wit, het, gaul, progress=gr.Progress(track_tqdm=True)):
    gc.collect()
    apol = []
    modil = "realisticStockPhoto_v10"
    # Use the current Unix timestamp as the base file name for the generated images.
    fnamo = str(int(time.time()))
    if nut == 0:
        # Seed 0 means "random": draw seeds until one divisible by 32 is found,
        # matching the step size of the seed slider in the UI.
        nm = random.randint(1, 2147483616)
        while nm % 32 != 0:
            nm = random.randint(1, 2147483616)
    else:
        nm = nut
    lora_scale = 0.6
    generator = torch.Generator(device="cpu").manual_seed(nm)
    # Generate two images per request on CPU, applying the fused LoRA at scale 0.6.
    image = pipe(prompt=[prompt]*2, negative_prompt=[neg_prompt]*2, generator=generator, denoising_end=1.0, num_inference_steps=stips, output_type="pil", cross_attention_kwargs={"scale": lora_scale}, height=het, width=wit)
    for a, imze in enumerate(image["images"]):
        apol.append(imze)
        imze.save('./tmpo/' + fnamo + '_' + str(a) + '.png', 'PNG')
    chdr(apol, prompt, modil, stips, fnamo, gaul)
    return apol
def aip(ill, api_name="/run"):
    # No-op placeholder endpoint.
    return

def pit(ill, api_name="/predict"):
    # No-op placeholder endpoint.
    return
with gr.Blocks(theme=random.choice([gr.themes.Monochrome(), gr.themes.Base.from_hub("gradio/seafoam"), gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"), gr.themes.Glass(), gr.themes.Base()]), analytics_enabled=False) as iface:
    ##iface.description="Running on cpu, very slow! by JoPmt."
    out = gr.Gallery(label="Generated Output Image", columns=1)
    inut = gr.Textbox(label="Prompt")
    gaul = gr.Textbox(visible=False)
    btn = gr.Button("GENERATE")
    with gr.Accordion("Advanced Settings", open=False):
        inet = gr.Textbox(label="Negative prompt", value="lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature")
        inyt = gr.Slider(label="Num inference steps", minimum=1, step=1, maximum=10, value=4)
        indt = gr.Slider(label="Manual seed (leave 0 for random)", minimum=0, step=32, maximum=2147483616, value=0)
        inwt = gr.Slider(label="Width", minimum=256, step=32, maximum=1024, value=768)
        inht = gr.Slider(label="Height", minimum=256, step=32, maximum=1024, value=768)
    # Capture the request headers first, then run generation with all UI inputs.
    btn.click(fn=plax, inputs=gaul, outputs=gaul).then(fn=plex, outputs=[out], inputs=[inut, inet, inyt, indt, inwt, inht, gaul])
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=20, inline=False, show_api=False)