# testSortModels / app.py
# (Removed Hugging Face Space page residue that was pasted into the source:
#  "DemiPoto's picture / Update app.py / 5600670 verified / raw /
#   history blame / 27.8 kB" -- it was not Python code.)
import os
import gradio as gr
from random import randint
from operator import itemgetter
import bisect
from all_models import tags_plus_models,models,models_plus_tags
from datetime import datetime
from externalmod import gr_Interface_load
import asyncio
import os
# NOTE(review): 'os' is imported twice above; harmless but redundant.
from threading import RLock
# Serializes the image-saving section of infer() across worker threads.
lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
nb_req_simult=10 ######## # number of parallel hidden worker "modules" in the UI
now2 = 0
inference_timeout = 300  # seconds allowed per image generation (see infer)
MAX_SEED = 2**32-1
nb_rep=2                 # repetitions per model (display helper)
nb_mod_dif=20            # number of different models
nb_models=nb_mod_dif*nb_rep
# Per-session caches, keyed by the stringified session id (see set_session).
cache_image={}       # session -> list of generated images (tuples or task dicts)
cache_id_image={}
cache_list_task={}   # session -> pending work queue (built by crea_list_task)
cache_text_actu={}   # session -> progress counters (built by crea_list_task)
def load_fn(models):
    """Load a gradio Interface for every model name in *models*.

    Populates the module-level ``models_load`` dict (model name -> Interface).
    A model that fails to load gets a dummy text->image Interface so one bad
    entry does not abort startup.
    """
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    # Fix: the original fell back to a dict ({}) when the list was empty,
    # while the non-empty branch produced a list -- normalize to a list.
    default_models = models[:num_models] if num_models != 0 else []
    for i, model in enumerate(models, start=1):
        if i % 50 == 0:
            # periodic progress marker while loading a long model list
            print("\n\n\n-------" + str(i) + '/' + str(len(models)) + "-------\n\n\n")
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                # fall back to a no-op interface on load failure
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
                print(error)
            models_load[model] = m
# Load every model interface once at import time.
load_fn(models)

tags_plus_models_to_list=[]
list_tags=[]
# Build the "tag (model count)" labels shown in the tag dropdown.
for tag_plus_m in tags_plus_models:
    list_tags.append(tag_plus_m[0]+f" ({tag_plus_m[1]})")
def test_pass(test):
    """Check password *test* against env var 'p' and return the tag dropdown.

    On success the dropdown is filled with the tag labels; otherwise it is
    returned empty.  NOTE(review): the original referenced an undefined name
    ``models_test`` (NameError whenever the password matched); ``list_tags``
    is presumed to be the intended choice list -- confirm.
    """
    if test == os.getenv('p'):
        print("ok")
        return gr.Dropdown(label="Lists Tags", show_label=True, choices=list(list_tags), interactive=True)
    else:
        print("nop")
        return gr.Dropdown(label="Lists Tags", show_label=True, choices=list([]), interactive=True)
def test_pass_aff(test):
    """Reveal the main UI when the password matches env var 'p', hide it otherwise."""
    if test != os.getenv('p'):
        # wrong password: keep the accordion hidden, leave the login row as-is
        return gr.Accordion(open=True, visible=False), gr.Row()
    # correct password: show the accordion and hide the login row
    return gr.Accordion(open=True, visible=True), gr.Row(visible=False)
# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Run one generation on models_load[model_str] in a worker thread.

    Optional sampling parameters are only forwarded when they carry a usable
    value; the model's defaults apply otherwise.  Returns the absolute path
    of the saved PNG, or None on failure/timeout.
    """
    from pathlib import Path
    kwargs = {}
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    # Fix: dropped the original's confusing chained assignment
    # (cfg = kwargs[...] = cfg); the extra rebind of cfg was a no-op.
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    noise = ""
    if seed >= 0:
        kwargs["seed"] = seed
    else:
        # No fixed seed: append a random amount of whitespace to the prompt,
        # presumably to defeat response caching upstream (same effect as the
        # original char-by-char loop, built in one step).
        noise = " " * randint(1, 500)
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(3)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            # Three random components make filename collisions between
            # concurrent sessions effectively impossible.
            png_path = f"image_{randint(1, MAX_SEED)}_{randint(1, MAX_SEED)}_{randint(1, MAX_SEED)}.png"
            result.save(png_path)
            return str(Path(png_path).resolve())
    return None
def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    """Synchronous wrapper around infer(): runs it on a fresh event loop.

    Returns the generated image path, or None for the 'NA' placeholder model
    or on any failure.
    """
    if model_str == 'NA':
        return None
    # Fix: create the loop *before* the try block.  In the original, if
    # new_event_loop() itself raised, `loop` was unbound and the finally
    # clause raised NameError on loop.close().
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to *gallery*, creating the list if needed.

    A None image is ignored; the (possibly new) gallery list is returned.
    """
    items = gallery if gallery is not None else []
    if image is not None:
        items.append((image, model_str))
    return items
def reset_gallery(gallery):
    """Return a brand-new empty gallery (the argument is ignored).

    Equivalent to the original add_gallery(None, "", []) call, which always
    yields an empty list.
    """
    return []
def load_gallery(gallery, id):
    """Rebuild *gallery* from the cached (image, model) entries of session *id*."""
    rebuilt = reset_gallery(gallery)
    for entry in cache_image[f"{id}"]:
        rebuilt = add_gallery(entry[0], entry[1], rebuilt)
    return rebuilt
def load_gallery_sorted(gallery, id):
    """Rebuild *gallery* from session *id*'s cache, ordered by model name."""
    rebuilt = reset_gallery(gallery)
    for entry in sorted(cache_image[f"{id}"], key=itemgetter(1)):
        rebuilt = add_gallery(entry[0], entry[1], rebuilt)
    return rebuilt
def add_cache_image(image, model_str, id, cache_image=cache_image):
    """Store (image, model_str) in session *id*'s cache; None images are dropped.

    The default argument binds the module-level cache at import time.
    """
    if image is None:
        return
    cache_image[f"{id}"].append((image, model_str))
    return
def reset_cache_image(id, cache_image=cache_image):
    """Empty session *id*'s image list in place (the key itself is kept)."""
    session_images = cache_image[f"{id}"]
    del session_images[:]
    return
def reset_cache_image_all_sessions(cache_image=cache_image):
    """Empty every session's image list in place (session keys are preserved)."""
    for session_images in cache_image.values():
        session_images.clear()
    return
def set_session(id):
    """Return a session id, allocating fresh per-session caches when id == 0.

    A non-zero id means the session is already initialised and is returned
    unchanged; otherwise a random id is drawn and every cache gets an entry.
    """
    if id != 0:
        return id
    new_id = randint(1, MAX_SEED)
    key = f"{new_id}"
    cache_image[key] = []
    cache_id_image[key] = []
    cache_list_task[key] = []
    cache_text_actu[key] = {}
    return gr.Number(visible=False, value=new_id)
def print_info_sessions():
    """Log, per session, how many images are cached, plus a grand total."""
    total = 0
    print("###################################")
    print("number of sessions : " + str(len(cache_image)))
    for session_id, images in cache_image.items():
        print("session " + session_id + " : " + str(len(images)))
        total += len(images)
    print("images total = " + str(total))
    print("###################################")
    return
def disp_models(group_model_choice, nb_rep=nb_rep):
    """Render the chosen models (deduplicated, order kept) as a quoted list."""
    unique_models = []
    for model in group_model_choice:
        if model not in unique_models:
            unique_models.append(model)
    text = '\n'
    for position, model in enumerate(unique_models, start=1):
        text += "\"" + model + "\",\n"
        # blank separator line every 8/nb_rep entries for readability
        if position % (8 / nb_rep) == 0:
            text += "\n"
    return gr.Textbox(label="models", value=text)
def search_models(str_search, tags_plus_models=tags_plus_models):
    """Search models by substring; also list every model of an exactly matching tag."""
    by_substring = "\n"
    # tags_plus_models[0][2] holds the full model list of the first entry.
    for model in tags_plus_models[0][2]:
        if model.find(str_search) != -1:
            by_substring += "\"" + model + "\",\n"
    by_tag = ""
    for tag_entry in tags_plus_models:
        if str_search.lower() == tag_entry[0].lower() and str_search != "":
            for model in tag_entry[2]:
                by_tag += "\"" + model + "\",\n"
    if by_tag != "":
        output = by_substring + "\n From tags : \n\n" + by_tag
    else:
        output = by_substring
    return gr.Textbox(label="out", value=output)
def search_info(txt_search_info, models_plus_tags=models_plus_tags):
    """Return the tag list of the model named in *txt_search_info*.

    The model name may be given bare or wrapped in double quotes (as produced
    by the listing helpers).  When no model matches, a placeholder message is
    returned instead.
    """
    outputList = []
    if txt_search_info.find("\"") != -1:
        # extract the text between the first pair of double quotes
        start = txt_search_info.find("\"") + 1
        end = txt_search_info.find("\"", start)
        m_name = cutStrg(txt_search_info, start, end)
    else:
        m_name = txt_search_info
    for m in models_plus_tags:
        if m_name == m[0]:
            outputList = m[1]
    if len(outputList) == 0:
        # Fix: user-facing message was "Model Not Find"
        outputList.append("Model Not Found")
    return gr.Textbox(label="out", value=outputList)
def ratio_chosen(choice_ratio, width, height):
    """Apply a preset W/H pair; [None, None] means keep the current sliders."""
    if choice_ratio == [None, None]:
        return width, height
    new_width, new_height = choice_ratio
    return (gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=new_width),
            gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=new_height))
# Preset width/height pairs for the ratio dropdown; the [None, None] entry
# means "keep the manual slider values" (see ratio_chosen).
list_ratios=[["None",[None,None]],
             ["4:1 (2048 x 512)",[2048,512]],
             ["12:5 (1536 x 640)",[1536,640]],
             ["~16:9 (1344 x 768)",[1344,768]],
             ["~3:2 (1216 x 832)",[1216,832]],
             ["~4:3 (1152 x 896)",[1152,896]],
             ["1:1 (1024 x 1024)",[1024,1024]],
             ["~3:4 (896 x 1152)",[896,1152]],
             ["~2:3 (832 x 1216)",[832,1216]],
             ["~9:16 (768 x 1344)",[768,1344]],
             ["5:12 (640 x 1536)",[640,1536]],
             ["1:4 (512 x 2048)",[512,2048]]]
def fonc_add_param(lp, txt_input, neg_input, width, height, steps, cfg, seed):
    """Append the current parameter set to the prompt list *lp*.

    The sentinel ["","",0,0,0,0,-1] marking an empty list is removed first.
    Returns the refreshed Examples dataset and the hidden dropdown carrying
    *lp* as state.
    """
    sentinel = ["", "", 0, 0, 0, 0, -1]
    if lp == [sentinel]:
        lp.remove(sentinel)
    lp.append([txt_input, neg_input, width, height, steps, cfg, seed])
    return gr.Dataset(samples=lp), gr.Dropdown(choices=[["a", lp]], value=lp)
def fonc_del_param(lp, txt_input, neg_input, width, height, steps, cfg, seed):
    """Remove the current parameter set from *lp*, restoring the empty-list sentinel."""
    entry = [txt_input, neg_input, width, height, steps, cfg, seed]
    if entry in lp:
        lp.remove(entry)
    if not lp:
        # keep at least the sentinel so the Examples dataset is never empty
        lp.append(["", "", 0, 0, 0, 0, -1])
    return gr.Dataset(samples=lp), gr.Dropdown(choices=[["a", lp]], value=lp)
def fonc_load_info(nb_of_models_to_gen,index_tag,index_first_model):
    """Resolve the models to generate with from a tag and a first-model index.

    Clamps nb_of_models_to_gen to what is available after index_first_model
    and returns (clamped count, info textbox, hidden dropdown carrying the
    model list as state).
    """
    str_temp=""
    list_models_temp=[]
    # Requested window overruns the tag's model list: clamp and warn.
    if index_first_model+nb_of_models_to_gen>len(tags_plus_models[index_tag][2]):
        if nb_of_models_to_gen>len(tags_plus_models[index_tag][2]):
            str_temp+="warning : to many model chosen"
        else:
            str_temp+="warning : first model to close to the last model"
        nb_of_models_to_gen= len(tags_plus_models[index_tag][2])-index_first_model
        str_temp+=f" - only {nb_of_models_to_gen} will be use\n\n"
    str_temp+="list of models use (from "
    str_temp+=f"{index_first_model+1}/{len(tags_plus_models[index_tag][2])} to {index_first_model+nb_of_models_to_gen}/{len(tags_plus_models[index_tag][2])}) :\n\n"
    for i in range(nb_of_models_to_gen):
        list_models_temp.append(tags_plus_models[index_tag][2][i+index_first_model])
        str_temp+=f"\"{tags_plus_models[index_tag][2][i+index_first_model]}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def fonc_load_info_custom(nb_of_models_to_gen,list_model_custom,index_first_model):
    """Same as fonc_load_info but over a user-supplied custom model list.

    Clamps nb_of_models_to_gen to what is available after index_first_model
    and returns (clamped count, info textbox, hidden dropdown carrying the
    model list as state).
    """
    str_temp=""
    list_models_temp=[]
    # Requested window overruns the custom list: clamp and warn.
    if index_first_model+nb_of_models_to_gen>len(list_model_custom):
        if nb_of_models_to_gen>len(list_model_custom):
            str_temp+="warning : to many model chosen"
        else:
            str_temp+="warning : first model to close to the last model"
        nb_of_models_to_gen= len(list_model_custom)-index_first_model
        str_temp+=f" - only {nb_of_models_to_gen} will be use\n\n"
    str_temp+="list of models CUSTOM use (from "
    str_temp+=f"{index_first_model+1}/{len(list_model_custom)} to {index_first_model+nb_of_models_to_gen}/{len(list_model_custom)}) :\n\n"
    for i in range(nb_of_models_to_gen):
        list_models_temp.append(list_model_custom[i+index_first_model])
        str_temp+=f"\"{list_model_custom[i+index_first_model]}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def crea_list_task(id_session, list_param, list_models_to_gen, nb_images_by_prompt):
    """Build the per-session work queue and reset the progress counters.

    One queue entry per model; each entry holds nb_images_by_prompt tasks for
    every parameter set in list_param.  id_module == -1 means "not yet
    claimed by a worker module" (see fonc_start).
    """
    session_key = f"{id_session}"
    cache_list_task[session_key] = []
    for model in list_models_to_gen:
        tasks = []
        for param in list_param:
            for _ in range(nb_images_by_prompt):
                tasks.append({"prompt": param[0], "nprompt": param[1], "width": param[2],
                              "height": param[3], "steps": param[4], "cfg": param[5], "seed": param[6]})
        cache_list_task[session_key].append({"model": model, "id_module": -1, "task": tasks})
    nb_tasks = len(list_models_to_gen) * len(list_param) * nb_images_by_prompt
    # Progress counters displayed by fonc_update_actu.
    cache_text_actu[session_key] = {"nb_modules_use": nb_req_simult, "stop": False, "nb_fail": 0,
                                    "nb_models_to_do": len(list_models_to_gen),
                                    "nb_models_tot": len(list_models_to_gen),
                                    "nb_tasks_to_do": nb_tasks,
                                    "nb_tasks_tot": nb_tasks}
def fonc_update_actu(text_actu, id):
    """Format the live progress counters of session *id* for the status textbox."""
    actu = cache_text_actu[str(id)]
    status = (f"modules: {actu['nb_modules_use']}/{nb_req_simult}\n"
              f"models remaining: {actu['nb_models_to_do']}/{actu['nb_models_tot']}\n"
              f"images remaining: {actu['nb_tasks_to_do']}/{actu['nb_tasks_tot']}\n"
              f"fail attempt: {actu['nb_fail']}")
    return gr.Textbox(status)
def cutStrg(longStrg, start, end):
    """Return the substring longStrg[start:end].

    The original built the substring character by character; slicing is the
    idiomatic equivalent (and tolerates end > len instead of raising).
    Kept as a function for existing callers.
    """
    return longStrg[start:end]
def aff_models_perso(txt_list_perso,models=models):
    """Parse double-quoted model names out of free text, keeping known models only.

    Scans quote pairs left to right; each quoted token that appears in
    *models* is collected.  Returns a hidden dropdown carrying the list as
    state.  NOTE(review): *models* is bound at import time via the default
    argument.
    """
    list_perso=[]
    t1=True
    # position of the first opening quote (-1 when the text has none)
    start=txt_list_perso.find('\"')
    if start!=-1:
        while t1:
            start+=1
            # closing quote of the current pair
            end=txt_list_perso.find('\"',start)
            if end != -1:
                txtTemp=cutStrg(txt_list_perso,start,end)
                if txtTemp in models:
                    list_perso.append(cutStrg(txt_list_perso,start,end))
            else :
                t1=False
            # jump to the next opening quote after the current pair
            start=txt_list_perso.find('\"',end+1)
            if start==-1:
                t1=False
    return gr.Dropdown(choices=[["",list_perso]], value=list_perso )
# NOTE(review): duplicate re-definition of add_gallery, identical to the one
# defined earlier in this file; this later def is the one in effect at runtime.
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to *gallery* (created if None); None images are ignored."""
    if gallery is None: gallery = []
    #with lock:
    if image is not None: gallery.append((image, model_str))
    return gallery
# NOTE(review): duplicate re-definition of reset_gallery, identical to the one
# defined earlier in this file; this later def is the one in effect at runtime.
def reset_gallery(gallery):
    """Return a fresh empty gallery (the argument is ignored)."""
    return add_gallery(None,"",[])
def fonc_load_gallery(id_session, gallery):
    """Fill the gallery with every cached image of the session.

    Cache entries here are the task dicts stored by fonc_start, which carry
    "image" and "model" keys.
    """
    gallery = reset_gallery(gallery)
    for entry in cache_image[f"{id_session}"]:
        gallery = add_gallery(entry["image"], entry["model"], gallery)
    return gallery
def fonc_load_gallery_by_model(id_session, gallery, models, index):
    """Fill the gallery with the cached images produced by models[index] only."""
    wanted_model = models[index]
    gallery = reset_gallery(gallery)
    for entry in cache_image[f"{id_session}"]:
        if entry["model"] == wanted_model:
            gallery = add_gallery(entry["image"], entry["model"], gallery)
    return gallery
def fonc_start(id_session,id_module,s,cont):
    """Worker-module step: claim a model, run one generation task, record it.

    Triggered by the hidden state textbox's change event; returning a new
    state value ("s" + "1") re-triggers the next step, so each module loops
    until the session queue is empty or *cont* goes False.  Returns
    (image or None, state textbox, random number that fires the progress
    refresh).
    """
    # Manual stop requested from the UI.
    if cont==False:
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("manual stop")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    task_actu={}
    model_actu=""
    print(f"in fonc : id module={id_module}\n")
    # Resume the model already claimed by this module, if any.
    for model_plus_tasks in cache_list_task[f"{id_session}"]:
        if model_plus_tasks["id_module"]==id_module:
            model_actu=model_plus_tasks["model"]
            task_actu=model_plus_tasks["task"][0]
            print(f"find model : {model_actu}\n")
    # Otherwise claim the first model not yet assigned to any module.
    if model_actu=="":
        for model_plus_tasks in cache_list_task[f"{id_session}"]:
            if model_plus_tasks["id_module"]==-1 :
                if model_actu=="":
                    model_plus_tasks["id_module"]=id_module
                    model_actu=model_plus_tasks["model"]
                    task_actu=model_plus_tasks["task"][0]
                    print(f"module num {id_module} take {model_actu}\n")
    # Nothing left to do: retire this module (no new state -> loop stops).
    if model_actu=="":
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("Stop with :"+s+"\n")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    print("begin gen image:")
    print(model_actu)
    print(task_actu)
    result=gen_fn(model_actu, task_actu["prompt"], task_actu["nprompt"], task_actu["height"], task_actu["width"], task_actu["steps"], task_actu["cfg"], task_actu["seed"])
    print("reception")
    if result!=None:
        #result=gr.Image(result)
        id_image=len(cache_image[f"{id_session}"])
        # Pop the finished task; drop the model entry once its queue is empty.
        for model_plus_tasks in cache_list_task[f"{id_session}"]:
            if model_plus_tasks["id_module"]==id_module:
                model_plus_tasks["task"].remove(task_actu)
                cache_text_actu[f"{id_session}"]["nb_tasks_to_do"]-=1
                if len(model_plus_tasks["task"])==0:
                    cache_list_task[f"{id_session}"].remove(model_plus_tasks)
                    cache_text_actu[f"{id_session}"]["nb_models_to_do"]-=1
                # The task dict itself becomes the cache record of the image.
                task_actu["id_image"]=id_image
                task_actu["model"]=model_actu
                task_actu["image"]=result
                #cache_image[f"{id_session}"].append(result)
                #cache_id_image[f"{id_session}"].append(task_actu)
                cache_image[f"{id_session}"].append(task_actu)
                print("image saved\n")
    else:
        cache_text_actu[f"{id_session}"]["nb_fail"]+=1
        print("fail to generate\n")
    # Count and log what is still pending across the whole session.
    num_task_to_do=0
    for model_plus_tasks in cache_list_task[f"{id_session}"]:
        for task in model_plus_tasks["task"]:
            num_task_to_do+=1
    print(f"\n {num_task_to_do} tasks to do\n")
    return result , gr.Textbox(s+"1"),gr.Number(randint(1,MAX_SEED))
def fonc_init(s):
    """Kick a worker module: append "1" to its state so the change event fires."""
    next_state = s + "1"
    return gr.Textbox(next_state)
def index_gallery_first():
    """Jump the gallery model index to the first model."""
    return gr.Number(value=0)
def index_gallery_next(i, list_models):
    """Advance the model index by one, wrapping past the end of the list."""
    return gr.Number((i + 1) % len(list_models))
def index_gallery_prev(i, list_models):
    """Step the model index back by one, wrapping before the start of the list."""
    return gr.Number((i - 1) % len(list_models))
def change_text_model_actu_gal(list_models, index):
    """Show the name of the currently selected model in the gallery header."""
    model_name = list_models[index]
    return gr.Textbox(f"{model_name}")
def fonc_add_to_text(text, list_models, index):
    """Append the selected model, quoted, to the given list text (black/fav list)."""
    quoted_entry = f"\"{list_models[index]}\",\n"
    return gr.Textbox(text + quoted_entry)
def make_me():
    """Build the whole Gradio UI and wire every event handler.

    NOTE(review): indentation was reconstructed from a whitespace-flattened
    source; the layout nesting (which `with` block each widget lives in) is a
    best-effort reconstruction -- confirm against the rendered Space.
    """
    with gr.Column():
        # Hidden per-browser session id; 0 means "not initialised yet".
        id_session=gr.Number(visible=False,value=0)
        with gr.Group():
            with gr.Row():
                with gr.Column(scale=4):
                    txt_input = gr.Textbox(label='Your prompt:', lines=4, interactive = True)
                    neg_input = gr.Textbox(label='Negative prompt:', lines=4, interactive = True)
                with gr.Column(scale=4):
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
                    with gr.Row():
                        choice_ratio = gr.Dropdown(label="Ratio Width/Height",
                                                   info="OverWrite Width and Height (W*H<1024*1024)",
                                                   show_label=True, choices=list(list_ratios) , interactive = True, value=list_ratios[0][1])
                        choice_ratio.change(ratio_chosen,[choice_ratio,width,height],[width,height])
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, interactive = True)
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, interactive = True)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, interactive = True)
            # Maintain a list of parameter sets (one generation batch per entry).
            add_param=gr.Button("Add to the list")
            del_param=gr.Button("Delete to the list")
            #gen_button = gr.Button('Generate images', scale=3)
            #stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
            #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
            # Hidden state carrying the list of parameter sets.
            list_param=gr.Dropdown(choices=[["a",[["","",0,0,0,0,-1]]]], value=[["","",0,0,0,0,-1]], visible=False)
            disp_param = gr.Examples(
                label="list of prompt",
                examples=list_param.value,
                inputs=[txt_input,neg_input,width,height,steps,cfg,seed],
                outputs=[txt_input,neg_input,width,height,steps,cfg,seed],
            )
            add_param.click(fonc_add_param,[list_param,txt_input,neg_input,width,height,steps,cfg,seed],[disp_param.dataset,list_param])
            # First interaction also allocates the per-session caches.
            add_param.click(set_session,[id_session],[id_session])
            del_param.click(fonc_del_param,[list_param,txt_input,neg_input,width,height,steps,cfg,seed],[disp_param.dataset,list_param])
        with gr.Row():
            # Hidden state: list of model names that will be generated.
            list_models_to_gen=gr.Dropdown(choices=[["",[]]], value=[], visible=False)
            disp_info=gr.Textbox(label="Info")
            with gr.Column():
                with gr.Row():
                    nb_images_by_prompt=gr.Number(2,label="Number of images by prompt:",interactive=True)
                    nb_of_models_to_gen=gr.Number(10,label="Number of Models:",interactive=True)
                index_tag=gr.Dropdown(label="Tag",choices=list(list_tags),type="index")
                index_first_model=gr.Dropdown(label="First model",choices=list([]), type="index")
                # Re-populate the first-model dropdown whenever the tag changes.
                index_tag.change(lambda i:gr.Dropdown(choices=list([f"({j+1}/{len(tags_plus_models[i][2])}) {tags_plus_models[i][2][j]}" for j in range(len(tags_plus_models[i][2]))])),
                                 index_tag,index_first_model)
                load_info=gr.Button("Load Models")
                load_info.click(fonc_load_info,[nb_of_models_to_gen,index_tag,index_first_model],[nb_of_models_to_gen,disp_info,list_models_to_gen])
                with gr.Accordion("Models Custom",open=False) :
                    with gr.Row():
                        text_list_model_custom=gr.Textbox(label="List Models Custom")
                        with gr.Column():
                            # Hidden state: parsed list of custom model names.
                            list_model_custom=gr.Dropdown(choices=[["",[]]], value=[], visible=False)
                            #use_models_custom=gr.Radio("Use Models Custom",value=False)
                            cut_model_custom=gr.Button("Cut Text Models Custom")
                            cut_model_custom.click(aff_models_perso,[text_list_model_custom],[list_model_custom])
                            index_first_model_custom=gr.Dropdown(label="First model",choices=list([]), type="index")
                            list_model_custom.change(lambda li:gr.Dropdown(choices=list([f"({j+1}/{len(li)}) {li[j]}" for j in range(len(li))])),
                                                     [list_model_custom],index_first_model_custom)
                            load_model_custom=gr.Button("Load Models Custom")
                            load_model_custom.click(fonc_load_info_custom,[nb_of_models_to_gen,list_model_custom,index_first_model_custom],[nb_of_models_to_gen,disp_info,list_models_to_gen])
            # Loading a model list (re)builds the session's task queue.
            list_models_to_gen.change(crea_list_task,[id_session,list_param,list_models_to_gen,nb_images_by_prompt],[])
        with gr.Column():
            button_start=gr.Button("START")
            button_stop=gr.Button("STOP")
            # Hidden flag read by fonc_start; STOP flips it to False.
            cont=gr.Checkbox(True,visible=False)
            button_start.click(lambda:True,[],[cont])
            button_stop.click(lambda:False,[],[cont])
            text_actu=gr.Textbox("",label="in progress",interactive=False,lines=6)
            # Hidden number whose change event refreshes the progress textbox.
            update_actu=gr.Number(0,visible=False)
            update_actu.change(fonc_update_actu,[text_actu,id_session],[text_actu])
            with gr.Row():
                # nb_req_simult hidden worker "modules": each one chains
                # fonc_start calls through its own state textbox.
                outputs=[]
                id_modules=[]
                states=[]
                for i in range(nb_req_simult):
                    #outputs.append(gr.Image(None,interactive=False,render=False))
                    #id_modules.append(gr.Number(i,interactive=False,render=False))
                    outputs.append(gr.Image(None,interactive=False,visible=False))
                    id_modules.append(gr.Number(i,interactive=False,visible=False))
                    states.append(gr.Textbox("1",interactive=False,visible=False))
                for o,i,s in zip(outputs,id_modules,states):
                    #o.change(fonc_start,[id_session,i],[o])
                    #o.change(test_change,[],[])
                    # Each state change runs one task; fonc_start writes the
                    # state back, re-triggering itself until the queue drains.
                    s.change(fonc_start,[id_session,i,s,cont],[o,s,update_actu])
                    #button_start.click(lambda : gr.Image(None),[],[o])
                    gen_event = gr.on(triggers=[button_start.click], fn=fonc_init,inputs=[s], outputs=[s])
        with gr.Column(scale=2):
            gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                 interactive=False, show_share_button=True, container=True, format="png",
                                 preview=True, object_fit="cover",columns=4,rows=4)
        with gr.Column(scale=3):
            button_load_gallery=gr.Button("Load Gallery All")
            button_load_gallery.click(fonc_load_gallery,[id_session,gallery],[gallery])
            # Index of the model whose images are shown (-1 = none yet).
            index_gallery=gr.Number(-1,visible=False)
            button_load_gallery_first=gr.Button("Load Gallery first model")
            button_load_gallery_first.click(index_gallery_first,[],[index_gallery])
            with gr.Row():
                button_load_gallery_prev=gr.Button("Prev model")
                button_load_gallery_next=gr.Button("Next model")
            button_load_gallery_next.click(index_gallery_next,[index_gallery,list_models_to_gen],[index_gallery])
            button_load_gallery_prev.click(index_gallery_prev,[index_gallery,list_models_to_gen],[index_gallery])
            index_gallery.change(fonc_load_gallery_by_model,[id_session,gallery,list_models_to_gen,index_gallery],[gallery])
            text_model_actu_gal = gr.Textbox(label='Model Actu:', lines=1, interactive = False)
            index_gallery.change(change_text_model_actu_gal,[list_models_to_gen,index_gallery],[text_model_actu_gal])
            with gr.Row():
                with gr.Column():
                    button_add_to_bl=gr.Button("Add to Blacklist")
                    #button_remove_from_bl=gr.Button("Remove from Blacklist")
                    text_bl=gr.Textbox(label='Blacklist', lines=5, interactive = True)
                    button_add_to_bl.click(fonc_add_to_text,[text_bl,list_models_to_gen,index_gallery],[text_bl])
                    #button_remove_from_bl.click(fonc_remove_from_text,[text_bl,list_models_to_gen,index_gallery],[text_bl])
                with gr.Column():
                    button_add_to_fav=gr.Button("Add to Favlist")
                    text_fav=gr.Textbox(label='Favlist', lines=5, interactive = True)
                    button_add_to_fav.click(fonc_add_to_text,[text_fav,list_models_to_gen,index_gallery],[text_fav])
# JS snippet injected into the page below; currently only logs to the console.
js_code = """
console.log('ghgh');
"""
# Top-level app: the custom CSS hides Gradio's floating block labels.
with gr.Blocks(theme="Nymbo/Nymbo_Theme", fill_width=True, css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo:
    gr.Markdown("<script>" + js_code + "</script>")
    make_me()
# https://www.gradio.app/guides/setting-up-a-demo-for-maximum-performance
#demo.queue(concurrency_count=999) # concurrency_count is deprecated in 4.x
demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(max_threads=400)