# Hugging Face Spaces app (the hosted Space's status page showed "Runtime error").
import gradio as gr | |
import os | |
import sys | |
from pathlib import Path | |
from transformers import pipeline | |
#pipe = pipeline('text-generation', model_id='Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator') | |
# Hub model ids offered in the UI dropdown; index-aligned with `models2`,
# which holds the loaded inference Interface for each entry.
models = [
    "Yntec/NovelAIRemix",
    "Joeythemonster/anything-midjourney-v-4-1",
    "stablediffusionapi/dreamshaper-v6",  # 239
    "stablediffusionapi/disneypixar",
    "emilianJR/epiCRealism",
    "prompthero/openjourney",
    "stablediffusionapi/realistic-vision-v20-2047",
    "stablediffusionapi/wand-magic2",
    "dwancin/memoji",  # 07.11
    "stablediffusionapi/anime-model-v2",
    "goofyai/3d_render_style_xl",
]

# Model shown by default when the app starts (used for initial labels only).
current_model = models[0]
# Prompt-extension generator: GPT-2 fine-tuned on Stable Diffusion prompts.
text_gen1 = gr.Interface.load("models/Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator")

# One loaded hosted-inference Interface per entry in `models`, index-aligned
# so a dropdown integer index selects the matching backend. Built with a
# comprehension instead of eleven copy-pasted, hard-indexed loads, so the
# list stays in sync automatically when `models` changes length.
models2 = [
    gr.Interface.load(f"models/{model_name}", live=True, preprocess=False)
    for model_name in models
]
def text_it1(inputs, text_gen1=text_gen1):
    """Run *inputs* through the prompt-extension generator and return its output."""
    generated = text_gen1(inputs)
    return generated
def set_model(current_model):
    """Relabel the output image with the newly selected model id.

    *current_model* is the dropdown's integer index (the Dropdown is built
    with type="index"). The parameter shadows the module-level
    `current_model` string; the original local reassignment never persisted
    anywhere, so it is dropped — the module global is only read once at UI
    build time for the initial labels.
    """
    return gr.update(label=models[current_model])
def send_it1(inputs, model_choice):
    """Generate an image from prompt *inputs* using the backend at dropdown index *model_choice*."""
    chosen_backend = models2[model_choice]
    result = chosen_backend(inputs)
    return result
# Empty stylesheet placeholder; per-element CSS is injected via gr.HTML below.
css = """"""

with gr.Blocks(css=css) as myface:
    # Twitter-card metadata for link previews of the Space.
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="twitter:card" content="player"/>
<meta name="twitter:site" content=""/>
<meta name="twitter:player:width" content="100%"/>
<meta name="twitter:player:height" content="600"/>
</head>
</html>
""")
    with gr.Row():
        with gr.Tab("Title"):
            gr.HTML(""" <title>A open-beta for precious people.</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
""")
        with gr.Tab("Description"):
            gr.HTML("""<div style="text-align:center;">
</div>""")
    with gr.Row():
        with gr.Column(scale=100):
            # Prompt input plus a restyled "Generate Image" button.
            magic1 = gr.Textbox(lines=4)
            gr.HTML("""<style> .gr-button {
color: white !important;
border-color: #000000 !important;
background: #006699 !important;
}</style>""")
            run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column(scale=100):
            # Model selection dropdown; type="index" hands handlers an int index.
            model_name1 = gr.Dropdown(label="Select Model", choices=list(models), type="index", value=current_model, interactive=True)
    with gr.Row():
        # FIX: gr.Column() takes no `style` constructor argument — the original
        # gr.Column(style="width=800px") raises a TypeError at startup.
        with gr.Column():
            output1 = gr.Image(label=(f"{current_model}"))
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Use this box to extend an idea automatically, by typing some words and clicking Extend Idea", lines=2)
            see_prompts = gr.Button("Extend Idea")
def get_valid_prompt(text: str) -> str:
    """Return the shorter of *text*'s first sentence and first line.

    On a tie (neither '.' nor '\\n' present, so both prefixes are the whole
    string) the sentence split wins, matching the original's tie case.

    The original bool-keyed-dict lookup was buggy: its first two conditions
    were the same predicate, so when the sentence prefix was shorter the
    True key was overwritten and the LONGER prefix came back, and when the
    line prefix was shorter there was no True key at all and the lookup
    raised KeyError.

    NOTE(review): currently unused — its only call site is commented out.
    """
    dot_split = text.split('.')[0]
    n_split = text.split('\n')[0]
    # min() is stable, so dot_split wins ties by coming first.
    return min(dot_split, n_split, key=len)
def short_prompt(inputs):
    """Return *inputs* unchanged (identity passthrough)."""
    return inputs
# Attach the event listeners. Re-entering the Blocks context keeps this
# wiring valid even though it sits after the layout `with` block.
with myface:
    # Changing the dropdown relabels the output image with the chosen model id.
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    # "Generate Image" sends the prompt to the currently selected backend.
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    # "Extend Idea" expands the short idea text into a full prompt.
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
    # see_prompts.click(magic1=get_valid_prompt(pipe(input_text.value, max_length=77)[0]['generated_text']))

myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)