# NOTE: Hugging Face Spaces file-viewer scrape artifacts removed here
# (page status lines, "File size" banner, commit-hash row, line-number gutter).
import gradio as gr
import os
import sys
from pathlib import Path
from all_models import models
# Default selection: the first entry of the shared model list; also used as
# the initial label of the output Image component further down.
current_model = models[0]
# Prompt-extension helper loaded from another HF Space (text_it1 calls it).
# Previously tried alternatives are kept commented out for easy switching.
#text_gen1=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
text_gen1=gr.Interface.load("spaces/Yntec/prompt-extend")
#text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend")
#text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
# One loaded inference interface per entry in `models`, in the same order.
# The "Select Model" dropdown below uses type="index", and send_it1 indexes
# straight into this list, so models2 must cover *every* dropdown choice.
# The original hard-coded models[0]..models[5] only, which raised IndexError
# for any selection past the sixth model.
models2 = [
    gr.Interface.load(
        f"models/{model_id}",
        live=False,
        preprocess=True,
        postprocess=False,
    )
    for model_id in models
]
def text_it1(inputs, text_gen1=text_gen1):
    """Extend the user's idea text via the prompt-extension Space.

    `text_gen1` defaults to the module-level loaded Space interface but can
    be overridden with any callable taking a single string.
    """
    extended = text_gen1(inputs)
    return extended
def set_model(current_model):
    """Relabel the output image with the newly selected model's name.

    `current_model` arrives as an integer index (the dropdown uses
    type="index"); it is mapped to the model name before building the update.
    """
    chosen_name = models[current_model]
    return gr.update(label=f"{chosen_name}")
def send_it1(inputs, model_choice):
    """Generate an image: run the prompt through the model picked in the dropdown.

    `model_choice` is the integer index from the dropdown (type="index"),
    used to select the matching loaded interface from `models2`.
    """
    selected_interface = models2[model_choice]
    return selected_interface(inputs)
# No stylesheet passed here; all styling is injected as raw <style> blocks
# through gr.HTML calls inside the layout below.
css=""""""
# Build the UI.  NOTE(review): this uses gradio 3.x APIs
# (gr.Column(style=...), queue(concurrency_count=...)) that were removed in
# gradio 4 — confirm the Space pins gradio<4.
with gr.Blocks(css=css) as myface:
    # Page header: dark-theme CSS, title banner, and a link back to the
    # original "Toy World" Space.
    gr.HTML("""
<div style="text-align: center; max-width: 1200px; margin: 0 auto;">
<div>
<style>
h1 {
font-size: 6em;
color: #c9c9c9;
margin-top: 30px;
margin-bottom: 30px;
text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
}
h3 {
color: #c9c9c9; !important;
}
h4 {
color: #ffffff; !important;
}
.gradio-container {
background-image: linear-gradient(#252525, #1e1e1e, #181818) !important;
color: #aaaaaa !important;
font-family: 'IBM Plex Sans', sans-serif !important;
}
.text-gray-500 {
color: #c9c9c9 !important;
}
.gr-box {
background-image: linear-gradient(#181818, #1e1e1e, #252525) !important;
border-top-color: #000000 !important;
border-right-color: #ffffff !important;
border-bottom-color: #ffffff !important;
border-left-color: #000000 !important;
}
.gr-input {
color: #c9c9c9; !important;
background-color: #252525 !important;
}
:root {
--neutral-100: #000000 !important;
}
</style>
<body>
<div class="center"><h1>Printing Press</h1>
</div>
</body>
</div>
<p style="margin-bottom: 9px; color: #aaaaaa;">
<h3>Top 684 Blitz Diffusion Models - A permanently online (unless huggingface is acting up, ha!) backup copy of <a href="https://huggingface.co/spaces/Yntec/ToyWorld"><u><p style="color:#8150df;"><b>Toy World!</b></p></u></a></h3></p>
</div>
""")
    with gr.Row():
        with gr.Column(scale=100):
            #Model selection dropdown
            # type="index": callbacks receive the integer position of the
            # selection (used to index `models`/`models2`), not the name.
            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            # Prompt input; the commented block below is a disabled
            # negative-prompt feature.
            magic1=gr.Textbox(label="Your Prompt", lines=4) #Positive
            #with gr.Column(scale=100):
            #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1)
            # Button styling injected as raw CSS.
            gr.HTML("""<style> .gr-button {
color: #ffffff !important;
text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
background-image: linear-gradient(#635a76, #a489d2) !important;
border-radius: 24px !important;
border: solid 1px !important;
border-top-color: #c99fff !important;
border-right-color: #000000 !important;
border-bottom-color: #000000 !important;
border-left-color: #c99fff !important;
padding: 6px 30px;
}
.gr-button:active {
color: #c99fff !important;
font-size: 98% !important;
text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
background-image: linear-gradient(#a489d2, #635a76) !important;
border-top-color: #000000 !important;
border-right-color: #ffffff !important;
border-bottom-color: #ffffff !important;
border-left-color: #000000 !important;
}
.gr-button:hover {
filter: brightness(130%);
}
</style>""")
            run=gr.Button("Generate Image")
    with gr.Row():
        # gradio 3.x style kwarg; removed in gradio 4.
        with gr.Column(style="width=800px"):
            # Output image, labeled with the initially selected model name;
            # set_model relabels it on dropdown change.
            output1=gr.Image(label=(f"{current_model}"))
    with gr.Row():
        with gr.Column(scale=50):
            # Prompt-extension helpers: type an idea, then either extend it
            # via the loaded Space or copy it verbatim into the prompt box.
            input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above")
            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above")
    # Identity helper: echoes the idea box into the prompt box.
    def short_prompt(inputs):
        return(inputs)
    # Event wiring:
    # - dropdown change relabels the output image with the chosen model name
    model_name1.change(set_model,inputs=model_name1,outputs=[output1])
    # - Generate sends (prompt text, model index) to send_it1
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    # - verbatim copy of the idea box into the prompt box
    use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
    # - run the prompt extender, overwriting the prompt box
    see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
# gradio 3.x queue kwarg (concurrency_count was removed in gradio 4).
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)