import io
import json
import logging
import time

import gradio as gr
import requests
from PIL import Image
from tqdm import tqdm

from image_processing import downscale_image, limit_colors, convert_to_grayscale, convert_to_black_and_white, resize_image, DITHER_METHODS, QUANTIZATION_METHODS

# Configure logging so the debug statements below are actually emitted
logging.basicConfig(level=logging.DEBUG)
# Unused placeholder class (never instantiated in this file)
class SomeClass:
    def __init__(self):
        self.images = []
with open('loras.json', 'r') as f:
loras = json.load(f)
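
# The field accesses below suggest each entry in loras.json looks roughly like
# this (illustrative values, not the actual file contents):
#   {
#       "image": "thumbnails/pixel-art.png",
#       "title": "Pixel Art XL",
#       "repo": "user/pixel-art-lora",
#       "trigger_word": "pixel art"
#   }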
def update_selection(selected_state: gr.SelectData):
logging.debug(f"Inside update_selection, selected_state: {selected_state}")
selected_lora_index = selected_state.index
selected_lora = loras[selected_lora_index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
return (
gr.update(placeholder=new_placeholder),
updated_text,
selected_state
)
def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
logging.debug(f"Inside run_lora, selected_state: {selected_state}")
if not selected_state:
logging.error("selected_state is None or empty.")
raise gr.Error("You must select a LoRA before proceeding.") # Popup error when no LoRA is selected
selected_lora_index = selected_state.index # Changed this line
selected_lora = loras[selected_lora_index]
api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
trigger_word = selected_lora["trigger_word"]
#token = os.getenv("API_TOKEN")
payload = {
"inputs": f"{prompt} {trigger_word}",
"parameters":{"negative_prompt": "bad art, ugly, watermark, deformed"},
}
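    # This follows the Hugging Face serverless Inference API convention:
    # "inputs" carries the prompt text (with the LoRA trigger word appended)
    # and "parameters" holds extra generation options such as the negative
    # prompt. On success the API responds with raw image bytes.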
#headers = {"Authorization": f"Bearer {token}"}
    # Log the outgoing request for debugging
    print(f"API Request: {api_url}")
    #print(f"API Headers: {headers}")
    print(f"API Payload: {payload}")
error_count = 0
pbar = tqdm(total=None, desc="Loading model")
    while True:
        response = requests.post(api_url, json=payload)
        if response.status_code == 200:
            pbar.close()
            # Success: return the image for both the raw-result slot and the
            # post-processing preview slot (prompt.submit expects two outputs)
            image = Image.open(io.BytesIO(response.content))
            return image, image
elif response.status_code == 503:
            # 503 means the model is cold-booting. The response also carries a
            # load-time estimate, but it is not very precise, so just poll.
time.sleep(1)
pbar.update(1)
elif response.status_code == 500 and error_count < 5:
print(response.content)
time.sleep(1)
error_count += 1
continue
else:
logging.error(f"API Error: {response.status_code}")
raise gr.Error("API Error: Unable to fetch the image.") # Raise a Gradio error here
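
# A minimal local smoke test for run_lora (hypothetical; assumes loras.json is
# populated and mimics gr.SelectData with a bare object exposing .index):
#
#   class _FakeSelection:
#       index = 0
#
#   img, _ = run_lora("a knight in a castle", _FakeSelection())
#   img.save("preview.png")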
def postprocess(
image,
enabled,
downscale,
need_rescale,
enable_color_limit,
number_of_colors,
quantization_method,
dither_method,
use_k_means,
is_grayscale,
number_of_shades,
quantization_method_grayscale,
dither_method_grayscale,
use_k_means_grayscale,
is_black_and_white,
is_inversed_black_and_white,
black_and_white_threshold,
use_color_palette,
palette_image,
palette_colors,
dither_method_palette
):
    if not enabled:
        return image
    if image is None:
        # Nothing has been generated yet, so there is nothing to process
        raise gr.Error("Generate an image before applying post-processing.")
    processed_image = image.copy()
if downscale > 1:
processed_image = downscale_image(processed_image, downscale)
if enable_color_limit:
processed_image = limit_colors(
image=processed_image,
limit=number_of_colors,
quantize=QUANTIZATION_METHODS[quantization_method],
dither=DITHER_METHODS[dither_method],
use_k_means=use_k_means
)
if is_grayscale:
processed_image = convert_to_grayscale(processed_image)
processed_image = limit_colors(
image=processed_image,
limit=number_of_shades,
quantize=QUANTIZATION_METHODS[quantization_method_grayscale],
dither=DITHER_METHODS[dither_method_grayscale],
use_k_means=use_k_means_grayscale
)
if is_black_and_white:
processed_image = convert_to_black_and_white(processed_image, black_and_white_threshold, is_inversed_black_and_white)
if use_color_palette:
processed_image = limit_colors(
image=processed_image,
palette=palette_image,
palette_colors=palette_colors,
dither=DITHER_METHODS[dither_method_palette]
)
if need_rescale:
processed_image = resize_image(processed_image, image.size)
return processed_image
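
# Example of calling postprocess directly with the same argument order the UI
# below collects (illustrative values only):
#
#   pixelated = postprocess(
#       img, True, 8, True,                      # enabled, downscale, rescale
#       True, 16, "Median Cut", "None", True,    # color-limit settings
#       False, 16, "Median Cut", "None", True,   # grayscale settings
#       False, False, 128,                       # black-and-white settings
#       False, None, 16, "None",                 # custom-palette settings
#   )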
with gr.Blocks() as app:
title = gr.Markdown("# artificialguybr LoRA portfolio")
description = gr.Markdown("### This is a Pixel Art Generator using SD Loras.")
selected_state = gr.State()
with gr.Row():
gallery = gr.Gallery([(item["image"], item["title"]) for item in loras], label="LoRA Gallery", allow_preview=False, columns=1)
with gr.Column():
prompt_title = gr.Markdown("### Click on a LoRA in the gallery to create with it")
selected_info = gr.Markdown("")
with gr.Row():
prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
button = gr.Button("Run")
            result = gr.Image(interactive=False, label="Generated Image", type="pil")
post_processed_result = gr.Image(interactive=False, label="Post-Processed Image")
            # Post-processing controls, grouped in an accordion inside the same gr.Blocks context
with gr.Accordion(label="Pixel art", open=True):
with gr.Row():
enabled = gr.Checkbox(label="Enable", value=False)
downscale = gr.Slider(label="Downscale", minimum=1, maximum=32, step=2, value=8)
need_rescale = gr.Checkbox(label="Rescale to original size", value=True)
with gr.Tabs():
with gr.TabItem("Color"):
enable_color_limit = gr.Checkbox(label="Enable", value=False)
palette_size_color = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
quantization_methods_color = gr.Radio(choices=["Median Cut", "Maximum Coverage", "Fast Octree"], label="Colors Quantization Method", value="Median Cut")
dither_methods_color = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Colors Dither Method", value="None")
k_means_color = gr.Checkbox(label="Enable k-means for color quantization", value=True)
with gr.TabItem("Grayscale"):
enable_grayscale = gr.Checkbox(label="Enable", value=False)
palette_size_gray = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
quantization_methods_gray = gr.Radio(choices=["Median Cut", "Maximum Coverage", "Fast Octree"], label="Colors Quantization Method", value="Median Cut")
dither_methods_gray = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Colors Dither Method", value="None")
k_means_gray = gr.Checkbox(label="Enable k-means for color quantization", value=True)
with gr.TabItem("Black and white"):
enable_black_and_white = gr.Checkbox(label="Enable", value=False)
inverse_black_and_white = gr.Checkbox(label="Inverse", value=False)
threshold_black_and_white = gr.Slider(label="Threshold", minimum=1, maximum=256, step=1, value=128)
with gr.TabItem("Custom color palette"):
enable_custom_palette = gr.Checkbox(label="Enable", value=False)
palette_image = gr.Image(label="Color palette image", type="pil")
palette_size_custom = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
dither_methods_custom = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Colors Dither Method", value="None")
post_process_button = gr.Button("Apply Post-Processing")
    # Wire the UI events to their handlers
gallery.select(update_selection, outputs=[prompt, selected_info, selected_state])
    prompt.submit(fn=run_lora, inputs=[prompt, selected_state], outputs=[result, post_processed_result])
    button.click(fn=run_lora, inputs=[prompt, selected_state], outputs=[result, post_processed_result])  # the Run button triggers the same handler as pressing Enter
    # Inputs are ordered to match postprocess's signature; the source image is
    # the freshly generated result, not the already post-processed preview
    post_process_button.click(
        fn=postprocess,
        inputs=[
            result, enabled, downscale, need_rescale,
            enable_color_limit, palette_size_color, quantization_methods_color, dither_methods_color, k_means_color,
            enable_grayscale, palette_size_gray, quantization_methods_gray, dither_methods_gray, k_means_gray,
            enable_black_and_white, inverse_black_and_white, threshold_black_and_white,
            enable_custom_palette, palette_image, palette_size_custom, dither_methods_custom,
        ],
        outputs=[post_processed_result],
    )
app.queue(max_size=20, concurrency_count=5)
app.launch()