import gradio as gr
import io
import os
from PIL import Image
import base64
from scripts.process_utils import initialize, process_image_as_base64, image_to_base64
from scripts.anime import init_model
from scripts.generate_prompt import load_wd14_tagger_model
from datetime import datetime
from pytz import timezone
from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
# Initialization
initialize(_use_local=False, use_gpu=True, use_dotenv=True)
init_model(use_local=False)
load_wd14_tagger_model()
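
# Run the selected pipeline on the uploaded image. Returns base64-encoded
# "sotai" (body) and sketch images plus a timestamped filename that is reused
# if the user later sends the result as feedback.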
def process_image(input_image, mode, weight1=None, weight2=None):
    tokyo_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S")  # timestamp in Japan time
    print(f"[{tokyo_time}] Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")
    # Filename used for feedback submissions
    tokyo_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y%m%d_%H%M%S")
    filename = f"{tokyo_time}_mode={mode}_weight1={weight1}_weight2={weight2}.png"
    # Existing image-processing logic
    if mode == "original":
        sotai_image, sketch_image = process_image_as_base64(input_image, mode, None, None)
    elif mode == "refine":
        sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
    else:
        raise ValueError(f"Unknown mode: {mode}")
    return sotai_image, sketch_image, None, filename
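
# Composite the two decoded images. Each image's alpha channel is scaled by its
# opacity value, the sketch is pasted onto a white canvas, and the sotai (body)
# layer is pasted on top of it.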
def mix_images(sotai_image_data, sketch_image_data, opacity1, opacity2):
    sotai_image = Image.open(io.BytesIO(base64.b64decode(sotai_image_data))).convert('RGBA')
    sketch_image = Image.open(io.BytesIO(base64.b64decode(sketch_image_data))).convert('RGBA')
    if sotai_image.size != sketch_image.size:
        sketch_image = sketch_image.resize(sotai_image.size, Image.Resampling.LANCZOS)
    mixed_image = Image.new('RGBA', sotai_image.size, (255, 255, 255, 255))
    sotai_alpha = sotai_image.getchannel('A').point(lambda x: int(x * opacity1))
    sketch_alpha = sketch_image.getchannel('A').point(lambda x: int(x * opacity2))
    mixed_image.paste(sketch_image, (0, 0), mask=sketch_alpha)
    mixed_image.paste(sotai_image, (0, 0), mask=sotai_alpha)
    return mixed_image
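
# Blend the current sotai and sketch images at a fixed 50/50 opacity and forward
# the result to the developer via send_feedback from scripts.survey.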
def send_mixed_feedback(sotai_image_data, sketch_image_data, filename):
    mixed_image = mix_images(sotai_image_data, sketch_image_data, 0.5, 0.5)
    return send_feedback(mixed_image, filename)
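
# UI layout: a language/visit-count prompt (visit_section), a first-visit survey
# (survey_section), a returning-visitor proceed screen (proceed_section), and the
# main processing screen (main_section). The event handlers near the end toggle
# visibility between these sections.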
with gr.Blocks() as demo:
    form_visible_flag = gr.Textbox(value="false", elem_id="form_flag", visible=False)

    # title
    gr.HTML("<h1>Image2Body demo</h1>")
    # description with translations and additional notes
    gr.HTML("""
    <p>Upload an image and select processing options to generate body and sketch images.</p>
    <p>まだstandingタグのついた女性キャラクターの1000枚の画像しか学習していないため、他のポーズは上手くできないことをご了承ください。</p>
    <p>さらなる情報は<a href="https://x.com/Yeq6X" target="_blank">@Yeq6X</a>までお問い合わせください。</p>
    <p>Note: Currently, the model has been trained on only 1000 images of female characters with the 'standing' tag, so other poses may not be processed accurately.</p>
    <p>For more information, please contact <a href="https://x.com/Yeq6X" target="_blank">@Yeq6X</a>.</p>
    <p>注意:目前模型仅使用带有“standing”标签的1000张女性角色图像进行训练,因此其他姿势可能无法准确处理。</p>
    <p>如需更多信息,请联系<a href="https://x.com/Yeq6X" target="_blank">@Yeq6X</a>。</p>
    """)

    # Visit-count selection
    with gr.Column(visible=False) as visit_section:
        # Language selection section
        with gr.Row():
            language_choice = gr.Radio(
                choices=["en", "ja", "zh"],
                label="Select Language / 言語を選択 / 选择语言",
                value="en"
            )
        localized = localize("en")
        welcome_message = gr.HTML(localized["welcome_message"])
        visit_choice = gr.Radio(choices=localized["visit_choices"], label="")

    # Survey section shown on a first visit
    with gr.Column(visible=False) as survey_section:
        # Form section
        form_section = gr.HTML(localize("en")["form_html"])

    # "Proceed" section shown on repeat visits
    with gr.Column(visible=False) as proceed_section:
        # gr.HTML("<h2>再訪ありがとうございます!</h2>")
        # proceed_button = gr.Button("進む")
        proceed_message = gr.HTML(localize("en")["returning_message"])
        proceed_button = gr.Button(localize("en")["proceed_button"], variant="primary")

    # Update the displayed text when the language selection changes
    def update_language(language):
        localized = localize(language)
        return (
            gr.update(value=localized["welcome_message"]),
            gr.update(choices=localized["visit_choices"]),
            gr.update(value=localized["returning_message"]),
            gr.update(value=localized["proceed_button"]),
            gr.update(value=localized["form_html"])
        )

    language_choice.change(
        update_language,
        inputs=[language_choice],
        outputs=[welcome_message, visit_choice, proceed_message, proceed_button, form_section]
    )
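
    # The hidden submit_flag textbox is presumably toggled by the injected
    # JavaScript (scripts.survey.script) when the HTML form is submitted; its
    # change event then hides the survey form.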
    # Hide the form section once the form has been submitted
    def handle_submit():
        return gr.update(visible=False)

    submit_flag = gr.Textbox(visible=False, value="false")
    submit_flag.change(
        handle_submit,
        inputs=[],
        outputs=[form_section]
    )

    # Main screen section
    with gr.Column(visible=True) as main_section:
        # interface
        submit = None
        with gr.Row():
            with gr.Column() as input_col:
                input_image = gr.Image(type="pil", label="Input Image", height=512)
                with gr.Tab("original"):
                    original_mode = gr.Text("original", label="Mode", visible=False)
                    original_submit = gr.Button("Submit", variant="primary")
                with gr.Tab("refine"):
                    refine_input = [
                        gr.Text("refine", label="Mode", visible=False),
                        gr.Slider(0, 2, value=0.6, step=0.05, label="Weight 1 (Sketch)"),
                        gr.Slider(0, 1, value=0.05, step=0.025, label="Weight 2 (Body)")
                    ]
                    refine_submit = gr.Button("Submit", variant="primary")
                gr.Examples(
                    examples=[f"images/sample{i}.png" for i in [1, 2, 4, 5, 6, 7, 10, 16, 18, 19]],
                    inputs=[input_image]
                )
            with gr.Column() as output_col:
                sotai_image_data = gr.Text(label="Sotai Image data", visible=False)
                sketch_image_data = gr.Text(label="Sketch Image data", visible=False)
                mixed_image = gr.Image(label="Output Image", elem_id="output_image")
                opacity_slider1 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sotai)")
                opacity_slider2 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sketch)")
                send_filename = gr.Textbox(label="Feedback", visible=False)
                gr.HTML("<h3>Send Feedback Image/画像を送信</h3>")
                gr.HTML("<p>Images are used only for developer review and will not be shared.</p>")
                gr.HTML("<p>画像は開発者が確認するためだけに使用され、公開されません。</p>")
                send_feedback_button = gr.Button("Contribute as Feedback to Developer/開発者へのフィードバックとして協力する")
                feed_back_result = gr.Textbox(label="Feedback Result")
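
    # Event wiring: each Submit button runs process_image for its mode, and any
    # change to the decoded image data or the opacity sliders re-runs mix_images
    # to refresh the displayed output.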
    original_submit.click(
        process_image,
        inputs=[input_image, original_mode],
        outputs=[sotai_image_data, sketch_image_data, mixed_image, send_filename]
    )
    refine_submit.click(
        process_image,
        inputs=[input_image, refine_input[0], refine_input[1], refine_input[2]],
        outputs=[sotai_image_data, sketch_image_data, mixed_image, send_filename]
    )
    sotai_image_data.change(
        mix_images,
        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
        outputs=mixed_image
    )
    opacity_slider1.change(
        mix_images,
        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
        outputs=mixed_image
    )
    opacity_slider2.change(
        mix_images,
        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
        outputs=mixed_image
    )

    # Switch sections when the form-visibility flag changes
    form_visible_flag.change(
        handle_form_submission,
        inputs=[form_visible_flag],
        outputs=[survey_section, main_section]
    )
    # Switch sections according to the selected visit choice
    visit_choice.change(
        handle_visit_choice,
        inputs=[visit_choice, language_choice],
        outputs=[visit_section, survey_section, proceed_section]
    )
    # Move on to the main screen when the proceed button is clicked
    proceed_button.click(
        handle_proceed,
        inputs=[],
        outputs=[proceed_section, main_section]
    )
    send_feedback_button.click(
        send_mixed_feedback,
        inputs=[sotai_image_data, sketch_image_data, send_filename],
        outputs=[feed_back_result],
    )

    # Load the custom JavaScript on startup
    demo.load(None, None, None, js=script)

demo.launch()