import spaces
import gradio as gr
import subprocess
from PIL import Image
import json
import os
import time

import mp_box
import draw_landmarks68
import landmarks68_utils
import io
import numpy as np

from glibvision.cv2_utils import pil_to_bgr_image, bgr_to_rgb
from gradio_utils import save_image, save_buffer, clear_old_files, read_file
from close_eyes import process_close_eyes_image
from open_mouth import process_open_mouth

'''
Face detection based on MediaPipe face-landmark detection.
https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker

From the model card:
https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf

Licensed under the Apache License, Version 2.0.
Trained on Google's dataset (see the model card for details).
'''
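# process_images() below works in four steps:
#   1. Detect the face with MediaPipe and convert the landmarks to 68-point groups.
#   2. Render one mouth-opened frame per step (up to open_size_y frames).
#   3. Render an eyes-closed frame plus the mask used to create it.
#   4. Assemble the frames into a looping animated WebP and return it with a gallery.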
def process_images(image, eyelid_thick=1, eyelid_blur=9, inpaint_radius=10, inpaint_blur=30, mask_dilate=10, dilate_blur=10,
                   open_size_y=8, inside_layer_low_depth=False, hole_image_name="dark01",
                   make_animation=True, eyes_duration=200, mouth_duration=40,
                   progress=gr.Progress(track_tqdm=True)):
    clear_old_files()
    if image is None:
        raise gr.Error("Need Image")

    progress(0, desc="Start Making Animation")
    # Detect the face and convert the MediaPipe result to 68-point landmark groups.
    boxes, mp_image, face_landmarker_result = mp_box.mediapipe_to_box(image)
    annotated_image, bbox, landmark_points = draw_landmarks68.draw_landmarks_on_image(image, face_landmarker_result)
    landmark_list = draw_landmarks68.convert_to_landmark_group_json(landmark_points)

    galleries = []

    progressed = 0
    progress_step = 0.8 / open_size_y
    animations = []
    np_image = pil_to_bgr_image(image)
    if make_animation:
        start_index = 0
    else:
        # Skip the intermediate frames and render only the fully opened mouth.
        start_index = open_size_y - 1

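    # Render one mouth-opened frame per step; each frame is also saved to the gallery.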
    for i in range(start_index, open_size_y):
        mouth_opened = process_open_mouth(np_image, landmark_list, 0, i, True, inside_layer_low_depth, 0, hole_image_name + ".jpg")
        animations.append(mouth_opened)
        mouth_opened_path = save_image(mouth_opened)
        galleries.append((mouth_opened_path, f"mouth-opened {i}"))
        progressed += progress_step
        progress(progressed)

    if make_animation:
        # When animating, close the eyes on the first mouth frame rather than the original image.
        np_image = pil_to_bgr_image(animations[0])

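    # Generate the eyes-closed image (and the mask it used) from the eyelid and inpaint settings.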
    eyes_closed_np, mask_np = process_close_eyes_image(np_image, landmark_list, eyelid_thick, eyelid_blur, inpaint_radius, inpaint_blur, mask_dilate, dilate_blur)
    eyes_closed = Image.fromarray(bgr_to_rgb(eyes_closed_np))

    eyes_closed_path = save_image(eyes_closed)
    galleries.append((eyes_closed_path, "eyes-closed"))

    eyes_closed_mask_path = save_image(Image.fromarray(mask_np))
    galleries.append((eyes_closed_mask_path, "eyes-closed-mask"))

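    # Frame timeline: mouth frames forward, then reversed, then the eyes-closed frame;
    # durations are per-frame display times in milliseconds.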
    durations = [mouth_duration] * len(animations) * 2 + [eyes_duration]
    if make_animation:
        animations = animations + animations[::-1] + [eyes_closed]
    output_buffer = io.BytesIO()
    animations[0].save(output_buffer,
                       save_all=True,
                       append_images=animations[1:],
                       duration=durations,
                       loop=0,
                       format='WebP')
    webp_path = save_buffer(output_buffer)

    return webp_path, galleries

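# A minimal, hypothetical sketch of calling process_images() directly outside the
# Gradio UI, assuming one of the bundled example images is available on disk:
#
#   webp_path, gallery = process_images(Image.open("examples/00003245_00.jpg"),
#                                       open_size_y=4, make_animation=False)
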
css = """
#col-left {
    margin: 0 auto;
    max-width: 640px;
}
#col-right {
    margin: 0 auto;
    max-width: 640px;
}
.grid-container {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 10px;
}

.image {
    width: 128px;
    height: 128px;
    object-fit: cover;
}

.text {
    font-size: 16px;
}
"""

with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        gr.HTML(read_file("demo_tools.html"))
    with gr.Row():
        with gr.Column():
            image = gr.Image(height=800, sources=['upload', 'clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
            with gr.Row(elem_id="prompt-container", equal_height=False):
                with gr.Row():
                    btn = gr.Button("Create Eyes-Closed and Mouth-Opened", elem_id="run_button", variant="primary")

            with gr.Accordion(label="Eyes-Closed Advanced Settings", open=False):
                with gr.Row(equal_height=True):
                    eyelid_thick = gr.Slider(
                        label="Eyelid thickness",
                        minimum=0,
                        maximum=20,
                        step=1,
                        value=1)
                    eyelid_blur = gr.Slider(
                        label="Eyelid blur",
                        minimum=0,
                        maximum=30,
                        step=1,
                        value=7)
                with gr.Row(equal_height=True):
                    inpaint_radius = gr.Slider(
                        label="Inpaint radius",
                        minimum=1,
                        maximum=20,
                        step=1,
                        value=10, info="increasing makes the result smoother but slower")
                    inpaint_blur = gr.Slider(
                        label="Inpaint blur",
                        minimum=0,
                        maximum=30,
                        step=1,
                        value=20)
                with gr.Row(equal_height=True):
                    mask_dilate = gr.Slider(
                        label="Mask dilate",
                        minimum=0,
                        maximum=20,
                        step=1,
                        value=10)
                    dilate_blur = gr.Slider(
                        label="Dilate blur",
                        minimum=0,
                        maximum=20,
                        step=1,
                        value=10)
                with gr.Row(equal_height=True):
                    eyes_duration = gr.Slider(
                        label="Eyes-closed animation duration",
                        minimum=1,
                        maximum=500,
                        step=1,
                        value=200)
            with gr.Accordion(label="Mouth-Opened Advanced Settings", open=False):
                with gr.Row(equal_height=True):
                    make_animation = gr.Checkbox(label="animation", value=True, info="takes a long time when Open Size is large")
                    open_size_y = gr.Slider(
                        label="Open Size",
                        minimum=1,
                        maximum=40,
                        step=1,
                        value=8, info="large sizes are intended for img2img/inpaint")
                    inside_layer_low_depth = gr.Checkbox(label="Inner Layer Low", value=False, info="check this for a better result when Open Size > 20")
                    hole_image_name = gr.Dropdown(label="inner image name", choices=["dark01", "black", "mid01", "mid02"], value="dark01", info="black works better for img2img")
                with gr.Row(equal_height=True):
                    mouth_duration = gr.Slider(
                        label="Mouth-open animation duration", info="per frame",
                        minimum=1,
                        maximum=500,
                        step=1,
                        value=40)
        with gr.Column():
            animation_out = gr.Image(height=760, label="Animation", elem_id="output-animation")
            image_out = gr.Gallery(label="Output", elem_id="output-img", preview=True)

    btn.click(fn=process_images,
              inputs=[image, eyelid_thick, eyelid_blur, inpaint_radius, inpaint_blur, mask_dilate, dilate_blur,
                      open_size_y, inside_layer_low_depth, hole_image_name, make_animation,
                      eyes_duration, mouth_duration],
              outputs=[animation_out, image_out], api_name='infer')
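    # A hedged sketch of calling the `infer` endpoint from a client, assuming a
    # recent gradio_client is installed and the app is running locally; the
    # positional argument order mirrors the `inputs` list above:
    #
    #   from gradio_client import Client, handle_file
    #   client = Client("http://127.0.0.1:7860")
    #   webp, gallery = client.predict(
    #       handle_file("examples/00003245_00.jpg"),
    #       1, 7, 10, 20, 10, 10,       # eyelid thickness/blur, inpaint radius/blur, mask dilate/blur
    #       8, False, "dark01", True,   # open size, inner layer low, inner image, animation
    #       200, 40,                    # eyes-closed / mouth-open frame durations
    #       api_name="/infer")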
    gr.Examples(
        examples=[
            ["examples/00003245_00.jpg", "examples/00003245_00.webp"],
            ["examples/00002062.jpg", "examples/00002062.webp"],
            ["examples/00100265.jpg", "examples/00100265.webp"],
            ["examples/00824006.jpg", "examples/00824006.webp"],
            ["examples/00824008.jpg", "examples/00824008.webp"],
            ["examples/02316230.jpg", "examples/02316230.webp"],
            ["examples/00825000.jpg", "examples/00825000.webp"],
            ["examples/00826007.jpg", "examples/00826007.webp"],
            ["examples/00827009.jpg", "examples/00827009.webp"],
            ["examples/00828003.jpg", "examples/00828003.webp"],
        ],
        inputs=[image, animation_out], examples_per_page=5
    )
    gr.HTML(read_file("demo_footer.html"))

if __name__ == "__main__":
    demo.launch()