|
|
|
|
|
|
|
|
|
""" |
|
# Currently on hold; verifying whether to use other technologies.
|
# This is temporarily patched so it works on Hugging Face Spaces.
|
|
|
スクリプト名 |
|
batch_open_mouth.py |
|
|
|
概要 |
|
静止画から口を開ける画像を作成 |
|
|
|
説明 |
|
|
|
引数 |
|
argparseを参照 |
|
|
|
不具合 |
|
横方向は思ったより機能していない。 |
|
Issueが山盛り |
|
https://github.com/akjava/lip_recognition_tools/issues |
|
|
|
著者: Akihito Miyazaki |
|
作成日: 2024-04-23 |
|
更新履歴: |
|
- 2024-04-23: 最初のリリース |
|
- 2024-09-15:hole_offsetを追加 |
|
# 口の中の位置は、create_hole_image.pyの画像を変更すること |
|
|
|
""" |
|
|
|
import cv2 |
|
import numpy as np |
|
from PIL import Image |
|
import lip_utils |
|
import create_top_lip |
|
import create_bottom_lip |
|
import create_chin_image |
|
import create_no_mouth |
|
import create_hole_image |
|
import os |
|
import argparse |
|
import landmarks68_utils |
|
import math |
|
import sys |
|
|
|
from glibvision.common_utils import check_exists_files |
|
|
|
|
|
def parse_arguments():
    """Build and run the CLI parser for the open-mouth generator.

    Returns the parsed argparse.Namespace.

    Side effect: forces ``lip_utils.DEBUG`` to True, so every helper in
    this pipeline dumps its intermediate images to the working directory.
    """
    arg_parser = argparse.ArgumentParser(description='Open Mouth')

    # store_false: the flag is ON by default; passing --no_close_lip
    # disables the closed-lip pre-pass.
    arg_parser.add_argument('--no_close_lip', "-ccl", action="store_false",
                            help='Close Lip画像を作らない')
    arg_parser.add_argument('--landmark', "-l",
                            help='landmarkdata')
    arg_parser.add_argument('--input', "-i", required=True,
                            help='変換する画像の元(必須) 口を閉じていること')
    arg_parser.add_argument('--output', "-o",
                            help='画像の保存先(別途一時的なレイヤーファイルも作られる)')
    arg_parser.add_argument('--open_size_x', "-x", type=int, default=0,
                            help='横方向へ広がるサイズ(いまいち機能していない)')
    arg_parser.add_argument('--open_size_y', "-y", type=int, default=9,
                            help='縦方向への口の広がり(最大20ぐらい)')
    arg_parser.add_argument('--hole_offset', "-hole", type=int, default=0,
                            help='口内画像の上下')
    arg_parser.add_argument('--hole_image_name', "-hname", default="dark01.jpg",
                            help='口内画像・ただし、hole_images内にあること')
    arg_parser.add_argument('--side_edge_expand', "-see", type=float, default=0.02,
                            help='横の端をどれだけ動かすか')
    arg_parser.add_argument('--inside_layer_low_depth', "-illd", action="store_true",
                            help="basically not good small size but works for large and img2img")

    # Kept from the original: parsing arguments globally enables debug output.
    lip_utils.DEBUG = True

    return arg_parser.parse_args()
|
|
|
if __name__ == "__main__":
    # FIXME(review): this guard runs at import time, *before*
    # process_open_mouth is defined further down the file, so running the
    # script directly raises NameError. The whole block should be moved to
    # the bottom of the file (kept in place here to limit the diff).
    args = parse_arguments()

    img_path = args.input

    # Default output name: <input base>_<open_size_y>.jpg
    output = args.output
    if output is None:
        base, ext = os.path.splitext(img_path)
        output = f"{base}_{args.open_size_y:02d}.jpg"

    # BUG FIX: the parsed --landmark option was ignored (hardcoded None);
    # wire it through. Default is still None, so behavior is unchanged
    # when the option is not given.
    landmark = args.landmark
    # check_exists_files presumably returns truthy on a missing file —
    # TODO confirm against glibvision.common_utils.
    if check_exists_files([landmark, img_path], [], False):
        print("File Error happend and exit app")
        sys.exit(1)

    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread returns None instead of raising on unreadable files.
        print("File Error happend and exit app")
        sys.exit(1)

    use_close_lip = args.no_close_lip
    # BUG FIX: the original call was process_open_mouth(img, None, use_close_lip),
    # which passed the boolean positionally into open_size_x and dropped every
    # other parsed option. Pass everything by keyword instead.
    output_image = process_open_mouth(
        img,
        None,
        open_size_x=args.open_size_x,
        open_size_y=args.open_size_y,
        use_close_lip=use_close_lip,
        inside_layer_low_depth=args.inside_layer_low_depth,
        hole_offset=args.hole_offset,
        hole_image_name=args.hole_image_name,
        side_edge_expand=args.side_edge_expand,
    )
    # BUG FIX: the return value was discarded, so the script produced no
    # output file (the save code inside process_open_mouth is unreachable).
    output_image.save(output)
    print(f"open-mouth created {output}")
|
|
|
|
|
def process_open_mouth(cv_image, landmarks_list, open_size_x=0, open_size_y=8, use_close_lip=True, inside_layer_low_depth=False, hole_offset=0, hole_image_name="dark01.jpg", side_edge_expand=0.02):
    """Create an open-mouth image from a still face picture.

    Builds independent layers — face with the mouth removed, inner-mouth
    ("hole") texture, top lip, chin, bottom lip — and alpha-composites
    them with PIL into one RGB image.

    Args:
        cv_image: source image as an OpenCV ndarray (BGR — TODO confirm).
        landmarks_list: 68-point facial-landmark data consumed by the
            lip_utils helpers; this function never inspects its structure
            directly, so see lip_utils for the exact format.
        open_size_x: horizontal widening in pixels (module docstring notes
            it works poorly).
        open_size_y: vertical mouth opening in pixels (~20 max per CLI help).
        use_close_lip: when True, first produce a closed-lip face via the
            close_lip module and work from that.
        inside_layer_low_depth: when True the hole layer is composited just
            above the bare face; otherwise it goes on top of all layers.
        hole_offset: vertical offset of the inner-mouth texture.
        hole_image_name: texture file name, expected under hole_images/.
        side_edge_expand: currently unused — the code that consumed it is
            commented out below.

    Returns:
        PIL.Image.Image in RGB mode.

    Side effects: when lip_utils.DEBUG is True, several intermediate
    images are written to the current working directory.
    """
    img = cv_image
    img_h, img_w = lip_utils.get_image_size(img)

    # Upper-lip landmark points (debug print kept from the original).
    top_points=lip_utils.get_landmark_points(landmarks_list,lip_utils.TOP_LIP)
    print(top_points)

    # Disabled experiment kept verbatim: widen the mouth corners outward
    # by side_edge_expand. This is why the parameter is currently unused.
    '''
    right_outer = top_points[0]
    left_outer = top_points[6]
    lip_width = lip_utils.distance_2d(left_outer,right_outer)
    #print(lip_width)
    lip_diff = [left_outer[0]-right_outer[0],left_outer[1]-right_outer[1]]

    side_edge_expand_point =[lip_diff[0]*side_edge_expand,lip_diff[1]*side_edge_expand]
    #print(side_edge_expand_point)
    print(f"side-edge expanded {side_edge_expand_point}")

    top_points[0][0]-=int(side_edge_expand_point[0])
    top_points[6][0]+=int(side_edge_expand_point[0])
    top_points[0][1]-=int(side_edge_expand_point[1])
    top_points[6][1]+=int(side_edge_expand_point[1])
    #img = cv2.imread(img_path,cv2.IMREAD_UNCHANGED) #4channel got problem use green back
    #img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    '''

    if use_close_lip:
        # Local import keeps close_lip optional when the pre-pass is off.
        import close_lip
        img,mask = close_lip.process_close_lip_image(img,landmarks_list)
        # NOTE(review): close_lip_image is only bound on this branch; the
        # (unreachable) tail below reads it unconditionally.
        close_lip_image = img

    # Pixel margin handed to the per-layer helpers below.
    margin = 12

    # Inner-mouth polygon points.
    hole_points = lip_utils.get_lip_hole_points(landmarks_list)

    (bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
    left_thick,mid_thick,right_thick = lip_utils.get_top_lip_thicks(landmarks_list)
    bottom_base = bottom_height/1.5

    # How much thicker than "bottom_base" each side of the top lip is;
    # used to raise the top edge of the hole polygon.
    diff_left = max(0,int(left_thick - bottom_base))
    diff_right = max(0,int(right_thick - bottom_base))
    diff_mid = max(0,int((diff_right+diff_left)*0.4))  # NOTE(review): unused
    diff_avg = int((diff_right+diff_left)*0.5)

    # Raise hole points 1..3 (the top edge): the middle point (idx 2) by
    # half of diff_avg, its neighbours by the full amount.
    fix_top_thick_hole_points = []
    top_point = [1,2,3]
    for idx,point in enumerate(hole_points):
        if idx in top_point:
            new_point = np.copy(point)
            if idx == 2:
                new_point[1] -= int(diff_avg*0.5)
            else:
                new_point[1] -= int(diff_avg*1)
            fix_top_thick_hole_points.append(new_point)
        else:
            fix_top_thick_hole_points.append(point)

    # Rasterize the (adjusted) hole polygon into a mask.
    # NOTE(review): this rebinds `mask` from the close_lip branch above.
    mask = lip_utils.create_mask_from_points(img,fix_top_thick_hole_points,2,2)
    inverse_mask = cv2.bitwise_not(mask)
    if lip_utils.DEBUG:
        cv2.imwrite("holeed_mask.jpg",mask)
        cv2.imwrite("holeed_inverse_mask.jpg",inverse_mask)

    # Cut the mouth out (alpha) and inpaint the hole region of the base image.
    img_transparent = lip_utils.apply_mask_alpha(img,mask)
    img_inpainted = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA)
    if lip_utils.DEBUG:
        cv2.imwrite("holeed_transparent.png",img_transparent)
        cv2.imwrite("holeed_inpainted.jpg",img_inpainted)

    # Soften the inpainted area: blur the whole image, then copy the
    # blurred pixels back only where the mask is set.
    gaussian_size = 10
    gaused = cv2.GaussianBlur(img_inpainted, (0,0 ), sigmaX=gaussian_size, sigmaY=gaussian_size)

    if lip_utils.DEBUG:
        cv2.imwrite("holed_gaused.jpg",gaused)
    mask_1d = np.squeeze(mask)  # assumes a single-channel mask — TODO confirm
    img_inpainted[mask_1d==255] = gaused[mask_1d==255]

    # NOTE(review): thresh/binary_mask are computed but never used below.
    thresh, binary_mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)

    def calculate_weights(image):
        # NOTE(review): dead helper — defined but never called. Returns a
        # constant 0.5 weight map shaped like `image`.
        weights = np.ones_like(image) * 0.5
        return weights

    # Build the individual layers.
    top_lip_layer,lip_mask = create_top_lip.process_lip_image(img_transparent,landmarks_list, margin, open_size_y, open_size_x)
    hole_image = create_hole_image.process_create_hole_image(img,landmarks_list,open_size_y,open_size_x,hole_offset,hole_image_name)
    hole_image_apply_mask = lip_utils.apply_mask(hole_image,lip_mask)
    if lip_utils.DEBUG:
        cv2.imwrite("hole_image.jpg",hole_image)
        cv2.imwrite("hole_image_apply_mask.png",hole_image_apply_mask)

    # Bottom lip gets twice the vertical opening of the other layers.
    bottom_lip_layer = create_bottom_lip.process_lip_image(img,landmarks_list, margin, open_size_y*2, open_size_x)
    chin_layer = create_chin_image.process_chin_image(img,landmarks_list, margin, open_size_y, open_size_x)

    no_mouth_face = create_no_mouth.process_create_no_mouth_image(img,landmarks_list)

    # Build the jaw outline: 3 reversed center-lip points followed by
    # chin points 4..12; everything from index 4 on is shifted down by
    # the mouth opening. (Debug prints kept from the original.)
    chin_points = lip_utils.get_landmark_points(landmarks_list,lip_utils.POINTS_CHIN)
    points =[]
    lip_points = lip_utils.get_lip_mask_points(landmarks_list)
    center_lips = lip_points[2:5]
    print("center")
    print(center_lips)
    center_lips = center_lips[::-1]
    print(center_lips)
    points.extend(center_lips+chin_points[4:13])
    print(points)
    for i in range(4,len(points)-1):
        points[i][1] += open_size_y

    # Debug visualization of the jaw outline as a green polyline.
    jaw_mask_line = lip_utils.create_mask(no_mouth_face,(0,0,0))
    cv2.polylines(jaw_mask_line, [np.array(points)], isClosed=True, color=(0,255,0), thickness=1)
    if lip_utils.DEBUG:
        cv2.imwrite("open_mouth_jaw_mask_line.jpg",jaw_mask_line)

    # NOTE(review): jaw_mask is produced but only consumed by the
    # unreachable tail after the return below.
    dilation_size=3
    jaw_mask = lip_utils.create_mask_from_points(img,points,dilation_size,3)

    # Re-import is redundant (PIL.Image is imported at module level) but
    # harmless; kept as-is.
    from PIL import Image

    def convert_cv2_to_pil(cv2_img,is_bgra=True):
        """
        Convert an OpenCV (cv2) image to a PIL image.
        """
        # NOTE(review): with is_bgra=False, rgb_img is unbound and
        # Image.fromarray raises NameError — only ever called with the
        # default True here.
        if is_bgra:
            rgb_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGRA2RGBA)
        pil_img = Image.fromarray(rgb_img)
        return pil_img

    pil_images =[]

    # Layer stacking order, bottom to top. "low depth" tucks the hole
    # texture right above the bare face instead of on top of everything.
    if inside_layer_low_depth:
        layers = [no_mouth_face,hole_image_apply_mask,top_lip_layer,chin_layer,bottom_lip_layer]
    else:
        layers = [no_mouth_face,top_lip_layer,chin_layer,bottom_lip_layer,hole_image_apply_mask]
    for layer in layers:
        pil_images.append(convert_cv2_to_pil(layer))

    # NOTE(review): this reversal has no effect — compositing below uses
    # pil_images (still in bottom-to-top order), not layers.
    layers = layers[::-1]
    output_image = None
    for i in range(len(pil_images)):
        # NOTE(review): `is None` would be the safer comparison here.
        if output_image == None:
            output_image = pil_images[i]
            continue
        else:
            pil_image1 = output_image
            output_image = Image.alpha_composite(pil_image1, pil_images[i])

    # Flatten to RGB for saving as JPEG.
    output_image = output_image.convert("RGB")

    return output_image

    # ------------------------------------------------------------------
    # NOTE(review): everything below is UNREACHABLE (it follows the
    # return). It is leftover from the pre-refactor script version, where
    # `output` came from the CLI; kept verbatim. The exact original
    # indentation of the close_lip_image lines could not be recovered —
    # TODO confirm before resurrecting this code.
    # ------------------------------------------------------------------
    name,ext = os.path.splitext(output)

    if ext == "":
        output += ".jpg"

    output_image_path = output.replace(".png",".jpg")
    output_image.save(output_image_path)
    cv2.imwrite(output_image_path.replace(".jpg","_mask.jpg"),jaw_mask)

    if close_lip_image is not None:
        pass
        cv2.imwrite("close-lip.jpg",close_lip_image)

    print(f"open-mouth created {output}")