import os

# facenet_pytorch is installed at runtime so it doesn't need to be baked into the image
os.system("pip -qq install facenet_pytorch")

import torch
import PIL.Image
import gradio as gr
from torchvision import transforms
from facenet_pytorch import MTCNN
from huggingface_hub import hf_hub_download
# download the TorchScript ArcaneGAN checkpoints from the Hugging Face Hub
modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit")
modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit")
modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit")

# simplest ye olde trustworthy MTCNN for face detection with landmarks
mtcnn = MTCNN(image_size=256, margin=80)
def detect(img):
    # detect faces and facial landmarks
    batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True)
    # select the best face(s) according to the configured selection method
    if not mtcnn.keep_all:
        batch_boxes, batch_probs, batch_points = mtcnn.select_boxes(
            batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method
        )
    return batch_boxes, batch_points
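# A hedged usage sketch (illustration only, not wired into the app):
# 'portrait.jpg' is a hypothetical local file.
def _demo_detect(path="portrait.jpg"):
    img = PIL.Image.open(path).convert("RGB")
    boxes, points = detect(img)
    print("boxes:", boxes)        # None when no face is found
    print("landmarks:", points)   # five (x, y) points per detected face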
# my version of isOdd, should make a separate repo for it :D
# rounds a dimension up to the nearest even integer
def makeEven(_x):
    return int(_x) if (_x % 2 == 0) else int(_x + 1)
# the actual scaler function
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    """
    A useful scaler algorithm, based on face detection.
    Takes a PIL.Image, returns a uniformly scaled PIL.Image.

    boxes: a list of detected bboxes
    _img: PIL.Image
    max_res: maximum pixel area to fit into; use it to stay below the VRAM limits of your GPU
    target_face: desired face size; the whole image is up- or downscaled to fit the detected face into that dimension
    fixed_ratio: fixed scale; ignores the face size, but doesn't ignore the max_res limit
    max_upscale: maximum upscale ratio; prevents images with tiny faces from being scaled into a blurry mess
    """
    x, y = _img.size
    ratio = 2  # initial ratio

    # scale so the detected face reaches the desired size
    if boxes is not None and len(boxes) > 0:
        ratio = target_face / max(boxes[0][2:] - boxes[0][:2])
        ratio = min(ratio, max_upscale)
        if VERBOSE: print('up by', ratio)

    if fixed_ratio > 0:
        if VERBOSE: print('fixed ratio')
        ratio = fixed_ratio

    x *= ratio
    y *= ratio

    # downscale to fit into max_res
    res = x * y
    if res > max_res:
        ratio = pow(res / max_res, 1 / 2)
        if VERBOSE: print(ratio)
        x = int(x / ratio)
        y = int(y / ratio)

    # make dimensions even, because NNs with skip connections usually fail on
    # odd sizes due to encoder/decoder feature-map size mismatches
    x = makeEven(int(x))
    y = makeEven(int(y))

    return _img.resize((x, y))
"""
A useful scaler algorithm, based on face detection.
Takes PIL.Image, returns a uniformly scaled PIL.Image
boxes: a list of detected bboxes
_img: PIL.Image
max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU.
target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension.
fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit.
max_upscale: maximum upscale ratio. Prevents from scaling images with tiny faces to a blurry mess.
"""
def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    boxes, _ = detect(_img)
    if VERBOSE: print('boxes', boxes)
    return scale(boxes, _img, max_res, target_face, fixed_ratio, max_upscale, VERBOSE)
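# A hedged usage sketch (illustration only, not wired into the app):
# 'portrait.jpg' is a hypothetical local file.
def _demo_scale(path="portrait.jpg"):
    img = PIL.Image.open(path).convert("RGB")
    resized = scale_by_face_size(img, target_face=256, max_res=1_500_000, VERBOSE=True)
    print(resized.size)  # both dimensions even, area at most max_res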
# ImageNet normalization constants; the half()/float() round trip keeps them
# numerically consistent with the models, which are loaded the same way below
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]
t_stds = torch.tensor(stds).cpu().half().float()[:, None, None]
t_means = torch.tensor(means).cpu().half().float()[:, None, None]
img_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds)])

# undo the normalization and convert CHW in [0, 1] back to HWC in [0, 255]
def tensor2im(var):
    return var.mul(t_stds).add(t_means).mul(255.).clamp(0, 255).permute(1, 2, 0)
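# A hedged sanity-check sketch (illustration only): tensor2im should
# approximately invert img_transforms, up to half-precision rounding.
def _demo_roundtrip():
    img = PIL.Image.new("RGB", (8, 8), (128, 64, 32))
    t = img_transforms(img)   # normalized CHW float tensor
    back = tensor2im(t)       # HWC tensor with values in [0, 255]
    print(back.shape)         # torch.Size([8, 8, 3])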
def proc_pil_img(input_image, model):
    # add a batch dimension and match the models' half-then-float precision
    transformed_image = img_transforms(input_image)[None, ...].cpu().half().float()
    with torch.no_grad():
        result_image = model(transformed_image)[0]
        output_image = tensor2im(result_image)
        output_image = output_image.detach().cpu().numpy().astype('uint8')
        output_image = PIL.Image.fromarray(output_image)
    return output_image
# load the TorchScript checkpoints on CPU; half()/float() rounds the weights
# through half precision, matching the input tensors prepared above
modelv4 = torch.jit.load(modelarcanev4, map_location='cpu').eval().cpu().half().float()
modelv3 = torch.jit.load(modelarcanev3, map_location='cpu').eval().cpu().half().float()
modelv2 = torch.jit.load(modelarcanev2, map_location='cpu').eval().cpu().half().float()
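# A hedged end-to-end sketch (illustration only, not wired into the app):
# 'portrait.jpg' and 'arcane_portrait.jpg' are hypothetical local files.
def _demo_stylize(path="portrait.jpg"):
    img = PIL.Image.open(path).convert("RGB")
    img = scale_by_face_size(img, target_face=256, max_res=1_500_000, max_upscale=1)
    out = proc_pil_img(img, modelv4)
    out.save("arcane_portrait.jpg")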
def version4(im):
    im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
    return proc_pil_img(im, modelv4)

def version3(im):
    im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
    return proc_pil_img(im, modelv3)

def version2(im):
    im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
    return proc_pil_img(im, modelv2)
block = gr.Blocks()
with block:
    gr.Markdown("Gradio Demo for ArcaneGAN, portrait to Arcane style. To use it, simply upload your image. Try out the different versions by clicking on the tabs. Please use a cropped portrait picture for best results.")
    with gr.Tab("version four"):
        with gr.Row():
            facepaint4 = gr.Image(type="pil", shape=(512, 512))
            faceout4 = gr.Image(type="pil")
        face_run4 = gr.Button("Run")
        face_run4.click(version4, inputs=facepaint4, outputs=faceout4)
    with gr.Tab("version three"):
        with gr.Row():
            facepaint3 = gr.Image(type="pil")
            faceout3 = gr.Image(type="pil")
        face_run3 = gr.Button("Run")
        face_run3.click(version3, inputs=facepaint3, outputs=faceout3)
    with gr.Tab("version two"):
        with gr.Row():
            facepaint2 = gr.Image(type="pil")
            faceout2 = gr.Image(type="pil")
        face_run2 = gr.Button("Run")
        face_run2.click(version2, inputs=facepaint2, outputs=faceout2)

block.launch(enable_queue=True)