Update app.py
app.py
CHANGED
@@ -2,11 +2,6 @@ import os
 import cv2
 import gradio as gr
 import numpy as np
-import random
-import base64
-import requests
-import json
-import time
 from transformers import DetrForObjectDetection, DetrImageProcessor
 import torch
 
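The imports that remain (DetrForObjectDetection, DetrImageProcessor, torch) imply the detector is created once at module scope; that part of app.py is outside this diff. A minimal sketch of what the setup presumably looks like, with the checkpoint name as an assumption:

# Sketch only; the actual checkpoint and variable names are not shown in this diff.
import torch
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")  # assumed checkpoint
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")  # assumed checkpoint
model.eval()  # inference only, no gradients needed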
@@ -25,7 +20,7 @@ def detect_face_and_neck(image):
     face_box = None
     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
         if score > 0.7:
-            if label == 1:  # Person
+            if label == 1:  # Person (this can include neck)
                 neck_box = box
             elif label == 2:  # Face
                 face_box = box
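The results mapping read here (keys "scores", "labels", "boxes") matches the output of DetrImageProcessor.post_process_object_detection, which reports boxes as (xmin, ymin, xmax, ymax). A hedged sketch of how detect_face_and_neck presumably builds it, reusing the processor/model globals sketched above; note that label ids follow the checkpoint's id2label map, and in the stock COCO-trained DETR label 1 is "person" and there is no face class, so the label == 2 branch assumes a face-capable checkpoint:

def detect_face_and_neck(image):
    # Sketch, not the exact body from app.py: run DETR on a NumPy image and
    # post-process into a per-image dict with "scores", "labels", "boxes".
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    target_sizes = torch.tensor([image.shape[:2]])  # (height, width) of the input
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=0.5
    )[0]

    face_box, neck_box = None, None
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        if score > 0.7:
            if label == 1:    # "person" in the assumed id2label mapping
                neck_box = box
            elif label == 2:  # "face" only if the checkpoint defines such a class
                face_box = box
    return face_box, neck_box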
@@ -33,24 +28,37 @@ def detect_face_and_neck(image):
     return face_box, neck_box
 
 # Function to overlay jewelry on the detected regions
-def place_jewelry(image, jewelry_image,
+def place_jewelry(image, jewelry_image, position):
     x, y, w, h = position
-    resized_jewelry = cv2.resize(jewelry_image, (w, h))
+    resized_jewelry = cv2.resize(jewelry_image, (int(w), int(h)))
 
-    image
+    # Ensure that the image has an alpha channel (RGBA) for blending
+    if resized_jewelry.shape[2] == 4:
+        # Blending using alpha transparency
+        for c in range(0, 3):
+            image[y:y+h, x:x+w, c] = resized_jewelry[:, :, c] * (resized_jewelry[:, :, 3] / 255.0) + image[y:y+h, x:x+w, c] * (1.0 - resized_jewelry[:, :, 3] / 255.0)
+    else:
+        image[y:y+h, x:x+w] = resized_jewelry
 
     return image
 
 # Try-on function for jewelry
 def tryon_jewelry(person_img, jewelry_img, jewelry_type):
+    # Ensure images are valid
+    if person_img is None or jewelry_img is None:
+        return None
+
+    # Detect face and neck using Hugging Face model
     face_box, neck_box = detect_face_and_neck(person_img)
-
+
     if jewelry_type == "Necklace" and neck_box is not None:
-
+        # Apply necklace on neck region
+        result_img = place_jewelry(person_img, jewelry_img, neck_box)
     elif jewelry_type == "Earrings" and face_box is not None:
-
+        # Assuming ears are part of the face box for simplicity
+        result_img = place_jewelry(person_img, jewelry_img, face_box)
     else:
-        result_img = person_img  #
+        result_img = person_img  # If no detection, return original image
 
     return result_img
 
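One caveat on the new place_jewelry: post_process_object_detection returns corner boxes (xmin, ymin, xmax, ymax), while the function unpacks x, y, w, h and slices y:y+h, x:x+w, so passing a DETR box straight through would treat xmax/ymax as a width and height. A sketch of the conversion and clamping that would typically precede the blend; box_to_xywh and clamp_box are illustrative helpers, not names from app.py:

def box_to_xywh(box):
    # Illustrative helper: convert (xmin, ymin, xmax, ymax) to (x, y, w, h).
    xmin, ymin, xmax, ymax = [int(v) for v in box]
    return xmin, ymin, xmax - xmin, ymax - ymin

def clamp_box(x, y, w, h, image):
    # Illustrative helper: keep the paste region inside the image so the
    # y:y+h, x:x+w slice always matches the resized overlay exactly.
    img_h, img_w = image.shape[:2]
    x, y = max(0, x), max(0, y)
    w = min(w, img_w - x)
    h = min(h, img_h - y)
    return x, y, w, h

Usage would look like x, y, w, h = clamp_box(*box_to_xywh(neck_box), person_img) before the cv2.resize call, so the resized overlay and the target slice have the same shape.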
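For completeness, a minimal sketch of how tryon_jewelry would typically be wired into a Gradio interface for this Space; the actual UI code is not part of this diff, so the component choices here are assumptions (image_mode="RGBA" keeps the alpha channel that place_jewelry checks for):

import gradio as gr

demo = gr.Interface(
    fn=tryon_jewelry,
    inputs=[
        gr.Image(type="numpy", label="Person"),
        gr.Image(type="numpy", image_mode="RGBA", label="Jewelry (RGBA PNG works best)"),
        gr.Radio(["Necklace", "Earrings"], label="Jewelry type"),
    ],
    outputs=gr.Image(type="numpy", label="Result"),
    title="Jewelry Try-On",
)

if __name__ == "__main__":
    demo.launch()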