mie035 committed on
Commit
fd98010
1 Parent(s): ac4bd16
Files changed (3) hide show
  1. app.py +75 -3
  2. hair_segmenter.tflite +3 -0
  3. requirements.txt +19 -0
app.py CHANGED
@@ -1,7 +1,79 @@
1
  import gradio as gr
 
 
 
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  iface.launch()
 
1
  import gradio as gr
2
+ import cv2
3
+ import math
4
+ import numpy as np
5
+ import os
6
+ import numpy as np
7
+ import mediapipe as mp
8
 
9
+ from mediapipe.tasks import python
10
+ from mediapipe.tasks.python import vision
11
 
12
+ print("hello world")
13
+ # Height and width that will be used by the model
14
+ DESIRED_HEIGHT = 480
15
+ DESIRED_WIDTH = 480
16
+
17
+ # Performs resizing and showing the image
18
def resize_and_show(image):
    """Shrink *image* so it fits DESIRED_WIDTH x DESIRED_HEIGHT (keeping its
    aspect ratio), then display it in an OpenCV window for one second.

    Args:
        image: BGR image as a NumPy array (``shape == (h, w, channels)``).
    """
    height, width = image.shape[:2]
    if height < width:
        # Landscape: pin the width, scale height by the same factor.
        target = (DESIRED_WIDTH, math.floor(height / (width / DESIRED_WIDTH)))
    else:
        # Portrait / square: pin the height, scale width by the same factor.
        target = (math.floor(width / (height / DESIRED_HEIGHT)), DESIRED_HEIGHT)
    shrunk = cv2.resize(image, target)
    cv2.imshow('color', shrunk)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()
27
+
28
def segmentate(image):
    """Segment the hair region of *image* and return a mask visualization.

    Args:
        image: Input picture. Gradio passes a ``PIL.Image`` (``type='pil'`` in
            the interface below); it is converted to a ``mediapipe.Image``
            before segmentation. A ``mp.Image`` is also accepted unchanged.

    Returns:
        np.ndarray: Array the same shape as the input where detected hair
        pixels are white (255, 255, 255) and everything else is gray
        (192, 192, 192).
    """
    BG_COLOR = (192, 192, 192)    # gray  — non-hair pixels
    MASK_COLOR = (255, 255, 255)  # white — hair pixels

    # Bug fix: ImageSegmenter.segment() requires a mediapipe.Image, but the
    # Gradio input component delivers a PIL image — convert when necessary
    # (previously this raised on every real invocation).
    if not isinstance(image, mp.Image):
        rgb = np.asarray(image.convert('RGB'), dtype=np.uint8)
        image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb)

    # Create the options that will be used for ImageSegmenter.
    base_options = python.BaseOptions(model_asset_path='./hair_segmenter.tflite')
    options = vision.ImageSegmenterOptions(base_options=base_options,
                                           output_category_mask=True)

    # `with` guarantees the segmenter's native resources are released.
    with vision.ImageSegmenter.create_from_options(options) as segmenter:
        # Retrieve the category mask for the segmented image.
        segmentation_result = segmenter.segment(image)
        category_mask = segmentation_result.category_mask

        # Solid-color canvases used to compose the output visualization.
        image_data = image.numpy_view()
        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
        fg_image[:] = MASK_COLOR
        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
        bg_image[:] = BG_COLOR

        # Broadcast the single-channel mask to 3 channels and threshold it
        # (same 0.2 cut-off as the official MediaPipe sample).
        condition = np.stack((category_mask.numpy_view(),) * 3, axis=-1) > 0.2
        return np.where(condition, fg_image, bg_image)
59
+
60
# GUI
title = 'mediapipe hair segmentation'
description = 'hair segmentation using mediapipe'

# Bug fix: the interface declares exactly ONE input component, so each
# example row must contain exactly one value — the original appended a
# stray `3`, which breaks the examples panel. Also tolerate a missing
# examples/ directory instead of crashing with FileNotFoundError at import.
if os.path.isdir('examples'):
    examples = [[f'examples/{name}'] for name in sorted(os.listdir('examples'))]
else:
    examples = None

iface = gr.Interface(
    fn=segmentate,
    inputs=[
        gr.Image(type='pil', label='Input Image')
    ],
    outputs=[
        gr.Image(label='image segmentated')
    ],
    examples=examples,
    allow_flagging='never',
    cache_examples=False,
    title=title,
    description=description
)
iface.launch()
hair_segmenter.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2628cf3ce5f695f604cbea2841e00befcaa3624bf80caf3664bef2656d59bf84
3
+ size 781618
requirements.txt ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio==3.41.0
2
+ absl-py
3
+ attrs
4
+ cffi
5
+ contourpy
6
+ cycler
7
+ flatbuffers
8
+ fonttools
9
+ kiwisolver
10
+ matplotlib
11
+ mediapipe
12
+ numpy
13
+ opencv-contrib-python
14
+ opencv-python
15
+ pillow
16
+ protobuf
17
+ pycparser
18
+ pyparsing
19
+ sounddevice