Pavankunchala committed
Commit 19ddaea
1 Parent(s): b534665
Create app.py

app.py ADDED
@@ -0,0 +1,149 @@
import sys
import time
from pathlib import Path
import cv2
from openvino.inference_engine import IECore
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from PIL import Image
import tempfile

DEMO_IMAGE = 'dog-new.jpg'
DEMO_VIDEO = 'demo.mp4'
@st.cache
def normalize_minmax(data):
    # Scale the values in `data` to the 0-1 range.
    return (data - data.min()) / (data.max() - data.min())


@st.cache
def convert_result_to_image(result, colormap="inferno"):
    # Map the raw network output (1, H, W) to an (H, W, 3) uint8 color image.
    cmap = matplotlib.cm.get_cmap(colormap)
    result = result.squeeze(0)
    result = normalize_minmax(result)
    result = cmap(result)[:, :, :3] * 255
    result = result.astype(np.uint8)
    return result


@st.cache
def to_rgb(image_data) -> np.ndarray:
    # Swap the first and third color channels (BGR <-> RGB).
    return cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
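# For reference: a (1, H, W) float disparity map such as np.random.rand(1, 256, 256)
# comes back from convert_result_to_image() as an (H, W, 3) uint8 color image
# that st.image() can display directly.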
st.title("Depth Estimation App")
st.sidebar.title('Depth Estimation')
st.sidebar.subheader('Parameters')

DEVICE = "CPU"
MODEL_FILE = "models/MiDaS_small.xml"
model_xml_path = Path(MODEL_FILE)

# Load the MiDaS_small IR (.xml topology plus .bin weights) with the OpenVINO
# Inference Engine and compile it for the target device.
ie = IECore()
net = ie.read_network(model=model_xml_path, weights=model_xml_path.with_suffix(".bin"))
exec_net = ie.load_network(network=net, device_name=DEVICE)

input_key = list(exec_net.input_info)[0]
output_key = list(exec_net.outputs.keys())[0]
network_input_shape = exec_net.input_info[input_key].tensor_desc.dims
network_image_height, network_image_width = network_input_shape[2:]
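# The IR input dims are laid out as NCHW (for MiDaS_small typically
# (1, 3, 256, 256)), so dims[2:] above yields the (height, width) that the
# network expects.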
app_mode = st.sidebar.selectbox('Choose the App mode',
                                ['Run on Image', 'Run on Video'], index=0)

if app_mode == "Run on Image":
    st.markdown('Running on Image')
    st.sidebar.text('Params for Image')
    # Widen the sidebar so the original image preview fits comfortably.
    st.markdown(
        """
        <style>
        [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
            width: 400px;
        }
        [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
            width: 400px;
            margin-left: -400px;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
    img_file_buffer = st.sidebar.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

    if img_file_buffer is not None:
        image = np.array(Image.open(img_file_buffer))
    else:
        demo_image = DEMO_IMAGE
        image = np.array(Image.open(demo_image))

    st.sidebar.text('Original Image')
    st.sidebar.image(image)

    # cv2.resize expects dsize as (width, height).
    resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))
    # Reshape the image to the network input shape NCHW.
    input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)
    result = exec_net.infer(inputs={input_key: input_image})[output_key]

    # Convert the network result (a disparity map) to an image that shows
    # distance as colors.
    result_image = convert_result_to_image(result=result)
    # Resize back to the original image shape. cv2.resize expects the shape as
    # (width, height); [::-1] reverses the (height, width) shape to match this.
    result_image = cv2.resize(result_image, image.shape[:2][::-1])

    st.subheader('Output Image')
    st.image(result_image, use_column_width=True)
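    # Note: MiDaS-style models predict relative inverse depth (disparity), so
    # nearer surfaces get larger values and brighter colors in the rendered map.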
if app_mode == 'Run on Video':
    st.markdown('Running on Video')

    use_webcam = st.sidebar.button('Use Webcam')
    video_file_buffer = st.sidebar.file_uploader("Upload a video", type=["mp4", "mov", "avi", "asf", "m4v"])
    tffile = tempfile.NamedTemporaryFile(delete=False)

    stop_button = st.sidebar.button('Stop Processing')
    if stop_button:
        st.stop()

    # Pick the video source: uploaded file, webcam, or the bundled demo clip.
    if not video_file_buffer:
        if use_webcam:
            vid = cv2.VideoCapture(0)
        else:
            vid = cv2.VideoCapture(DEMO_VIDEO)
            tffile.name = DEMO_VIDEO
    else:
        tffile.write(video_file_buffer.read())
        vid = cv2.VideoCapture(tffile.name)
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps_input = int(vid.get(cv2.CAP_PROP_FPS))

    SCALE_OUTPUT = 1
    # The saved frames stack the input image on top of the depth map, so the
    # output video is twice as tall as a single (scaled) frame.
    codec = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
    out = cv2.VideoWriter('output_depth.mp4', codec, fps_input,
                          (int(width * SCALE_OUTPUT), int(height * SCALE_OUTPUT) * 2))

    start_time = time.perf_counter()
    total_inference_duration = 0

    stframe = st.empty()
    st.markdown("**Frame Rate**")
    kpi1_text = st.markdown("0")
    save_video = st.checkbox('Save video')
    while vid.isOpened():
        ret, image = vid.read()
        if not ret:
            vid.release()
            break

        new_time = time.time()
        input_video_frame_height, input_video_frame_width = image.shape[:2]
        target_frame_height = int(input_video_frame_height * SCALE_OUTPUT)
        target_frame_width = int(input_video_frame_width * SCALE_OUTPUT)

        # cv2.resize expects dsize as (width, height).
        resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))
        # Reshape the image to the network input shape NCHW.
        input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)

        inference_start_time = time.perf_counter()
        result = exec_net.infer(inputs={input_key: input_image})[output_key]
        inference_stop_time = time.perf_counter()
        inference_duration = inference_stop_time - inference_start_time
        total_inference_duration += inference_duration

        result_frame = to_rgb(convert_result_to_image(result))
        # Resize the image and the result to the target frame shape.
        result_frame = cv2.resize(result_frame, (target_frame_width, target_frame_height))
        image = cv2.resize(image, (target_frame_width, target_frame_height))
        # Stack the input frame and the depth map vertically.
        stacked_frame = np.vstack((image, result_frame))

        if save_video:
            out.write(stacked_frame)
        stframe.image(stacked_frame, channels='BGR', use_column_width=True)

        fps = 1.0 / (time.time() - new_time)
        kpi1_text.write(f"<h1 style='text-align: center; color: red;'>{'{:.1f}'.format(fps)}</h1>", unsafe_allow_html=True)

    vid.release()
    out.release()
    cv2.destroyAllWindows()
    st.success('Video is Processed')
    st.stop()
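app.py loads the model with the legacy Inference Engine API (openvino.inference_engine.IECore). Newer OpenVINO releases (2022.1 and later) expose the same functionality through openvino.runtime; purely as an untested sketch, assuming the same models/MiDaS_small.xml IR and CPU device, the equivalent load-and-infer steps would look roughly like this (with input_image prepared exactly as above):

from openvino.runtime import Core

core = Core()
model = core.read_model("models/MiDaS_small.xml")      # the .bin weights are found by name
compiled_model = core.compile_model(model, device_name="CPU")

input_layer = compiled_model.input(0)
output_layer = compiled_model.output(0)
n, c, h, w = input_layer.shape                         # NCHW input dims

result = compiled_model([input_image])[output_layer]   # same disparity map as exec_net.infer(...) above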