import os
from ast import literal_eval

import gradio as gr
import numpy as np
from matplotlib import pyplot as plt

# Local feature-extraction helpers: get_features_left/right/up/down and
# final_features are provided by segment_key.
from segment_key import *

def show_kps(contour):
    """Debug helper: plot the four directional feature profiles of a contour."""
    indices = range(0, 310)

    # Right/left profiles: feature value on the x-axis, index on the y-axis.
    for features in (get_features_right(contour), get_features_left(contour)):
        points = list(zip(features, indices))
        x_coords = [point[0] for point in points]
        y_coords = [point[1] for point in points]
        plt.scatter(x_coords, y_coords, c='red', marker='o', label='Keypoints')

    # Up/down profiles: index on the x-axis, feature value on the y-axis.
    for features in (get_features_up(contour), get_features_down(contour)):
        points = list(zip(indices, features))
        x_coords = [point[0] for point in points]
        y_coords = [point[1] for point in points]
        plt.scatter(x_coords, y_coords, c='red', marker='o', label='Keypoints')

    plt.show()

def get_all_features():
    """Load (image_name, feature_vector) pairs from the feature database."""
    contours = []
    with open('prediction/database.txt', 'r') as file:
        lines = file.readlines()

    for line in lines:
        parts = line.split(';')
        name, raw_features = parts[0], parts[1]
        # Turn empty entries (consecutive commas) into empty strings so
        # literal_eval can parse the list.
        raw_features = raw_features.replace(",,", ",'',")
        features = np.array(literal_eval(raw_features))
        contours.append((name, features))
    return contours

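# Assumed database layout (inferred from the parsing above, not documented
# elsewhere): each line of prediction/database.txt holds an image name and a
# Python-literal feature list separated by a semicolon, for illustration:
#   some_key.jpg;[1.0, 2.0, ...]
# with missing values appearing as consecutive commas.
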
def cos_similarity(feature1, feature2):
    return np.dot(feature1, feature2) / (np.linalg.norm(feature1) * np.linalg.norm(feature2))

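# predict_match below converts this similarity into a distance
# (1 - cos_similarity): 0 means identical feature vectors, larger values
# mean a worse match.
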
def predict_match(image_path):
    """Return the name of the database image whose features best match the input."""
    # Name of the expected ground-truth image for this input
    # (the filename convention strips an 11-character suffix).
    main_name = os.path.basename(image_path)[:-11] + '.jpg'
    main_feature = final_features(image_path)

    # Rank every database entry by cosine distance (1 - cosine similarity).
    distances = []
    for name, feature in get_all_features():
        distances.append([name, 1 - cos_similarity(feature, main_feature)])
    distances.sort(key=lambda x: x[1])

    print(distances)
    print(distances[0])

    # Diagnostic only (unused): position of the expected match in the ranking.
    index_in_list = -1
    for i in range(len(distances)):
        if distances[i][0] == main_name:
            index_in_list = i

    # Best match is the entry with the smallest distance.
    return distances[0][0]

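# Local sanity check (hypothetical path, shown for illustration only):
#   print(predict_match("prediction/images/example_key_contour.jpg"))
# would print the name of the closest database entry.
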
# Create the Gradio interface
iface = gr.Interface(
    fn=predict_match,
    # predict_match expects a file path (it calls os.path.basename on it),
    # so the upload is passed through as a filepath rather than a numpy array.
    inputs=gr.Image(type='filepath'),
    outputs=["text"],
    title="Key Contour Matching",
    description="Upload a key image to find its closest match in the feature database.",
)

# Launch the interface
iface.launch()