import gradio as gr
from delf import DeepLocalFeatures
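# Load the DELF model once at import time so the weights are read a single time
# per process. delf.match() is assumed (from this repo's delf module) to take two
# PIL images and return a PIL image visualizing the matched keypoints.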
delf = DeepLocalFeatures()
def predict(image_a, image_b):
    # Inputs arrive as PIL images because the gr.Image components below use type="pil".
    return delf.match(image_a, image_b)
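# Hypothetical hardening (not in the original): Gradio passes None when an image
# slot is left empty, and delf.match() would likely fail on None inputs, so a
# deployment might surface a friendly error instead. A minimal sketch:
#
#     def predict(image_a, image_b):
#         if image_a is None or image_b is None:
#             raise gr.Error("Please provide both images before running the match.")
#         return delf.match(image_a, image_b)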
footer = r"""
Demo for DELF
"""
coffe = r"""
"""
with gr.Blocks(title="DELF") as app:
    gr.HTML("Match images using DELF")
    gr.HTML(
        "Neural network and logic for processing images to identify keypoints and their "
        "descriptors."
    )
    with gr.Row(equal_height=False):
        with gr.Column():
            with gr.Row(equal_height=True):
                with gr.Column():
                    input_img_a = gr.Image(type="pil", label="Input image A")
                with gr.Column():
                    input_img_b = gr.Image(type="pil", label="Input image B")
            run_btn = gr.Button(variant="primary")
        with gr.Column():
            output_img = gr.Image(type="pil", label="Output image")
            gr.ClearButton(components=[input_img_a, input_img_b, output_img], variant="stop")
    run_btn.click(predict, [input_img_a, input_img_b], [output_img])
    with gr.Row():
        # Example thumbnails for input A; clicking a sample copies its image into the
        # component (the lambda unpacks the single-element sample row).
        blobs_a = [[f"examples/image_a/{x:02d}.jpg"] for x in range(1, 5)]
        examples_a = gr.Dataset(components=[input_img_a], samples=blobs_a)
        examples_a.click(lambda x: x[0], [examples_a], [input_img_a])
    with gr.Row():
        # Example thumbnails for input B, wired the same way as input A above.
        blobs_b = [[f"examples/image_b/{x:02d}.jpg"] for x in range(1, 5)]
        examples_b = gr.Dataset(components=[input_img_b], samples=blobs_b)
        examples_b.click(lambda x: x[0], [examples_b], [input_img_b])
    with gr.Row():
        gr.HTML(footer)
# Enable queuing before launching: launch() blocks the main thread in a script,
# so a queue() call placed after it never takes effect.
app.queue()
app.launch(share=False, debug=True, show_error=True)
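# A minimal usage sketch, assuming this file is saved as app.py in the repo root
# next to the delf module and the examples/ folder:
#
#     python app.py
#
# share=False keeps the demo local; setting share=True would request a temporary
# public Gradio URL instead.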