import os
import time

import cv2
import imutils
import numpy as np
import torch

import gradio as gr


def parse_video(video_file):
    """Read every frame of a video file into a single (T, H, W, 3) RGB array."""
    vs = cv2.VideoCapture(video_file)
    frames = []
    while True:
        (gotit, frame) = vs.read()
        if frame is not None:
            # OpenCV decodes to BGR; convert to RGB for the model/visualizer.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.append(frame)
        if not gotit:
            break
    return np.stack(frames)
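
# A quick sanity check of parse_video (the file name here is hypothetical):
#   frames = parse_video("examples/bear.mp4")
#   frames.shape  # -> (num_frames, height, width, 3), RGB, dtype uint8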


def cotracker_demo(
    input_video,
    grid_size: int = 10,
    tracks_leave_trace: bool = False,
):
    # Decode the uploaded video into a (T, H, W, 3) uint8 RGB array.
    load_video = parse_video(input_video)
    # Rearrange to (1, T, 3, H, W) float, the batched layout CoTracker expects.
    load_video = torch.from_numpy(load_video).permute(0, 3, 1, 2)[None].float()
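
    # The actual tracking/visualization step is omitted in this snippet.
    # A minimal sketch of what typically fills this gap, assuming the
    # torch.hub entry point from the CoTracker README (the "cotracker2"
    # model name is an assumption, not taken from this file):
    #
    #   model = torch.hub.load("facebookresearch/co-tracker", "cotracker2")
    #   pred_tracks, pred_visibility = model(load_video, grid_size=grid_size)
    #
    # The rendered result would then be written to the ./results path that
    # is returned below.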
    # Name the output file with a millisecond timestamp so repeated runs
    # do not overwrite each other.
    filename = str(round(time.time() * 1000))
    return os.path.join(
        os.path.dirname(__file__), "results", f"{filename}.mp4"
    )


app = gr.Interface(
    title="🎨 CoTracker: It is Better to Track Together",
    description="<div style='text-align: left;'> \
    <p>Welcome to <a href='http://co-tracker.github.io' target='_blank'>CoTracker</a>! This space demonstrates point (pixel) tracking in videos. \
    Points are sampled on a regular grid and tracked jointly.</p> \
    <p>To get started, upload your <b>.mp4</b> video in landscape orientation, or click one of the example videos to load it. The shorter the video, the faster the processing; we recommend short videos of <b>2-7 seconds</b>.</p> \
    <ul style='display: inline-block; text-align: left;'> \
    <li>The total number of grid points is the square of <b>Grid Size</b>.</li> \
    <li>Check <b>Visualize Track Traces</b> to draw the traces of all tracked points.</li> \
    </ul> \
    <p style='text-align: left'>For more details, check out our <a href='https://github.com/facebookresearch/co-tracker' target='_blank'>GitHub Repo</a> ⭐</p> \
    </div>",
    fn=cotracker_demo,
    inputs=[
        # One input component per argument of cotracker_demo, in order:
        gr.Video(type="file", label="Input video", interactive=True),
        gr.Slider(minimum=10, maximum=80, step=1, value=10, label="Grid Size"),
        gr.Checkbox(label="Visualize Track Traces"),
    ],
    outputs=gr.Video(label="Video with predicted tracks"),
    cache_examples=True,  # takes effect once an examples=[...] list is supplied
    allow_flagging="never",
)
app.queue(max_size=20, concurrency_count=1).launch(debug=True)
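
# Assuming this script is saved as app.py, run it locally with:
#   python app.py
# Gradio serves the demo at http://127.0.0.1:7860 by default.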