import gc

import gradio as gr
import torch
from huggingface_hub import hf_hub_download
from pytorchvideo.data.encoded_video import EncodedVideo
from pytorchvideo.transforms.functional import uniform_temporal_subsample
from torchvision.io import write_video
from torchvision.transforms.functional import InterpolationMode, resize

from modeling import Generator

MAX_DURATION = 4  # max seconds of input video to process
OUT_FPS = 18  # frame rate of the output clip
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Reupload of the model found here: https://huggingface.co/spaces/awacke1/Image2LineDrawing
# Generator(3, 1, 3): RGB in, one-channel line drawing out (the third argument is
# presumably the residual block count; see modeling.py)
model = Generator(3, 1, 3)
weights_path = hf_hub_download("nateraw/image-2-line-drawing", "pytorch_model.bin")
model.load_state_dict(torch.load(weights_path, map_location=DEVICE))
model.to(DEVICE)
model.eval()
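
# A (commented-out) sanity check, assuming the generator is fully convolutional
# and preserves spatial size; the shapes here are illustrative, not guaranteed:
#   with torch.no_grad():
#       assert model(torch.rand(1, 3, 256, 256).to(DEVICE)).shape == (1, 1, 256, 256)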


def process_one_second(vid, start_sec, out_fps):
    """Process one second of a video at a given output fps.

    Args:
        vid (EncodedVideo): The video to process.
        start_sec (int): The second to start processing at.
        out_fps (int): The number of frames to produce for this second.

    Returns:
        torch.Tensor: The processed clip with shape (T, H, W, C), scaled to 0-255.
    """
    # C, T, H, W
    video_arr = vid.get_clip(start_sec, start_sec + 1)["video"]
    # C, T, H, W, where T == out_fps frames sampled uniformly from this second
    x = uniform_temporal_subsample(video_arr, out_fps)
    # C, T, H, W, with the shorter side scaled to 256 (portrait videos end up larger
    # than 256 pixels tall, so they will process more slowly)
    x = resize(x, 256, InterpolationMode.BICUBIC)
    # C, T, H, W -> T, C, H, W (T now acts as the batch dimension)
    x = x.permute(1, 0, 2, 3).to(DEVICE)
    with torch.no_grad():
        # T, 1, H, W
        out = model(x)
    # T, C, H, W -> T, H, W, C, rescaled from 0-1 to 0-255
    out = out.permute(0, 2, 3, 1).clip(0, 1) * 255
    # Greyscale -> RGB by repeating the single channel
    out = out.repeat(1, 1, 1, 3)
    return out.cpu()
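

# Example usage (hypothetical file name), independent of the Gradio UI:
#   vid = EncodedVideo.from_path("example.mp4")
#   clip = process_one_second(vid, start_sec=0, out_fps=OUT_FPS)  # -> (OUT_FPS, H, W, 3)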

def fn(fpath):
    """Run the line-drawing model over the first MAX_DURATION seconds of the video
    at `fpath` and return the path of the rendered mp4."""
    vid = EncodedVideo.from_path(fpath)
    duration = min(MAX_DURATION, int(vid.duration))
    clips = []
    for i in range(duration):
        print(f"🖼️ Processing step {i + 1}/{duration}...")
        clips.append(process_one_second(vid, start_sec=i, out_fps=OUT_FPS))
        # Free intermediate tensors between seconds to keep peak memory down
        gc.collect()
    # write_video expects uint8 frames in (T, H, W, C)
    video_all = torch.cat(clips).to(torch.uint8)
    write_video("out.mp4", video_all, fps=OUT_FPS)
    return "out.mp4"
webcam_interface = gr.Interface(
    fn, gr.Video(source="webcam"), gr.Video(type="file", format="mp4")
)
video_interface = gr.Interface(
    fn, gr.Video(type="file"), gr.Video(type="file", format="mp4")
)
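
# When app.py is run directly (e.g. `python app.py`), serve both UIs as tabs.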

if __name__ == "__main__":
    gr.TabbedInterface(
        [webcam_interface, video_interface],
        ["Run on Your Webcam!", "Run on Videos!"],
    ).launch()