meta-artem committed • Commit a976824 • Parent(s): e56d7ba
Update app.py
app.py CHANGED
@@ -1,27 +1,23 @@
 import os
-
-os.system("git clone https://github.com/google-research/frame-interpolation")
 import sys
-
-sys.path.append("frame-interpolation")
 import numpy as np
 import tensorflow as tf
 import mediapy
 from PIL import Image
-from eval import interpolator, util
 import gradio as gr
-
 from huggingface_hub import snapshot_download
 
-
+# Clone the repository and add the path
+os.system("git clone https://github.com/google-research/frame-interpolation")
+sys.path.append("frame-interpolation")
 
+# Import after appending the path
+from eval import interpolator, util
 
 def load_model(model_name):
     model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
-
     return model
 
-
 model_names = [
     "akhaliq/frame-interpolation-film-style",
     "NimaBoscarino/frame-interpolation_film_l1",
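Context for the hunk above: load_model works because snapshot_download caches the Hub repo locally and returns the cache directory, which FILM's Interpolator then loads as its saved-model path. A minimal hedged sketch, using one of the repo ids from the list above; the printed path is illustrative and varies by machine:

from huggingface_hub import snapshot_download

# snapshot_download fetches (or reuses) the cached repo and returns its
# local directory path as a string.
local_dir = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
print(local_dir)  # e.g. somewhere under ~/.cache/huggingface/

# That directory is what the app hands to FILM's interpolator:
# model = interpolator.Interpolator(local_dir, None)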
@@ -33,26 +29,27 @@ models = {model_name: load_model(model_name) for model_name in model_names}
 ffmpeg_path = util.get_ffmpeg_path()
 mediapy.set_ffmpeg(ffmpeg_path)
 
-
 def resize(width, img):
-
-
-    wpercent = (basewidth / float(img.size[0]))
+    img = Image.fromarray(img)
+    wpercent = (width / float(img.size[0]))
     hsize = int((float(img.size[1]) * float(wpercent)))
-    img = img.resize((
+    img = img.resize((width, hsize), Image.LANCZOS)
     return img
 
+def resize_and_crop(img_path, size, crop_origin="middle"):
+    img = Image.open(img_path)
+    img = img.resize(size, Image.LANCZOS)
+    return img
 
-def resize_img(img1,
+def resize_img(img1, img2_path):
     img_target_size = Image.open(img1)
     img_to_resize = resize_and_crop(
-
+        img2_path,
         (img_target_size.size[0], img_target_size.size[1]), # set width and height to match img1
         crop_origin="middle"
     )
     img_to_resize.save('resized_img2.png')
 
-
 def predict(frame1, frame2, times_to_interpolate, model_name):
     model = models[model_name]
 
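The rewritten resize helper fixes a latent bug visible in the deletion above: the old body computed wpercent from basewidth even though the parameter is named width, and it never converted its input, while Gradio image components typically deliver frames as NumPy arrays. A standalone sketch of the new helper; the shape check at the end is illustrative, not from the commit:

import numpy as np
from PIL import Image

def resize(width, img):
    img = Image.fromarray(img)                   # Gradio passes a NumPy array
    wpercent = width / float(img.size[0])        # scale factor from target width
    hsize = int(float(img.size[1]) * wpercent)   # height preserving aspect ratio
    return img.resize((width, hsize), Image.LANCZOS)

frame = np.zeros((100, 200, 3), dtype=np.uint8)  # 200 wide, 100 tall
assert resize(100, frame).size == (100, 50)      # 2:1 aspect ratio preserved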
@@ -72,7 +69,6 @@ def predict(frame1, frame2, times_to_interpolate, model_name):
     mediapy.write_video("out.mp4", frames, fps=30)
     return "out.mp4"
 
-
 title = "frame-interpolation"
 description = "Gradio demo for FILM: Frame Interpolation for Large Scene Motion. To use it, simply upload your images and add the times to interpolate number or click on one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
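The diff elides predict's unchanged body (new lines 56-68) between the hunks. Based on the evaluation helpers the FILM repository documents, that body presumably saves both frames, equalizes their sizes with the helpers above, and interpolates recursively; a sketch under that assumption, written in the file's own namespace, with hypothetical file names and a guessed working width:

def predict(frame1, frame2, times_to_interpolate, model_name):
    model = models[model_name]
    # Assumed flow: persist both input frames, match the second frame's size
    # to the first, then run FILM's recursive interpolation over the pair.
    resize(256, frame1).save("frame1.png")   # width 256 is a guess
    resize(256, frame2).save("frame2.png")
    resize_img("frame1.png", "frame2.png")   # writes resized_img2.png
    input_frames = ["frame1.png", "resized_img2.png"]
    frames = list(util.interpolate_recursively_from_files(
        input_frames, times_to_interpolate, model))
    mediapy.write_video("out.mp4", frames, fps=30)
    return "out.mp4"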
@@ -82,16 +78,16 @@ examples = [
 ]
 
 gr.Interface(
-    predict,
-    [
-        gr.
-        gr.
-        gr.
-        gr.
+    fn=predict,
+    inputs=[
+        gr.Image(label="First Frame"),
+        gr.Image(label="Second Frame"),
+        gr.Number(label="Times to Interpolate", value=2),
+        gr.Dropdown(label="Model", choices=model_names),
     ],
-    "
+    outputs=gr.Video(label="Interpolated Frames"),
     title=title,
     description=description,
     article=article,
-    examples=examples
-).launch(
+    examples=examples,
+).launch()
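The bare "gr." deletion lines above were truncated by the page extraction and are left as recorded; they appear to be the older positional component style that this commit replaces with Gradio's current keyword-based API. A self-contained sketch of the same wiring with a stub callback, runnable without the FILM dependencies; the stub name and dropdown choices are illustrative:

import gradio as gr

def stub_predict(frame1, frame2, times_to_interpolate, model_name):
    # Stand-in for the real FILM pipeline; returns a placeholder video path.
    return "out.mp4"

demo = gr.Interface(
    fn=stub_predict,
    inputs=[
        gr.Image(label="First Frame"),
        gr.Image(label="Second Frame"),
        gr.Number(label="Times to Interpolate", value=2),
        gr.Dropdown(label="Model", choices=["model-a", "model-b"]),
    ],
    outputs=gr.Video(label="Interpolated Frames"),
)

demo.launch()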