3v324v23 committed on
Commit 77f594c
1 Parent(s): 735182c

mostly merged

Files changed (7)
  1. .gitignore +2 -1
  2. animate.py +17 -15
  3. app.py +16 -14
  4. frame_interpolation +0 -1
  5. pyproject.toml +0 -14
  6. requirements.txt +1 -1
  7. test.py +17 -0
.gitignore CHANGED
@@ -3,7 +3,8 @@ flagged/
 .ipynb_checkpoints
 temp/
 */.ipynb_checkpoints/*
-
+frame-interpolation
+frame_interpolation
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
animate.py CHANGED
@@ -10,6 +10,7 @@ from image_tools.sizes import resize_and_crop
 from moviepy.editor import CompositeVideoClip
 from moviepy.editor import VideoFileClip as vfc
 from PIL import Image
+from pathlib import Path
 
 
 # get key positions at which frame needs to be generated
@@ -23,9 +24,8 @@ def list_of_positions(num_contours, num_frames=100):
 def contourfinder(image1, image2, text=None, num_frames=100):
     # Create two blank pages to write into
     # I just hardcoded 1024*1024 as the size, ideally this should be np.shape(image1)
-    blank = np.zeros((2048, 2048, 3), dtype="uint8")
-    blank2 = np.zeros((2048, 2048, 3), dtype="uint8")
-
+    blank = np.zeros(np.shape(image1), dtype="uint8")
+    blank2 = np.zeros(np.shape(image2), dtype="uint8")
     # Threshold and contours for image 1 and 2
     threshold = cv2.Canny(image=image1, threshold1=100, threshold2=200)
     contours, hierarchies = cv2.findContours(
@@ -60,14 +60,12 @@ def contourfinder(image1, image2, text=None, num_frames=100):
 
         if i in positions:
             frames.append(blank)
-
         # Compile to video
         vid1.write(blank)
-
+
     vid1.release()
-    clip1 = vfc("/home/user/app/vid1.mp4")
-    # print(f"Type clip 1{type(clip1)}")
-
+    full_dir_vid_1 = Path("vid1.mp4").resolve().as_posix()
+    clip1 = vfc(full_dir_vid_1)
     positions = list_of_positions((len(contours2)))
 
     for i in range(0, len(contours2)):
@@ -80,7 +78,8 @@ def contourfinder(image1, image2, text=None, num_frames=100):
         vid2.write(blank2)
 
     vid2.release()
-    clip3 = vfc("/home/user/app/vid2.mp4")
+    full_dir_vid_2 = Path("vid2.mp4").resolve().as_posix()
+    clip3 = vfc(full_dir_vid_2)
 
     # Next is the text vid
 
@@ -160,24 +159,27 @@ def resize(width, img):
     return img
 
 
+
+
+
 def resize_img(img1, img2):
-    img_target_size = Image.open(cv2_images[0])
+    img_target_size = Image.open(img1)
     img_to_resize = resize_and_crop(
-        cv2_images[1],
+        img2,
         (
            img_target_size.size[0],
            img_target_size.size[1],
        ),  # set width and height to match cv2_images[0]
        crop_origin="middle",
    )
-    img_to_resize.save("resized_cv2_images[1].png")
+    img_to_resize.save("resized_img2.png")
 
 
 def get_video_frames(images, times_to_interpolate=6, model_name_index=0):
     frame1 = images[0]
     frame2 = images[1]
 
-    model = models[model_name_index]
+    model = models[model_names[model_name_index]]
     cv2_images = [cv2.imread(frame1), cv2.imread(frame2)]
 
     frame1 = resize(256, frame1)
@@ -187,7 +189,7 @@ def get_video_frames(images, times_to_interpolate=6, model_name_index=0):
     frame2.save("test2.png")
 
     resize_img("test1.png", "test2.png")
-    input_frames = ["test1.png", "resized_cv2_images[1].png"]
+    input_frames = ["test1.png", "resized_img2.png"]
 
     frames = list(
         util.interpolate_recursively_from_files(
@@ -210,7 +212,7 @@ def create_mp4_with_audio(frames, cv2_images, duration, audio, output_path):
     clip3 = clip3.set_start((clip1.duration - 0.5) + (clip2.duration)).crossfadein(2)
 
     new_clip = CompositeVideoClip([clip1, clip2, clip3])
-    new_clip.set_audio(audio)
+    new_clip.audio = audio
     new_clip.set_duration(duration)
     new_clip.write_videofile(output_path, audio_codec="aac")
     return output_path
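
The `new_clip.audio = audio` change in the last hunk works around a moviepy 1.x gotcha: `set_audio()` returns a modified copy of the clip rather than mutating it in place, so the old `new_clip.set_audio(audio)` discarded its result and the composite was written out without sound. A minimal sketch of the difference, assuming a placeholder `audio.mp3` on disk:

from moviepy.editor import AudioFileClip, ColorClip

clip = ColorClip(size=(64, 64), color=(0, 0, 0), duration=2)
audio = AudioFileClip("audio.mp3")  # placeholder input for the demo

clip.set_audio(audio)         # no-op: the returned copy is thrown away
clip = clip.set_audio(audio)  # fix A: keep the returned copy
clip.audio = audio            # fix B, what this commit does: set the attribute

Note that `new_clip.set_duration(duration)` on the following line has the same return-a-copy behavior and still discards its result in both versions, so it is presumably still a no-op.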
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import sys
 import shutil
 import subprocess
 import uuid
@@ -6,17 +7,9 @@ from pathlib import Path
 import gradio as gr
 from moviepy.editor import AudioFileClip
 
-from animate import create_mp4_with_audio, get_video_frames
-
-# My installs
 output_dir = Path("temp/").absolute()
 output_dir.mkdir(exist_ok=True, parents=True)
-subprocess.call(["git", "submodule", "update", "--init", "--recursive"])  # install frame_interpolation
-
 
-os.chdir(
-    output_dir
-)  # change working directory to output_dir because the hf spaces model has no option to specify output directory ¯\_(ツ)_/¯
 
 
 class SpotifyApi:
@@ -78,12 +71,14 @@ def process_inputs(
     if spotify_url:
         spotify = SpotifyApi()
         audio_input.path = spotify.download_episode(spotify_url)
-    image = get_stable_diffusion_image(prompt)
-    video = add_static_image_to_audio(image, audio_input)
+    images = get_stable_diffusion_images(prompt)
+    video = animate_images(images, audio_input)
     return video
 
 
-def animate_images(image_paths: list[str], audio_input: str) -> str:
+def animate_images(image_paths: list[str], audio_input: AudioInput) -> str:
+    from animate import create_mp4_with_audio, get_video_frames  # Import only after the git clone, and only when needed; it takes a long time
+
     # Generate a random folder name and change directories to there
     foldername = str(uuid.uuid4())[:8]
     vid_output_dir = Path(output_dir / foldername)
@@ -93,13 +88,13 @@ def animate_images(image_paths: list[str], audio_input: str) -> str:
         audio_input.start_time, audio_input.start_time + audio_input.run_for
     )
     video_frames, cv2_images = get_video_frames(image_paths)
-    path = Path(vid_output_dir / "output.mp4").as_posix()
+    path = Path(vid_output_dir / "output_final.mp4").as_posix()
     return create_mp4_with_audio(
         video_frames, cv2_images, audio_clip.duration, audio_clip, path
     )
 
 
-def get_stable_diffusion_image(prompt) -> str:
+def get_stable_diffusion_images(prompt) -> str:
     stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
     gallery_dir = stable_diffusion(prompt, fn_index=2)
     return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)][:2]
@@ -117,5 +112,12 @@ iface = gr.Interface(
     outputs="video",
 )
 
+if __name__ == '__main__':
+    subprocess.call(["git", "clone", "https://github.com/google-research/frame-interpolation", "frame_interpolation"])  # install frame_interpolation, I guess
+    sys.path.append("frame_interpolation")
 
-iface.launch()
+    # My installs
+    os.chdir(
+        output_dir
+    )  # change working directory to output_dir because the hf spaces model has no option to specify output directory ¯\_(ツ)_/¯
+    iface.launch(share=True)
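
Moving `from animate import ...` into the body of `animate_images()` is a deferred-import pattern: `animate` pulls in the cloned frame-interpolation code, which only exists (and is only importable) after the `git clone` and `sys.path.append` in the `__main__` block, and deferring it also keeps module load fast. A stripped-down sketch of the ordering this relies on; the names match this commit, and the function body is a stand-in:

import subprocess
import sys

def animate_later():
    # Stand-in for animate_images(): the import is resolved at call time,
    # after the clone below has already run.
    from animate import get_video_frames
    ...

if __name__ == '__main__':
    subprocess.call(["git", "clone", "https://github.com/google-research/frame-interpolation", "frame_interpolation"])
    sys.path.append("frame_interpolation")  # make the cloned code importable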
frame_interpolation DELETED
@@ -1 +0,0 @@
- Subproject commit e9031587b441d679e841b59ccc5407778ba5f493
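
The submodule pinned the frame-interpolation code to commit e9031587; the runtime `git clone` that replaces it (see app.py above) tracks whatever the upstream default branch currently is. If reproducibility matters, one option, not part of this commit, would be to check the clone out at the old submodule commit:

import subprocess

# Hypothetical hardening of the runtime clone in app.py: pin it to the commit
# the deleted submodule pointed at.
subprocess.call(["git", "clone", "https://github.com/google-research/frame-interpolation", "frame_interpolation"])
subprocess.call(["git", "-C", "frame_interpolation", "checkout", "e9031587b441d679e841b59ccc5407778ba5f493"])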
 
 
pyproject.toml DELETED
@@ -1,14 +0,0 @@
-[tool.poetry]
-name = "stablepod"
-version = "0.1.0"
-description = ""
-authors = ["Your Name <you@example.com>"]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = "3.10.8"
-
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
requirements.txt CHANGED
@@ -13,7 +13,7 @@ natsort==8.1.0
 
 # Our dependencies
 fastapi==0.74.1  # This is because of reasons
-gradio
+gradio==3.4  # This is also because of reasons
 spodcast
 moviepy
 huggingface-hub
test.py ADDED
@@ -0,0 +1,17 @@
+import os
+from app import animate_images, AudioInput
+
+def get_test_image_paths():
+    gallery_dir = "temp/images"
+    return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)][:2]
+
+def get_test_audio_paths():
+    audio_path = "temp/audio.mp3"
+    audio_input = AudioInput(audio_path, 10, 10)
+    return audio_input
+
+
+image_paths = get_test_image_paths()
+audio_input = get_test_audio_paths()
+
+animate_images(image_paths, audio_input)
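
test.py drives `animate_images()` directly against local fixtures (`temp/images`, `temp/audio.mp3`), bypassing Gradio and Spotify. The `AudioInput` class it imports is not shown in this diff; judging from the attributes used elsewhere in app.py (`path`, `start_time`, `run_for`) and the positional call `AudioInput(audio_path, 10, 10)`, it is presumably shaped roughly like the dataclass below (a guess for orientation, not the actual definition in app.py):

from dataclasses import dataclass

@dataclass
class AudioInput:
    path: str        # local audio file, e.g. "temp/audio.mp3"
    start_time: int  # offset in seconds where the clip starts
    run_for: int     # how many seconds of audio to keep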