jbilcke-hf HF staff committed on
Commit
36be800
1 Parent(s): 8010ebe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -19
app.py CHANGED
@@ -16,6 +16,8 @@ from huggingface_hub import hf_hub_download
16
 
17
  #gradio.helpers.CACHED_FOLDER = '/data/cache'
18
 
 
 
19
  pipe = StableVideoDiffusionPipeline.from_pretrained(
20
  "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
21
  )
@@ -36,7 +38,14 @@ def sample(
36
  decoding_t: int = 3, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
37
  device: str = "cuda",
38
  output_folder: str = "outputs",
 
39
  ):
 
 
 
 
 
 
40
  if image.mode == "RGBA":
41
  image = image.convert("RGB")
42
 
@@ -89,6 +98,11 @@ with gr.Blocks() as demo:
89
  gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
90
  #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `4s` vid from a single image at (`25 frames` at `6 fps`). this demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
91
  ''')
 
 
 
 
 
92
  with gr.Row():
93
  with gr.Column():
94
  image = gr.Image(label="Upload your image", type="pil")
@@ -100,25 +114,8 @@ with gr.Blocks() as demo:
100
  motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
101
  fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
102
 
103
- image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
104
- generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
105
- gr.Examples(
106
- examples=[
107
- "images/blink_meme.png",
108
- "images/confused2_meme.png",
109
- "images/disaster_meme.png",
110
- "images/distracted_meme.png",
111
- "images/hide_meme.png",
112
- "images/nazare_meme.png",
113
- "images/success_meme.png",
114
- "images/willy_meme.png",
115
- "images/wink_meme.png"
116
- ],
117
- inputs=image,
118
- outputs=[video, seed],
119
- fn=sample,
120
- cache_examples=True,
121
- )
122
 
123
  if __name__ == "__main__":
124
  demo.queue(max_size=20)
 
16
 
17
  #gradio.helpers.CACHED_FOLDER = '/data/cache'
18
 
19
+ SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
20
+
21
  pipe = StableVideoDiffusionPipeline.from_pretrained(
22
  "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
23
  )
 
38
  decoding_t: int = 3, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
39
  device: str = "cuda",
40
  output_folder: str = "outputs",
41
+ secret_token: string = "",
42
  ):
43
+ if secret_token != SECRET_TOKEN:
44
+ raise gr.Error(
45
+ f'Invalid secret token. Please fork the original space if you want to use it for yourself.')
46
+
47
+ image = resize_image(image)
48
+
49
  if image.mode == "RGBA":
50
  image = image.convert("RGB")
51
 
 
98
  gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
99
  #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `4s` vid from a single image at (`25 frames` at `6 fps`). this demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
100
  ''')
101
+ secret_token = gr.Text(
102
+ label='Secret Token',
103
+ max_lines=1,
104
+ placeholder='Enter your secret token',
105
+ )
106
  with gr.Row():
107
  with gr.Column():
108
  image = gr.Image(label="Upload your image", type="pil")
 
114
  motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
115
  fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
116
 
117
+ # image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
118
+ generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id, secret_token], outputs=[video, seed], api_name="video")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
120
  if __name__ == "__main__":
121
  demo.queue(max_size=20)