wwen1997 committed on
Commit dbd661e (1 parent: 622ea3d)

Update app.py

Files changed (1):
  1. app.py (+17, -19)
app.py CHANGED
@@ -352,6 +352,7 @@ def validate_and_convert_image(image, target_size=(512 , 512)):
 
 class Drag:
 
+    @spaces.GPU
     def __init__(self, device, args, height, width, model_length, dtype=torch.float16, use_sift=False):
         self.device = device
         self.dtype = dtype
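
`@spaces.GPU` is the Hugging Face ZeroGPU decorator: a GPU is attached to the process only while the decorated call runs, so decorating `__init__` lets the constructor itself place weights on CUDA. A minimal sketch of the pattern, with a hypothetical workload standing in for the real pipeline:

    import spaces  # Hugging Face ZeroGPU helper package
    import torch

    @spaces.GPU  # a GPU is attached only for the duration of this call
    def matmul_on_gpu(n: int = 1024) -> float:
        # hypothetical workload standing in for the pipeline call
        x = torch.randn(n, n, device="cuda")
        return (x @ x).sum().item()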
@@ -362,21 +363,21 @@ class Drag:
             low_cpu_mem_usage=True,
             custom_resume=True,
         )
-        unet = unet.to(dtype)
+        unet = unet.to(device, dtype)
 
         controlnet = ControlNetSVDModel.from_pretrained(
             os.path.join(args.model, "controlnet"),
         )
-        controlnet = controlnet.to(dtype)
+        controlnet = controlnet.to(device, dtype)
 
-        # if is_xformers_available():
-        #     import xformers
-        #     xformers_version = version.parse(xformers.__version__)
-        #     unet.enable_xformers_memory_efficient_attention()
-        #     # controlnet.enable_xformers_memory_efficient_attention()
-        # else:
-        #     raise ValueError(
-        #         "xformers is not available. Make sure it is installed correctly")
+        if is_xformers_available():
+            import xformers
+            xformers_version = version.parse(xformers.__version__)
+            unet.enable_xformers_memory_efficient_attention()
+            # controlnet.enable_xformers_memory_efficient_attention()
+        else:
+            raise ValueError(
+                "xformers is not available. Make sure it is installed correctly")
 
         pipe = StableVideoDiffusionInterpControlPipeline.from_pretrained(
             "checkpoints/stable-video-diffusion-img2vid-xt",
@@ -385,6 +386,7 @@ class Drag:
             low_cpu_mem_usage=False,
             torch_dtype=torch.float16, variant="fp16", local_files_only=True,
         )
+        pipe.to(device)
 
         self.pipeline = pipe
         # self.pipeline.enable_model_cpu_offload()
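
The added `pipe.to(device)` puts the whole pipeline on the GPU once, at construction time. Roughly the same pattern on the stock SVD pipeline (the app actually uses a custom `StableVideoDiffusionInterpControlPipeline` and a local checkpoint path):

    import torch
    from diffusers import StableVideoDiffusionPipeline

    pipe = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt",
        torch_dtype=torch.float16,  # load weights in fp16
        variant="fp16",             # fetch the fp16 weight files
    )
    pipe.to("cuda")  # move every sub-model to the GPU in one call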
@@ -396,10 +398,7 @@ class Drag:
         self.use_sift = use_sift
 
     @spaces.GPU
-    def run(self, first_frame_path, last_frame_path, tracking_points, controlnet_cond_scale, motion_bucket_id):
-
-        self.pipeline.to(self.device)
-
+    def run(self, first_frame_path, last_frame_path, tracking_points, controlnet_cond_scale, motion_bucket_id):
         original_width, original_height = 512, 320 # TODO
 
         # load_image
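
With device placement handled in `__init__`, `run()` no longer needs `self.pipeline.to(self.device)` on every call. A toy sketch of the shape of that change (hypothetical `Pipeline` class, not the app's):

    import torch

    class Pipeline:
        def __init__(self, device: str = "cuda"):
            # weights are placed on the device once, at construction time
            self.weights = torch.randn(4, 4, device=device)

        def run(self, x: torch.Tensor) -> torch.Tensor:
            # no per-call .to(device); the weights already live on the GPU
            return x.to(self.weights.device) @ self.weights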
@@ -530,7 +529,7 @@ class Drag:
 def reset_states(first_frame_path, last_frame_path, tracking_points):
     first_frame_path = gr.State()
     last_frame_path = gr.State()
-    tracking_points = gr.State()
+    tracking_points = gr.State([])
 
     return first_frame_path, last_frame_path, tracking_points
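
This and the later `gr.State` hunks make the same fix: `gr.State()` starts each session value as None, while `gr.State([])` seeds an empty list, so handlers that append tracking points never hit None. A minimal sketch of the difference in a hypothetical demo (not the app's actual UI):

    import gradio as gr

    def add_point(points, evt: gr.SelectData):
        points.append(evt.index)  # safe: points starts as [], not None
        return points, str(points)

    with gr.Blocks() as demo:
        points = gr.State([])  # gr.State() with no default would start as None
        img = gr.Image()
        log = gr.Textbox()
        img.select(add_point, [points], [points, log])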
 
@@ -549,7 +548,7 @@ def preprocess_image(image):
 
     image_pil.save(first_frame_path)
 
-    return first_frame_path, first_frame_path, gr.State()
+    return first_frame_path, first_frame_path, gr.State([])
 
 
 def preprocess_image_end(image_end):
@@ -566,7 +565,7 @@ def preprocess_image_end(image_end):
 
     image_end_pil.save(last_frame_path)
 
-    return last_frame_path, last_frame_path, gr.State()
+    return last_frame_path, last_frame_path, gr.State([])
 
 
 def add_drag(tracking_points):
@@ -680,7 +679,6 @@ if __name__ == "__main__":
     args = get_args()
     ensure_dirname(args.output_dir)
 
-
     color_list = []
     for i in range(20):
         color = np.concatenate([np.random.random(4)*255], axis=0)
@@ -710,7 +708,7 @@ if __name__ == "__main__":
     Framer = Drag("cuda", args, 320, 512, 14)
     first_frame_path = gr.State()
     last_frame_path = gr.State()
-    tracking_points = gr.State()
+    tracking_points = gr.State([])
 
     with gr.Row():
         with gr.Column(scale=1):
 