ynhe committed
Commit 78c337a · verified · 1 Parent(s): 0e826d7

Update README.md

Files changed (1)
  1. README.md +8 -5
README.md CHANGED

@@ -1,8 +1,9 @@
 ---
-license: "cc-by-nc-4.0"
+license: cc-by-nc-4.0
 tags:
 - vision
 - video-classification
+pipeline_tag: video-classification
 ---
 
 # VideoMAE-v2 (base-sized model, Pretrained on UnlabeledHybrid-1M)
@@ -24,13 +25,15 @@ import numpy as np
 import torch
 
 
-config = AutoConfig.from_pretrained("./", trust_remote_code=True)
-model = AutoModel.from_pretrained('./', config=config, trust_remote_code=True)
+config = AutoConfig.from_pretrained("OpenGVLab/VideoMAEv2-Base", trust_remote_code=True)
+processor = VideoMAEImageProcessor.from_pretrained("OpenGVLab/VideoMAEv2-Base")
+model = AutoModel.from_pretrained('OpenGVLab/VideoMAEv2-Base', config=config, trust_remote_code=True)
+
 
 video = list(np.random.rand(16, 3, 224, 224))
 
-processor = VideoMAEImageProcessor.from_pretrained("./")
-model = AutoModel.from_pretrained("./",config=config, trust_remote_code=True)
+
+
 
 # B, T, C, H, W -> B, C, T, H, W
 inputs = processor(video, return_tensors="pt")
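
For reference, the changed lines assemble into the runnable usage example below. This is a minimal sketch of the post-commit README snippet, with assumptions the diff does not show: the `from transformers import ...` line is inferred from the classes used, and the `permute` and forward-pass steps are inferred from the `# B, T, C, H, W -> B, C, T, H, W` comment rather than copied from this diff.

```python
import numpy as np
import torch
# Inferred import: the snippet uses AutoConfig, AutoModel, and
# VideoMAEImageProcessor, all of which ship with transformers.
from transformers import AutoConfig, AutoModel, VideoMAEImageProcessor

# trust_remote_code=True pulls the custom VideoMAEv2 model code
# hosted in the OpenGVLab/VideoMAEv2-Base repo.
config = AutoConfig.from_pretrained("OpenGVLab/VideoMAEv2-Base", trust_remote_code=True)
processor = VideoMAEImageProcessor.from_pretrained("OpenGVLab/VideoMAEv2-Base")
model = AutoModel.from_pretrained("OpenGVLab/VideoMAEv2-Base", config=config, trust_remote_code=True)

# 16 random frames of shape (3, 224, 224) stand in for a real video clip.
video = list(np.random.rand(16, 3, 224, 224))

inputs = processor(video, return_tensors="pt")

# B, T, C, H, W -> B, C, T, H, W — assumed from the README's comment:
# the model expects the channel axis before the time axis.
inputs["pixel_values"] = inputs["pixel_values"].permute(0, 2, 1, 3, 4)

# Inferred forward pass; not shown in this diff's changed lines.
with torch.no_grad():
    outputs = model(**inputs)
```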