add download links for pytorch model
README.md CHANGED
@@ -44,7 +44,7 @@ Scaling behavior of EVA-CLIP with zero-shot classification performance averaged
 | model name | total #params | seen samples | pytorch weight |
 |:-----------|:------:|:------:|:------:|
 | `EVA_8B_psz14` | 7.5B | 6B | [PT](https://huggingface.co/BAAI/EVA-CLIP-8B/resolve/main/EVA_8B_psz14.bin) (`31.0GB`) |
-| `EVA_18B_psz14` | 17.5B | 6B | [PT](https://huggingface.co/BAAI/EVA-CLIP-18B/resolve/main/EVA_18B_psz14.bin) (`
+| `EVA_18B_psz14.fp16` | 17.5B | 6B | [PT](https://huggingface.co/BAAI/EVA-CLIP-18B/resolve/main/EVA_18B_psz14.fp16.bin) (`35.3GB`) |
 
 
 </div>
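
The `resolve/main` URL in the table can be fetched directly, but a cached, resumable download through `huggingface_hub` is often more convenient. A minimal sketch, assuming only the repo id and filename shown in the download link above:

```python
# Sketch: fetch the fp16 PyTorch weight added in this commit.
# repo_id and filename come from the download link in the table above.
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="BAAI/EVA-CLIP-18B",
    filename="EVA_18B_psz14.fp16.bin",  # ~35.3GB on disk
)
print(ckpt_path)  # local path inside the Hugging Face cache
```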
@@ -70,7 +70,7 @@ Scaling behavior of EVA-CLIP with zero-shot classification performance averaged
 
 | model name | image enc. init. ckpt | text enc. init. ckpt | total #params | training data | training batch size | gpus for training | img. cls. avg. acc. | video cls. avg. acc. | retrieval MR | hf weight | pytorch weight |
 |:-----|:-----|:-----------|:------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:|
-| `EVA-CLIP-18B` | `EVA_18B_psz14` | `EVA02_CLIP_E_psz14_plus_s9B` | 18.1B | Merged-2B+ | 108K | 360 A100(40GB) | **80.7** | **75.0** | **87.8**| [🤗 HF](https://huggingface.co/BAAI/EVA-CLIP-18B) | [PT](https://huggingface.co/BAAI/EVA-CLIP-18B/resolve/main/EVA_CLIP_18B_psz14_s6B.pt) (`
+| `EVA-CLIP-18B` | `EVA_18B_psz14` | `EVA02_CLIP_E_psz14_plus_s9B` | 18.1B | Merged-2B+ | 108K | 360 A100(40GB) | **80.7** | **75.0** | **87.8**| [🤗 HF](https://huggingface.co/BAAI/EVA-CLIP-18B) | [PT](https://huggingface.co/BAAI/EVA-CLIP-18B/resolve/main/EVA_CLIP_18B_psz14_s6B.fp16.pt) (`36.7GB`) |
 
 </div>
 
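Before wiring the downloaded `.pt` file into `eva_clip`, it can be sanity-checked with a plain `torch.load`. A sketch that assumes the checkpoint deserializes to an ordinary state dict of fp16 tensors (the diff itself does not specify the serialization format); expect roughly the 36.7GB file size in CPU RAM:

```python
# Sketch: inspect the fp16 checkpoint on CPU.
# Assumption: the .pt file is a plain torch-serialized state dict.
import torch

state = torch.load("/path/to/EVA_CLIP_18B_psz14_s6B.fp16.pt", map_location="cpu")
for name, tensor in list(state.items())[:3]:
    print(name, tuple(tensor.shape), tensor.dtype)  # expect torch.float16
```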
@@ -157,7 +157,7 @@ from eva_clip import create_model_and_transforms, get_tokenizer
 from PIL import Image
 
 model_name = "EVA-CLIP-18B"
-pretrained = "eva_clip" # or "/path/to/EVA_CLIP_18B_psz14_s6B.pt"
+pretrained = "eva_clip" # or "/path/to/EVA_CLIP_18B_psz14_s6B.fp16.pt"
 
 image_path = "CLIP.png"
 caption = ["a diagram", "a dog", "a cat"]
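For context, the hunk above sits inside the README's zero-shot classification example. A sketch of the full snippet around the edited `pretrained` line, following the open_clip-style `eva_clip` API named in the hunk header (the `force_custom_clip` flag is taken from the EVA-CLIP codebase and is not part of this diff):

```python
# Sketch of the README's zero-shot example around the edited `pretrained` line.
import torch
from eva_clip import create_model_and_transforms, get_tokenizer
from PIL import Image

model_name = "EVA-CLIP-18B"
pretrained = "eva_clip"  # or "/path/to/EVA_CLIP_18B_psz14_s6B.fp16.pt"

image_path = "CLIP.png"
caption = ["a diagram", "a dog", "a cat"]

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _, preprocess = create_model_and_transforms(model_name, pretrained, force_custom_clip=True)
tokenizer = get_tokenizer(model_name)
model = model.to(device)

image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
text = tokenizer(caption).to(device)

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    # Normalize, then softmax over cosine similarities scaled by 100.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print("Label probs:", text_probs)
```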