Upload Blip2ForConditionalGeneration
- pytorch_model-00001-of-00002.bin: +2 -2
- pytorch_model.bin.index.json: +40 -79
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2a91a6c44a9cc9c814b244306a341c822dd9ce426b567b6edc2a5be37529cf0a
+size 9443768765
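The shard itself lives in Git LFS; what the repository tracks is just this pointer (spec version, oid sha256, size). A minimal sketch for checking a downloaded shard against the new pointer values — the local path is an assumption:

import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so the ~9 GB shard never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# oid and size taken from the new pointer above.
shard = "pytorch_model-00001-of-00002.bin"
assert os.path.getsize(shard) == 9443768765
assert sha256_of(shard) == "2a91a6c44a9cc9c814b244306a341c822dd9ce426b567b6edc2a5be37529cf0a"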
pytorch_model.bin.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size":
+    "total_size": 16298536960
   },
   "weight_map": {
     "language_model.decoder.block.0.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
@@ -834,9 +834,8 @@
     "vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.0.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.0.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.0.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.0.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.0.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.0.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.1.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.1.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.1.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -847,9 +846,8 @@
     "vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.1.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.1.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.1.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.1.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.1.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.1.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.10.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.10.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.10.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -860,9 +858,8 @@
     "vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.10.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.10.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.10.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.10.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.10.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.10.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.11.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.11.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.11.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -873,9 +870,8 @@
     "vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.11.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.11.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.11.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.11.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.11.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.11.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.12.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.12.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.12.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -886,9 +882,8 @@
     "vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.12.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.12.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.12.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.12.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.12.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.12.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.13.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.13.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.13.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -899,9 +894,8 @@
     "vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.13.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.13.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.13.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.13.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.13.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.13.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.14.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.14.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.14.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -912,9 +906,8 @@
     "vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.14.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.14.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.14.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.14.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.14.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.14.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.15.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.15.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.15.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -925,9 +918,8 @@
     "vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.15.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.15.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.15.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.15.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.15.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.15.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.16.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.16.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.16.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -938,9 +930,8 @@
     "vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.16.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.16.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.16.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.16.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.16.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.16.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.17.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.17.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.17.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -951,9 +942,8 @@
     "vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.17.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.17.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.17.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.17.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.17.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.17.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.18.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.18.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.18.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -964,9 +954,8 @@
     "vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.18.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.18.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.18.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.18.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.18.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.18.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.19.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.19.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.19.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -977,9 +966,8 @@
     "vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.19.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.19.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.19.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.19.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.19.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.19.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.2.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.2.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.2.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -990,9 +978,8 @@
     "vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.2.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.2.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.2.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.2.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.2.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.2.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.20.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.20.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.20.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1003,9 +990,8 @@
     "vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.20.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.20.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.20.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.20.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.20.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.20.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.21.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.21.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.21.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1016,9 +1002,8 @@
     "vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.21.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.21.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.21.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.21.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.21.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.21.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.22.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.22.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.22.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1029,9 +1014,8 @@
     "vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.22.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.22.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.22.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.22.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.22.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.22.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.23.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.23.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.23.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1042,9 +1026,8 @@
     "vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.23.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.23.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.23.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.23.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.23.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.23.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.24.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.24.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.24.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1055,9 +1038,8 @@
     "vision_model.encoder.layers.24.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.24.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.24.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.24.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.24.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.24.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.24.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.25.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.25.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.25.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1068,9 +1050,8 @@
     "vision_model.encoder.layers.25.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.25.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.25.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.25.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.25.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.25.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.25.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.26.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.26.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.26.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1081,9 +1062,8 @@
     "vision_model.encoder.layers.26.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.26.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.26.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.26.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.26.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.26.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.26.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.27.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.27.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.27.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1094,9 +1074,8 @@
     "vision_model.encoder.layers.27.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.27.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.27.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.27.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.27.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.27.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.27.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.28.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.28.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.28.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1107,9 +1086,8 @@
     "vision_model.encoder.layers.28.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.28.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.28.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.28.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.28.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.28.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.28.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.29.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.29.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.29.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1120,9 +1098,8 @@
     "vision_model.encoder.layers.29.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.29.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.29.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.29.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.29.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.29.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.29.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.3.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.3.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.3.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1133,9 +1110,8 @@
     "vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.3.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.3.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.3.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.3.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.3.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.3.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.30.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.30.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.30.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1146,9 +1122,8 @@
     "vision_model.encoder.layers.30.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.30.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.30.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.30.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.30.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.30.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.30.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.31.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.31.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.31.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1159,9 +1134,8 @@
     "vision_model.encoder.layers.31.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.31.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.31.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.31.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.31.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.31.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.31.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.32.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.32.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.32.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1172,9 +1146,8 @@
     "vision_model.encoder.layers.32.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.32.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.32.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.32.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.32.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.32.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.32.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.33.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.33.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.33.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1185,9 +1158,8 @@
     "vision_model.encoder.layers.33.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.33.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.33.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.33.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.33.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.33.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.33.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.34.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.34.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.34.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1198,9 +1170,8 @@
     "vision_model.encoder.layers.34.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.34.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.34.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.34.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.34.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.34.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.34.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.35.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.35.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.35.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1211,9 +1182,8 @@
     "vision_model.encoder.layers.35.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.35.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.35.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.35.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.35.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.35.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.35.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.36.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.36.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.36.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1224,9 +1194,8 @@
     "vision_model.encoder.layers.36.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.36.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.36.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.36.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.36.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.36.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.36.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.37.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.37.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.37.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1237,9 +1206,8 @@
     "vision_model.encoder.layers.37.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.37.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.37.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.37.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.37.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.37.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.37.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.38.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.38.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.38.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1250,9 +1218,8 @@
     "vision_model.encoder.layers.38.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.38.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.38.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.38.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.38.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.38.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.38.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1263,9 +1230,8 @@
     "vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.4.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.4.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.4.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.4.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.4.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.4.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1276,9 +1242,8 @@
     "vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.5.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.5.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.5.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.5.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.5.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.5.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1289,9 +1254,8 @@
     "vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.6.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.6.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.6.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.6.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.6.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.6.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1302,9 +1266,8 @@
     "vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.7.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.7.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.7.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.7.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.7.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.7.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1315,9 +1278,8 @@
     "vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.8.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.8.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.8.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.8.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.8.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.8.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
@@ -1328,9 +1290,8 @@
     "vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.9.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.9.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.9.self_attn.q_bias": "pytorch_model-00001-of-00002.bin",
+    "vision_model.encoder.layers.9.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.encoder.layers.9.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
-    "vision_model.encoder.layers.9.self_attn.v_bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.post_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "vision_model.post_layernorm.weight": "pytorch_model-00001-of-00002.bin"
   }
|
1221 |
+
"vision_model.encoder.layers.38.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1222 |
"vision_model.encoder.layers.38.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1223 |
"vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
|
1224 |
"vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
|
1225 |
"vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
|
|
|
1230 |
"vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
1231 |
"vision_model.encoder.layers.4.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
|
1232 |
"vision_model.encoder.layers.4.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
|
1233 |
+
"vision_model.encoder.layers.4.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1234 |
"vision_model.encoder.layers.4.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1235 |
"vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
|
1236 |
"vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
|
1237 |
"vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
|
|
|
1242 |
"vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
1243 |
"vision_model.encoder.layers.5.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
|
1244 |
"vision_model.encoder.layers.5.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
|
1245 |
+
"vision_model.encoder.layers.5.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1246 |
"vision_model.encoder.layers.5.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1247 |
"vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
|
1248 |
"vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
|
1249 |
"vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
|
|
|
1254 |
"vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
1255 |
"vision_model.encoder.layers.6.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
|
1256 |
"vision_model.encoder.layers.6.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
|
1257 |
+
"vision_model.encoder.layers.6.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1258 |
"vision_model.encoder.layers.6.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1259 |
"vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
|
1260 |
"vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
|
1261 |
"vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
|
|
|
1266 |
"vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
1267 |
"vision_model.encoder.layers.7.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
|
1268 |
"vision_model.encoder.layers.7.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
|
1269 |
+
"vision_model.encoder.layers.7.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1270 |
"vision_model.encoder.layers.7.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1271 |
"vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
|
1272 |
"vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
|
1273 |
"vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
|
|
|
1278 |
"vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
1279 |
"vision_model.encoder.layers.8.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
|
1280 |
"vision_model.encoder.layers.8.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
|
1281 |
+
"vision_model.encoder.layers.8.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1282 |
"vision_model.encoder.layers.8.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1283 |
"vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00001-of-00002.bin",
|
1284 |
"vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00001-of-00002.bin",
|
1285 |
"vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00001-of-00002.bin",
|
|
|
1290 |
"vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
1291 |
"vision_model.encoder.layers.9.self_attn.projection.bias": "pytorch_model-00001-of-00002.bin",
|
1292 |
"vision_model.encoder.layers.9.self_attn.projection.weight": "pytorch_model-00001-of-00002.bin",
|
1293 |
+
"vision_model.encoder.layers.9.self_attn.qkv.bias": "pytorch_model-00001-of-00002.bin",
|
1294 |
"vision_model.encoder.layers.9.self_attn.qkv.weight": "pytorch_model-00001-of-00002.bin",
|
|
|
1295 |
"vision_model.post_layernorm.bias": "pytorch_model-00001-of-00002.bin",
|
1296 |
"vision_model.post_layernorm.weight": "pytorch_model-00001-of-00002.bin"
|
1297 |
}
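
Note: each of the 39 vision encoder layers (indices 0 through 38) gains a fused self_attn.qkv.bias entry alongside the existing self_attn.qkv.weight. Assuming these additions replace the separate q_bias / v_bias tensors that the original BLIP-2 checkpoints store per layer, and that the key projection carries no bias (the EVA-ViT convention), a state dict saved under the old names can be remapped with a sketch like the one below; fuse_qkv_biases is an illustrative helper, not part of this repo.

# Illustrative remap: build the fused qkv.bias this index expects from
# separate q/v biases. Assumes the key projection has no bias, so its
# slot in the fused vector is zero-filled (EVA-ViT / BLIP-2 convention).
import torch

def fuse_qkv_biases(state_dict, num_layers=39):
    for i in range(num_layers):
        prefix = f"vision_model.encoder.layers.{i}.self_attn."
        q_bias = state_dict.pop(prefix + "q_bias")
        v_bias = state_dict.pop(prefix + "v_bias")
        # Fused layout is [q | k | v]; k's bias is identically zero here.
        state_dict[prefix + "qkv.bias"] = torch.cat(
            (q_bias, torch.zeros_like(v_bias), v_bias)
        )
    return state_dict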
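Since both the shard file and the index were rewritten in this commit, a quick consistency check is cheap to run after download. A standard-library sketch, assuming it is executed from the repo root:

# Verify every tensor in the index maps to a shard that exists on disk,
# and compare metadata.total_size against the bytes actually stored.
import json, os

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

shards = sorted(set(index["weight_map"].values()))
missing = [s for s in shards if not os.path.exists(s)]
assert not missing, f"missing shard files: {missing}"

on_disk = sum(os.path.getsize(s) for s in shards)
print(f"{len(index['weight_map'])} tensors across {len(shards)} shards")
print(f"metadata total_size: {index['metadata']['total_size']:,} bytes")
# On-disk size includes pickle/serialization overhead, so it should be
# close to, but not exactly equal to, the metadata figure.
print(f"bytes on disk:       {on_disk:,}")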