INFO 2024-03-29 17:46:26,830 | git: sha: N/A, status: clean, branch: N/A
INFO 2024-03-29 17:46:26,830 | Command: main.py --output_dir ./debug -c config/cfg_fsc147_vit_b.py --datasets config/datasets_fsc147.json --pretrain_model_path checkpoints/groundingdino_swinb_cogcoor.pth --options text_encoder_type=checkpoints/bert-base-uncased
INFO 2024-03-29 17:46:26,831 | Full config saved to ./debug/config_args_all.json
INFO 2024-03-29 17:46:26,831 | world size: 1
INFO 2024-03-29 17:46:26,831 | rank: 0
INFO 2024-03-29 17:46:26,831 | local_rank: 0
INFO 2024-03-29 17:46:26,831 | args: Namespace(config_file='config/cfg_fsc147_vit_b.py', options={'text_encoder_type': 'checkpoints/bert-base-uncased'}, datasets='config/datasets_fsc147.json', remove_difficult=False, fix_size=False, output_dir='./debug', note='', device='cuda', seed=42, resume='', pretrain_model_path='checkpoints/groundingdino_swinb_cogcoor.pth', finetune_ignore=None, start_epoch=0, eval=False, num_workers=8, test=False, debug=False, find_unused_params=False, save_results=False, save_log=False, world_size=1, dist_url='env://', rank=0, local_rank=0, amp=False, distributed=False, data_aug_scales=[480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800], data_aug_max_size=1333, data_aug_scales2_resize=[400, 500, 600], data_aug_scales2_crop=[384, 600], data_aug_scale_overlap=None, batch_size=4, modelname='groundingdino', backbone='swin_B_384_22k', position_embedding='sine', pe_temperatureH=20, pe_temperatureW=20, return_interm_indices=[1, 2, 3], enc_layers=6, dec_layers=6, pre_norm=False, dim_feedforward=2048, hidden_dim=256, dropout=0.0, nheads=8, num_queries=900, query_dim=4, num_patterns=0, num_feature_levels=4, enc_n_points=4, dec_n_points=4, two_stage_type='standard', two_stage_bbox_embed_share=False, two_stage_class_embed_share=False, transformer_activation='relu', dec_pred_bbox_embed_share=True, dn_box_noise_scale=1.0, dn_label_noise_ratio=0.5, dn_label_coef=1.0, dn_bbox_coef=1.0, embed_init_tgt=True, dn_labelbook_size=91, max_text_len=256, text_encoder_type='checkpoints/bert-base-uncased', use_text_enhancer=True, use_fusion_layer=True, use_checkpoint=True, use_transformer_ckpt=True, use_text_cross_attention=True, text_dropout=0.0, fusion_dropout=0.0, fusion_droppath=0.1, sub_sentence_present=True, max_labels=90, lr=0.0001, backbone_freeze_keywords=None, freeze_keywords=['backbone.0', 'bert'], lr_backbone=1e-05, lr_backbone_names=['backbone.0', 'bert'], lr_linear_proj_mult=1e-05, lr_linear_proj_names=['ref_point_head', 'sampling_offsets'], weight_decay=0.0001, param_dict_type='ddetr_in_mmdet', ddetr_lr_param=False, epochs=30, lr_drop=10, save_checkpoint_interval=10, clip_max_norm=0.1, onecyclelr=False, multi_step_lr=False, lr_drop_list=[10, 20], frozen_weights=None, dilation=False, pdetr3_bbox_embed_diff_each_layer=False, pdetr3_refHW=-1, random_refpoints_xy=False, fix_refpoints_hw=-1, dabdetr_yolo_like_anchor_update=False, dabdetr_deformable_encoder=False, dabdetr_deformable_decoder=False, use_deformable_box_attn=False, box_attn_type='roi_align', dec_layer_number=None, decoder_layer_noise=False, dln_xy_noise=0.2, dln_hw_noise=0.2, add_channel_attention=False, add_pos_value=False, two_stage_pat_embed=0, two_stage_add_query_num=0, two_stage_learn_wh=False, two_stage_default_hw=0.05, two_stage_keep_all_tokens=False, num_select=900, batch_norm_type='FrozenBatchNorm2d', masks=False, aux_loss=True,
set_cost_class=5.0, set_cost_bbox=1.0, set_cost_giou=0.0, cls_loss_coef=5.0, bbox_loss_coef=1.0, giou_loss_coef=0.0, enc_loss_coef=1.0, interm_loss_coef=1.0, no_interm_box_loss=False, mask_loss_coef=1.0, dice_loss_coef=1.0, focal_alpha=0.25, focal_gamma=2.0, decoder_sa_type='sa', matcher_type='HungarianMatcher', decoder_module_seq=['sa', 'ca', 'ffn'], nms_iou_threshold=-1, dec_pred_class_embed_share=True, match_unstable_error=True, use_detached_boxes_dec_out=False, dn_scalar=100, box_threshold=0.23, text_threshold=0, use_coco_eval=False, label_list=['alcohol bottle', 'baguette roll', 'ball', 'banana', 'bead', 'bee', 'birthday candle', 'biscuit', 'boat', 'bottle', 'bowl', 'box', 'bread roll', 'brick', 'buffalo', 'bun', 'calamari ring', 'can', 'candle', 'cap', 'car', 'cartridge', 'cassette', 'cement bag', 'cereal', 'chewing gum piece', 'chopstick', 'clam', 'coffee bean', 'coin', 'cotton ball', 'cow', 'crane', 'crayon', 'croissant', 'crow', 'cup', 'cupcake', 'cupcake holder', 'fish', 'gemstone', 'go game piece', 'goat', 'goldfish snack', 'goose', 'ice cream', 'ice cream cone', 'instant noodle', 'jade stone', 'jeans', 'kidney bean', 'kitchen towel', 'lighter', 'lipstick', 'm&m piece', 'macaron', 'match', 'meat skewer', 'mini blind', 'mosaic tile', 'naan bread', 'nail', 'nut', 'onion ring', 'orange', 'pearl', 'pen', 'pencil', 'penguin', 'pepper', 'person', 'pigeon', 'plate', 'polka dot tile', 'potato', 'rice bag', 'roof tile', 'screw', 'shoe', 'spoon', 'spring roll', 'stair', 'stapler pin', 'straw', 'supermarket shelf', 'swan', 'tomato', 'watermelon', 'window', 'zebra'], val_label_list=['ant', 'bird', 'book', 'bottle cap', 'bullet', 'camel', 'chair', 'chicken wing', 'donut', 'donut holder', 'flamingo', 'flower', 'flower pot', 'grape', 'horse', 'kiwi', 'milk carton', 'oyster', 'oyster shell', 'package of fresh cut fruit', 'peach', 'pill', 'polka dot', 'prawn cracker', 'sausage', 'seagull', 'shallot', 'shirt', 'skateboard', 'toilet paper roll'])
DEBUG 2024-03-29 17:46:26,832 | build model ... ...
DEBUG 2024-03-29 17:46:28,620 | build model, done.
INFO 2024-03-29 17:46:28,622 | number of params:236717952
INFO 2024-03-29 17:46:28,625 | params before freezing: { "transformer.level_embed": 1024, "transformer.encoder.layers.0.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.0.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.0.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.0.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.0.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.0.self_attn.value_proj.bias": 256, "transformer.encoder.layers.0.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.0.self_attn.output_proj.bias": 256, "transformer.encoder.layers.0.norm1.weight": 256, "transformer.encoder.layers.0.norm1.bias": 256, "transformer.encoder.layers.0.linear1.weight": 524288, "transformer.encoder.layers.0.linear1.bias": 2048, "transformer.encoder.layers.0.linear2.weight": 524288, "transformer.encoder.layers.0.linear2.bias": 256, "transformer.encoder.layers.0.norm2.weight": 256, "transformer.encoder.layers.0.norm2.bias": 256, "transformer.encoder.layers.1.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.1.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.1.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.1.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.1.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.1.self_attn.value_proj.bias": 256, "transformer.encoder.layers.1.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.1.self_attn.output_proj.bias": 256, "transformer.encoder.layers.1.norm1.weight": 256, "transformer.encoder.layers.1.norm1.bias": 256, "transformer.encoder.layers.1.linear1.weight": 524288, "transformer.encoder.layers.1.linear1.bias": 2048, "transformer.encoder.layers.1.linear2.weight": 524288, "transformer.encoder.layers.1.linear2.bias": 256, "transformer.encoder.layers.1.norm2.weight": 256, "transformer.encoder.layers.1.norm2.bias": 256, "transformer.encoder.layers.2.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.2.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.2.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.2.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.2.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.2.self_attn.value_proj.bias": 256, "transformer.encoder.layers.2.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.2.self_attn.output_proj.bias": 256, "transformer.encoder.layers.2.norm1.weight": 256, "transformer.encoder.layers.2.norm1.bias": 256, "transformer.encoder.layers.2.linear1.weight": 524288, "transformer.encoder.layers.2.linear1.bias": 2048, "transformer.encoder.layers.2.linear2.weight": 524288, "transformer.encoder.layers.2.linear2.bias": 256, "transformer.encoder.layers.2.norm2.weight": 256, "transformer.encoder.layers.2.norm2.bias": 256, "transformer.encoder.layers.3.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.3.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.3.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.3.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.3.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.3.self_attn.value_proj.bias": 256,
"transformer.encoder.layers.3.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.3.self_attn.output_proj.bias": 256, "transformer.encoder.layers.3.norm1.weight": 256, "transformer.encoder.layers.3.norm1.bias": 256, "transformer.encoder.layers.3.linear1.weight": 524288, "transformer.encoder.layers.3.linear1.bias": 2048, "transformer.encoder.layers.3.linear2.weight": 524288, "transformer.encoder.layers.3.linear2.bias": 256, "transformer.encoder.layers.3.norm2.weight": 256, "transformer.encoder.layers.3.norm2.bias": 256, "transformer.encoder.layers.4.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.4.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.4.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.4.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.4.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.4.self_attn.value_proj.bias": 256, "transformer.encoder.layers.4.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.4.self_attn.output_proj.bias": 256, "transformer.encoder.layers.4.norm1.weight": 256, "transformer.encoder.layers.4.norm1.bias": 256, "transformer.encoder.layers.4.linear1.weight": 524288, "transformer.encoder.layers.4.linear1.bias": 2048, "transformer.encoder.layers.4.linear2.weight": 524288, "transformer.encoder.layers.4.linear2.bias": 256, "transformer.encoder.layers.4.norm2.weight": 256, "transformer.encoder.layers.4.norm2.bias": 256, "transformer.encoder.layers.5.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.5.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.5.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.5.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.5.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.5.self_attn.value_proj.bias": 256, "transformer.encoder.layers.5.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.5.self_attn.output_proj.bias": 256, "transformer.encoder.layers.5.norm1.weight": 256, "transformer.encoder.layers.5.norm1.bias": 256, "transformer.encoder.layers.5.linear1.weight": 524288, "transformer.encoder.layers.5.linear1.bias": 2048, "transformer.encoder.layers.5.linear2.weight": 524288, "transformer.encoder.layers.5.linear2.bias": 256, "transformer.encoder.layers.5.norm2.weight": 256, "transformer.encoder.layers.5.norm2.bias": 256, "transformer.encoder.text_layers.0.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.0.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.0.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.0.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.0.linear1.weight": 262144, "transformer.encoder.text_layers.0.linear1.bias": 1024, "transformer.encoder.text_layers.0.linear2.weight": 262144, "transformer.encoder.text_layers.0.linear2.bias": 256, "transformer.encoder.text_layers.0.norm1.weight": 256, "transformer.encoder.text_layers.0.norm1.bias": 256, "transformer.encoder.text_layers.0.norm2.weight": 256, "transformer.encoder.text_layers.0.norm2.bias": 256, "transformer.encoder.text_layers.1.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.1.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.1.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.1.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.1.linear1.weight": 262144, "transformer.encoder.text_layers.1.linear1.bias": 1024, 
"transformer.encoder.text_layers.1.linear2.weight": 262144, "transformer.encoder.text_layers.1.linear2.bias": 256, "transformer.encoder.text_layers.1.norm1.weight": 256, "transformer.encoder.text_layers.1.norm1.bias": 256, "transformer.encoder.text_layers.1.norm2.weight": 256, "transformer.encoder.text_layers.1.norm2.bias": 256, "transformer.encoder.text_layers.2.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.2.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.2.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.2.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.2.linear1.weight": 262144, "transformer.encoder.text_layers.2.linear1.bias": 1024, "transformer.encoder.text_layers.2.linear2.weight": 262144, "transformer.encoder.text_layers.2.linear2.bias": 256, "transformer.encoder.text_layers.2.norm1.weight": 256, "transformer.encoder.text_layers.2.norm1.bias": 256, "transformer.encoder.text_layers.2.norm2.weight": 256, "transformer.encoder.text_layers.2.norm2.bias": 256, "transformer.encoder.text_layers.3.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.3.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.3.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.3.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.3.linear1.weight": 262144, "transformer.encoder.text_layers.3.linear1.bias": 1024, "transformer.encoder.text_layers.3.linear2.weight": 262144, "transformer.encoder.text_layers.3.linear2.bias": 256, "transformer.encoder.text_layers.3.norm1.weight": 256, "transformer.encoder.text_layers.3.norm1.bias": 256, "transformer.encoder.text_layers.3.norm2.weight": 256, "transformer.encoder.text_layers.3.norm2.bias": 256, "transformer.encoder.text_layers.4.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.4.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.4.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.4.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.4.linear1.weight": 262144, "transformer.encoder.text_layers.4.linear1.bias": 1024, "transformer.encoder.text_layers.4.linear2.weight": 262144, "transformer.encoder.text_layers.4.linear2.bias": 256, "transformer.encoder.text_layers.4.norm1.weight": 256, "transformer.encoder.text_layers.4.norm1.bias": 256, "transformer.encoder.text_layers.4.norm2.weight": 256, "transformer.encoder.text_layers.4.norm2.bias": 256, "transformer.encoder.text_layers.5.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.5.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.5.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.5.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.5.linear1.weight": 262144, "transformer.encoder.text_layers.5.linear1.bias": 1024, "transformer.encoder.text_layers.5.linear2.weight": 262144, "transformer.encoder.text_layers.5.linear2.bias": 256, "transformer.encoder.text_layers.5.norm1.weight": 256, "transformer.encoder.text_layers.5.norm1.bias": 256, "transformer.encoder.text_layers.5.norm2.weight": 256, "transformer.encoder.text_layers.5.norm2.bias": 256, "transformer.encoder.fusion_layers.0.gamma_v": 256, "transformer.encoder.fusion_layers.0.gamma_l": 256, "transformer.encoder.fusion_layers.0.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.0.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.0.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.0.layer_norm_l.bias": 
256, "transformer.encoder.fusion_layers.0.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.0.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.1.gamma_v": 256, "transformer.encoder.fusion_layers.1.gamma_l": 256, "transformer.encoder.fusion_layers.1.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.1.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.1.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.1.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.1.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.1.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.2.gamma_v": 256, "transformer.encoder.fusion_layers.2.gamma_l": 256, "transformer.encoder.fusion_layers.2.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.2.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.2.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.2.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.2.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.2.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.3.gamma_v": 256, "transformer.encoder.fusion_layers.3.gamma_l": 256, "transformer.encoder.fusion_layers.3.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.3.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.3.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.3.layer_norm_l.bias": 256, 
"transformer.encoder.fusion_layers.3.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.3.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.4.gamma_v": 256, "transformer.encoder.fusion_layers.4.gamma_l": 256, "transformer.encoder.fusion_layers.4.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.4.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.4.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.4.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.4.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.4.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.5.gamma_v": 256, "transformer.encoder.fusion_layers.5.gamma_l": 256, "transformer.encoder.fusion_layers.5.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.5.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.5.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.5.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.5.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.5.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.out_l_proj.bias": 256, "transformer.decoder.layers.0.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.0.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.0.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.0.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.0.cross_attn.value_proj.weight": 65536, 
"transformer.decoder.layers.0.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.0.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.0.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.0.norm1.weight": 256, "transformer.decoder.layers.0.norm1.bias": 256, "transformer.decoder.layers.0.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.0.ca_text.in_proj_bias": 768, "transformer.decoder.layers.0.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.0.ca_text.out_proj.bias": 256, "transformer.decoder.layers.0.catext_norm.weight": 256, "transformer.decoder.layers.0.catext_norm.bias": 256, "transformer.decoder.layers.0.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.0.self_attn.in_proj_bias": 768, "transformer.decoder.layers.0.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.0.self_attn.out_proj.bias": 256, "transformer.decoder.layers.0.norm2.weight": 256, "transformer.decoder.layers.0.norm2.bias": 256, "transformer.decoder.layers.0.linear1.weight": 524288, "transformer.decoder.layers.0.linear1.bias": 2048, "transformer.decoder.layers.0.linear2.weight": 524288, "transformer.decoder.layers.0.linear2.bias": 256, "transformer.decoder.layers.0.norm3.weight": 256, "transformer.decoder.layers.0.norm3.bias": 256, "transformer.decoder.layers.1.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.1.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.1.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.1.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.1.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.1.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.1.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.1.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.1.norm1.weight": 256, "transformer.decoder.layers.1.norm1.bias": 256, "transformer.decoder.layers.1.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.1.ca_text.in_proj_bias": 768, "transformer.decoder.layers.1.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.1.ca_text.out_proj.bias": 256, "transformer.decoder.layers.1.catext_norm.weight": 256, "transformer.decoder.layers.1.catext_norm.bias": 256, "transformer.decoder.layers.1.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.1.self_attn.in_proj_bias": 768, "transformer.decoder.layers.1.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.1.self_attn.out_proj.bias": 256, "transformer.decoder.layers.1.norm2.weight": 256, "transformer.decoder.layers.1.norm2.bias": 256, "transformer.decoder.layers.1.linear1.weight": 524288, "transformer.decoder.layers.1.linear1.bias": 2048, "transformer.decoder.layers.1.linear2.weight": 524288, "transformer.decoder.layers.1.linear2.bias": 256, "transformer.decoder.layers.1.norm3.weight": 256, "transformer.decoder.layers.1.norm3.bias": 256, "transformer.decoder.layers.2.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.2.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.2.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.2.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.2.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.2.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.2.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.2.cross_attn.output_proj.bias": 256, 
"transformer.decoder.layers.2.norm1.weight": 256, "transformer.decoder.layers.2.norm1.bias": 256, "transformer.decoder.layers.2.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.2.ca_text.in_proj_bias": 768, "transformer.decoder.layers.2.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.2.ca_text.out_proj.bias": 256, "transformer.decoder.layers.2.catext_norm.weight": 256, "transformer.decoder.layers.2.catext_norm.bias": 256, "transformer.decoder.layers.2.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.2.self_attn.in_proj_bias": 768, "transformer.decoder.layers.2.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.2.self_attn.out_proj.bias": 256, "transformer.decoder.layers.2.norm2.weight": 256, "transformer.decoder.layers.2.norm2.bias": 256, "transformer.decoder.layers.2.linear1.weight": 524288, "transformer.decoder.layers.2.linear1.bias": 2048, "transformer.decoder.layers.2.linear2.weight": 524288, "transformer.decoder.layers.2.linear2.bias": 256, "transformer.decoder.layers.2.norm3.weight": 256, "transformer.decoder.layers.2.norm3.bias": 256, "transformer.decoder.layers.3.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.3.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.3.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.3.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.3.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.3.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.3.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.3.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.3.norm1.weight": 256, "transformer.decoder.layers.3.norm1.bias": 256, "transformer.decoder.layers.3.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.3.ca_text.in_proj_bias": 768, "transformer.decoder.layers.3.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.3.ca_text.out_proj.bias": 256, "transformer.decoder.layers.3.catext_norm.weight": 256, "transformer.decoder.layers.3.catext_norm.bias": 256, "transformer.decoder.layers.3.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.3.self_attn.in_proj_bias": 768, "transformer.decoder.layers.3.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.3.self_attn.out_proj.bias": 256, "transformer.decoder.layers.3.norm2.weight": 256, "transformer.decoder.layers.3.norm2.bias": 256, "transformer.decoder.layers.3.linear1.weight": 524288, "transformer.decoder.layers.3.linear1.bias": 2048, "transformer.decoder.layers.3.linear2.weight": 524288, "transformer.decoder.layers.3.linear2.bias": 256, "transformer.decoder.layers.3.norm3.weight": 256, "transformer.decoder.layers.3.norm3.bias": 256, "transformer.decoder.layers.4.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.4.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.4.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.4.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.4.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.4.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.4.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.4.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.4.norm1.weight": 256, "transformer.decoder.layers.4.norm1.bias": 256, "transformer.decoder.layers.4.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.4.ca_text.in_proj_bias": 768, 
"transformer.decoder.layers.4.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.4.ca_text.out_proj.bias": 256, "transformer.decoder.layers.4.catext_norm.weight": 256, "transformer.decoder.layers.4.catext_norm.bias": 256, "transformer.decoder.layers.4.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.4.self_attn.in_proj_bias": 768, "transformer.decoder.layers.4.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.4.self_attn.out_proj.bias": 256, "transformer.decoder.layers.4.norm2.weight": 256, "transformer.decoder.layers.4.norm2.bias": 256, "transformer.decoder.layers.4.linear1.weight": 524288, "transformer.decoder.layers.4.linear1.bias": 2048, "transformer.decoder.layers.4.linear2.weight": 524288, "transformer.decoder.layers.4.linear2.bias": 256, "transformer.decoder.layers.4.norm3.weight": 256, "transformer.decoder.layers.4.norm3.bias": 256, "transformer.decoder.layers.5.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.5.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.5.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.5.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.5.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.5.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.5.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.5.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.5.norm1.weight": 256, "transformer.decoder.layers.5.norm1.bias": 256, "transformer.decoder.layers.5.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.5.ca_text.in_proj_bias": 768, "transformer.decoder.layers.5.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.5.ca_text.out_proj.bias": 256, "transformer.decoder.layers.5.catext_norm.weight": 256, "transformer.decoder.layers.5.catext_norm.bias": 256, "transformer.decoder.layers.5.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.5.self_attn.in_proj_bias": 768, "transformer.decoder.layers.5.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.5.self_attn.out_proj.bias": 256, "transformer.decoder.layers.5.norm2.weight": 256, "transformer.decoder.layers.5.norm2.bias": 256, "transformer.decoder.layers.5.linear1.weight": 524288, "transformer.decoder.layers.5.linear1.bias": 2048, "transformer.decoder.layers.5.linear2.weight": 524288, "transformer.decoder.layers.5.linear2.bias": 256, "transformer.decoder.layers.5.norm3.weight": 256, "transformer.decoder.layers.5.norm3.bias": 256, "transformer.decoder.norm.weight": 256, "transformer.decoder.norm.bias": 256, "transformer.decoder.ref_point_head.layers.0.weight": 131072, "transformer.decoder.ref_point_head.layers.0.bias": 256, "transformer.decoder.ref_point_head.layers.1.weight": 65536, "transformer.decoder.ref_point_head.layers.1.bias": 256, "transformer.decoder.bbox_embed.0.layers.0.weight": 65536, "transformer.decoder.bbox_embed.0.layers.0.bias": 256, "transformer.decoder.bbox_embed.0.layers.1.weight": 65536, "transformer.decoder.bbox_embed.0.layers.1.bias": 256, "transformer.decoder.bbox_embed.0.layers.2.weight": 1024, "transformer.decoder.bbox_embed.0.layers.2.bias": 4, "transformer.tgt_embed.weight": 230400, "transformer.enc_output.weight": 65536, "transformer.enc_output.bias": 256, "transformer.enc_output_norm.weight": 256, "transformer.enc_output_norm.bias": 256, "transformer.enc_out_bbox_embed.layers.0.weight": 65536, "transformer.enc_out_bbox_embed.layers.0.bias": 256, 
"transformer.enc_out_bbox_embed.layers.1.weight": 65536, "transformer.enc_out_bbox_embed.layers.1.bias": 256, "transformer.enc_out_bbox_embed.layers.2.weight": 1024, "transformer.enc_out_bbox_embed.layers.2.bias": 4, "feature_map_proj.weight": 458752, "feature_map_proj.bias": 256, "feature_map_encoder.layers.0.norm1.weight": 256, "feature_map_encoder.layers.0.norm1.bias": 256, "feature_map_encoder.layers.0.norm2.weight": 256, "feature_map_encoder.layers.0.norm2.bias": 256, "feature_map_encoder.layers.0.self_attn.in_proj_weight": 196608, "feature_map_encoder.layers.0.self_attn.in_proj_bias": 768, "feature_map_encoder.layers.0.self_attn.out_proj.weight": 65536, "feature_map_encoder.layers.0.self_attn.out_proj.bias": 256, "feature_map_encoder.layers.0.mlp.linear1.weight": 524288, "feature_map_encoder.layers.0.mlp.linear1.bias": 2048, "feature_map_encoder.layers.0.mlp.linear2.weight": 524288, "feature_map_encoder.layers.0.mlp.linear2.bias": 256, "feature_map_encoder.layers.1.norm1.weight": 256, "feature_map_encoder.layers.1.norm1.bias": 256, "feature_map_encoder.layers.1.norm2.weight": 256, "feature_map_encoder.layers.1.norm2.bias": 256, "feature_map_encoder.layers.1.self_attn.in_proj_weight": 196608, "feature_map_encoder.layers.1.self_attn.in_proj_bias": 768, "feature_map_encoder.layers.1.self_attn.out_proj.weight": 65536, "feature_map_encoder.layers.1.self_attn.out_proj.bias": 256, "feature_map_encoder.layers.1.mlp.linear1.weight": 524288, "feature_map_encoder.layers.1.mlp.linear1.bias": 2048, "feature_map_encoder.layers.1.mlp.linear2.weight": 524288, "feature_map_encoder.layers.1.mlp.linear2.bias": 256, "feature_map_encoder.layers.2.norm1.weight": 256, "feature_map_encoder.layers.2.norm1.bias": 256, "feature_map_encoder.layers.2.norm2.weight": 256, "feature_map_encoder.layers.2.norm2.bias": 256, "feature_map_encoder.layers.2.self_attn.in_proj_weight": 196608, "feature_map_encoder.layers.2.self_attn.in_proj_bias": 768, "feature_map_encoder.layers.2.self_attn.out_proj.weight": 65536, "feature_map_encoder.layers.2.self_attn.out_proj.bias": 256, "feature_map_encoder.layers.2.mlp.linear1.weight": 524288, "feature_map_encoder.layers.2.mlp.linear1.bias": 2048, "feature_map_encoder.layers.2.mlp.linear2.weight": 524288, "feature_map_encoder.layers.2.mlp.linear2.bias": 256, "feature_map_encoder.norm.weight": 256, "feature_map_encoder.norm.bias": 256, "bert.embeddings.word_embeddings.weight": 23440896, "bert.embeddings.position_embeddings.weight": 393216, "bert.embeddings.token_type_embeddings.weight": 1536, "bert.embeddings.LayerNorm.weight": 768, "bert.embeddings.LayerNorm.bias": 768, "bert.encoder.layer.0.attention.self.query.weight": 589824, "bert.encoder.layer.0.attention.self.query.bias": 768, "bert.encoder.layer.0.attention.self.key.weight": 589824, "bert.encoder.layer.0.attention.self.key.bias": 768, "bert.encoder.layer.0.attention.self.value.weight": 589824, "bert.encoder.layer.0.attention.self.value.bias": 768, "bert.encoder.layer.0.attention.output.dense.weight": 589824, "bert.encoder.layer.0.attention.output.dense.bias": 768, "bert.encoder.layer.0.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.0.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.0.intermediate.dense.weight": 2359296, "bert.encoder.layer.0.intermediate.dense.bias": 3072, "bert.encoder.layer.0.output.dense.weight": 2359296, "bert.encoder.layer.0.output.dense.bias": 768, "bert.encoder.layer.0.output.LayerNorm.weight": 768, "bert.encoder.layer.0.output.LayerNorm.bias": 768, 
"bert.encoder.layer.1.attention.self.query.weight": 589824, "bert.encoder.layer.1.attention.self.query.bias": 768, "bert.encoder.layer.1.attention.self.key.weight": 589824, "bert.encoder.layer.1.attention.self.key.bias": 768, "bert.encoder.layer.1.attention.self.value.weight": 589824, "bert.encoder.layer.1.attention.self.value.bias": 768, "bert.encoder.layer.1.attention.output.dense.weight": 589824, "bert.encoder.layer.1.attention.output.dense.bias": 768, "bert.encoder.layer.1.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.1.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.1.intermediate.dense.weight": 2359296, "bert.encoder.layer.1.intermediate.dense.bias": 3072, "bert.encoder.layer.1.output.dense.weight": 2359296, "bert.encoder.layer.1.output.dense.bias": 768, "bert.encoder.layer.1.output.LayerNorm.weight": 768, "bert.encoder.layer.1.output.LayerNorm.bias": 768, "bert.encoder.layer.2.attention.self.query.weight": 589824, "bert.encoder.layer.2.attention.self.query.bias": 768, "bert.encoder.layer.2.attention.self.key.weight": 589824, "bert.encoder.layer.2.attention.self.key.bias": 768, "bert.encoder.layer.2.attention.self.value.weight": 589824, "bert.encoder.layer.2.attention.self.value.bias": 768, "bert.encoder.layer.2.attention.output.dense.weight": 589824, "bert.encoder.layer.2.attention.output.dense.bias": 768, "bert.encoder.layer.2.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.2.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.2.intermediate.dense.weight": 2359296, "bert.encoder.layer.2.intermediate.dense.bias": 3072, "bert.encoder.layer.2.output.dense.weight": 2359296, "bert.encoder.layer.2.output.dense.bias": 768, "bert.encoder.layer.2.output.LayerNorm.weight": 768, "bert.encoder.layer.2.output.LayerNorm.bias": 768, "bert.encoder.layer.3.attention.self.query.weight": 589824, "bert.encoder.layer.3.attention.self.query.bias": 768, "bert.encoder.layer.3.attention.self.key.weight": 589824, "bert.encoder.layer.3.attention.self.key.bias": 768, "bert.encoder.layer.3.attention.self.value.weight": 589824, "bert.encoder.layer.3.attention.self.value.bias": 768, "bert.encoder.layer.3.attention.output.dense.weight": 589824, "bert.encoder.layer.3.attention.output.dense.bias": 768, "bert.encoder.layer.3.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.3.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.3.intermediate.dense.weight": 2359296, "bert.encoder.layer.3.intermediate.dense.bias": 3072, "bert.encoder.layer.3.output.dense.weight": 2359296, "bert.encoder.layer.3.output.dense.bias": 768, "bert.encoder.layer.3.output.LayerNorm.weight": 768, "bert.encoder.layer.3.output.LayerNorm.bias": 768, "bert.encoder.layer.4.attention.self.query.weight": 589824, "bert.encoder.layer.4.attention.self.query.bias": 768, "bert.encoder.layer.4.attention.self.key.weight": 589824, "bert.encoder.layer.4.attention.self.key.bias": 768, "bert.encoder.layer.4.attention.self.value.weight": 589824, "bert.encoder.layer.4.attention.self.value.bias": 768, "bert.encoder.layer.4.attention.output.dense.weight": 589824, "bert.encoder.layer.4.attention.output.dense.bias": 768, "bert.encoder.layer.4.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.4.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.4.intermediate.dense.weight": 2359296, "bert.encoder.layer.4.intermediate.dense.bias": 3072, "bert.encoder.layer.4.output.dense.weight": 2359296, "bert.encoder.layer.4.output.dense.bias": 768, "bert.encoder.layer.4.output.LayerNorm.weight": 768, 
"bert.encoder.layer.4.output.LayerNorm.bias": 768, "bert.encoder.layer.5.attention.self.query.weight": 589824, "bert.encoder.layer.5.attention.self.query.bias": 768, "bert.encoder.layer.5.attention.self.key.weight": 589824, "bert.encoder.layer.5.attention.self.key.bias": 768, "bert.encoder.layer.5.attention.self.value.weight": 589824, "bert.encoder.layer.5.attention.self.value.bias": 768, "bert.encoder.layer.5.attention.output.dense.weight": 589824, "bert.encoder.layer.5.attention.output.dense.bias": 768, "bert.encoder.layer.5.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.5.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.5.intermediate.dense.weight": 2359296, "bert.encoder.layer.5.intermediate.dense.bias": 3072, "bert.encoder.layer.5.output.dense.weight": 2359296, "bert.encoder.layer.5.output.dense.bias": 768, "bert.encoder.layer.5.output.LayerNorm.weight": 768, "bert.encoder.layer.5.output.LayerNorm.bias": 768, "bert.encoder.layer.6.attention.self.query.weight": 589824, "bert.encoder.layer.6.attention.self.query.bias": 768, "bert.encoder.layer.6.attention.self.key.weight": 589824, "bert.encoder.layer.6.attention.self.key.bias": 768, "bert.encoder.layer.6.attention.self.value.weight": 589824, "bert.encoder.layer.6.attention.self.value.bias": 768, "bert.encoder.layer.6.attention.output.dense.weight": 589824, "bert.encoder.layer.6.attention.output.dense.bias": 768, "bert.encoder.layer.6.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.6.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.6.intermediate.dense.weight": 2359296, "bert.encoder.layer.6.intermediate.dense.bias": 3072, "bert.encoder.layer.6.output.dense.weight": 2359296, "bert.encoder.layer.6.output.dense.bias": 768, "bert.encoder.layer.6.output.LayerNorm.weight": 768, "bert.encoder.layer.6.output.LayerNorm.bias": 768, "bert.encoder.layer.7.attention.self.query.weight": 589824, "bert.encoder.layer.7.attention.self.query.bias": 768, "bert.encoder.layer.7.attention.self.key.weight": 589824, "bert.encoder.layer.7.attention.self.key.bias": 768, "bert.encoder.layer.7.attention.self.value.weight": 589824, "bert.encoder.layer.7.attention.self.value.bias": 768, "bert.encoder.layer.7.attention.output.dense.weight": 589824, "bert.encoder.layer.7.attention.output.dense.bias": 768, "bert.encoder.layer.7.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.7.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.7.intermediate.dense.weight": 2359296, "bert.encoder.layer.7.intermediate.dense.bias": 3072, "bert.encoder.layer.7.output.dense.weight": 2359296, "bert.encoder.layer.7.output.dense.bias": 768, "bert.encoder.layer.7.output.LayerNorm.weight": 768, "bert.encoder.layer.7.output.LayerNorm.bias": 768, "bert.encoder.layer.8.attention.self.query.weight": 589824, "bert.encoder.layer.8.attention.self.query.bias": 768, "bert.encoder.layer.8.attention.self.key.weight": 589824, "bert.encoder.layer.8.attention.self.key.bias": 768, "bert.encoder.layer.8.attention.self.value.weight": 589824, "bert.encoder.layer.8.attention.self.value.bias": 768, "bert.encoder.layer.8.attention.output.dense.weight": 589824, "bert.encoder.layer.8.attention.output.dense.bias": 768, "bert.encoder.layer.8.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.8.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.8.intermediate.dense.weight": 2359296, "bert.encoder.layer.8.intermediate.dense.bias": 3072, "bert.encoder.layer.8.output.dense.weight": 2359296, "bert.encoder.layer.8.output.dense.bias": 768, 
"bert.encoder.layer.8.output.LayerNorm.weight": 768, "bert.encoder.layer.8.output.LayerNorm.bias": 768, "bert.encoder.layer.9.attention.self.query.weight": 589824, "bert.encoder.layer.9.attention.self.query.bias": 768, "bert.encoder.layer.9.attention.self.key.weight": 589824, "bert.encoder.layer.9.attention.self.key.bias": 768, "bert.encoder.layer.9.attention.self.value.weight": 589824, "bert.encoder.layer.9.attention.self.value.bias": 768, "bert.encoder.layer.9.attention.output.dense.weight": 589824, "bert.encoder.layer.9.attention.output.dense.bias": 768, "bert.encoder.layer.9.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.9.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.9.intermediate.dense.weight": 2359296, "bert.encoder.layer.9.intermediate.dense.bias": 3072, "bert.encoder.layer.9.output.dense.weight": 2359296, "bert.encoder.layer.9.output.dense.bias": 768, "bert.encoder.layer.9.output.LayerNorm.weight": 768, "bert.encoder.layer.9.output.LayerNorm.bias": 768, "bert.encoder.layer.10.attention.self.query.weight": 589824, "bert.encoder.layer.10.attention.self.query.bias": 768, "bert.encoder.layer.10.attention.self.key.weight": 589824, "bert.encoder.layer.10.attention.self.key.bias": 768, "bert.encoder.layer.10.attention.self.value.weight": 589824, "bert.encoder.layer.10.attention.self.value.bias": 768, "bert.encoder.layer.10.attention.output.dense.weight": 589824, "bert.encoder.layer.10.attention.output.dense.bias": 768, "bert.encoder.layer.10.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.10.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.10.intermediate.dense.weight": 2359296, "bert.encoder.layer.10.intermediate.dense.bias": 3072, "bert.encoder.layer.10.output.dense.weight": 2359296, "bert.encoder.layer.10.output.dense.bias": 768, "bert.encoder.layer.10.output.LayerNorm.weight": 768, "bert.encoder.layer.10.output.LayerNorm.bias": 768, "bert.encoder.layer.11.attention.self.query.weight": 589824, "bert.encoder.layer.11.attention.self.query.bias": 768, "bert.encoder.layer.11.attention.self.key.weight": 589824, "bert.encoder.layer.11.attention.self.key.bias": 768, "bert.encoder.layer.11.attention.self.value.weight": 589824, "bert.encoder.layer.11.attention.self.value.bias": 768, "bert.encoder.layer.11.attention.output.dense.weight": 589824, "bert.encoder.layer.11.attention.output.dense.bias": 768, "bert.encoder.layer.11.attention.output.LayerNorm.weight": 768, "bert.encoder.layer.11.attention.output.LayerNorm.bias": 768, "bert.encoder.layer.11.intermediate.dense.weight": 2359296, "bert.encoder.layer.11.intermediate.dense.bias": 3072, "bert.encoder.layer.11.output.dense.weight": 2359296, "bert.encoder.layer.11.output.dense.bias": 768, "bert.encoder.layer.11.output.LayerNorm.weight": 768, "bert.encoder.layer.11.output.LayerNorm.bias": 768, "feat_map.weight": 196608, "feat_map.bias": 256, "input_proj.0.0.weight": 65536, "input_proj.0.0.bias": 256, "input_proj.0.1.weight": 256, "input_proj.0.1.bias": 256, "input_proj.1.0.weight": 131072, "input_proj.1.0.bias": 256, "input_proj.1.1.weight": 256, "input_proj.1.1.bias": 256, "input_proj.2.0.weight": 262144, "input_proj.2.0.bias": 256, "input_proj.2.1.weight": 256, "input_proj.2.1.bias": 256, "input_proj.3.0.weight": 2359296, "input_proj.3.0.bias": 256, "input_proj.3.1.weight": 256, "input_proj.3.1.bias": 256, "backbone.0.patch_embed.proj.weight": 6144, "backbone.0.patch_embed.proj.bias": 128, "backbone.0.patch_embed.norm.weight": 128, "backbone.0.patch_embed.norm.bias": 128, 
"backbone.0.layers.0.blocks.0.norm1.weight": 128, "backbone.0.layers.0.blocks.0.norm1.bias": 128, "backbone.0.layers.0.blocks.0.attn.relative_position_bias_table": 2116, "backbone.0.layers.0.blocks.0.attn.qkv.weight": 49152, "backbone.0.layers.0.blocks.0.attn.qkv.bias": 384, "backbone.0.layers.0.blocks.0.attn.proj.weight": 16384, "backbone.0.layers.0.blocks.0.attn.proj.bias": 128, "backbone.0.layers.0.blocks.0.norm2.weight": 128, "backbone.0.layers.0.blocks.0.norm2.bias": 128, "backbone.0.layers.0.blocks.0.mlp.fc1.weight": 65536, "backbone.0.layers.0.blocks.0.mlp.fc1.bias": 512, "backbone.0.layers.0.blocks.0.mlp.fc2.weight": 65536, "backbone.0.layers.0.blocks.0.mlp.fc2.bias": 128, "backbone.0.layers.0.blocks.1.norm1.weight": 128, "backbone.0.layers.0.blocks.1.norm1.bias": 128, "backbone.0.layers.0.blocks.1.attn.relative_position_bias_table": 2116, "backbone.0.layers.0.blocks.1.attn.qkv.weight": 49152, "backbone.0.layers.0.blocks.1.attn.qkv.bias": 384, "backbone.0.layers.0.blocks.1.attn.proj.weight": 16384, "backbone.0.layers.0.blocks.1.attn.proj.bias": 128, "backbone.0.layers.0.blocks.1.norm2.weight": 128, "backbone.0.layers.0.blocks.1.norm2.bias": 128, "backbone.0.layers.0.blocks.1.mlp.fc1.weight": 65536, "backbone.0.layers.0.blocks.1.mlp.fc1.bias": 512, "backbone.0.layers.0.blocks.1.mlp.fc2.weight": 65536, "backbone.0.layers.0.blocks.1.mlp.fc2.bias": 128, "backbone.0.layers.0.downsample.reduction.weight": 131072, "backbone.0.layers.0.downsample.norm.weight": 512, "backbone.0.layers.0.downsample.norm.bias": 512, "backbone.0.layers.1.blocks.0.norm1.weight": 256, "backbone.0.layers.1.blocks.0.norm1.bias": 256, "backbone.0.layers.1.blocks.0.attn.relative_position_bias_table": 4232, "backbone.0.layers.1.blocks.0.attn.qkv.weight": 196608, "backbone.0.layers.1.blocks.0.attn.qkv.bias": 768, "backbone.0.layers.1.blocks.0.attn.proj.weight": 65536, "backbone.0.layers.1.blocks.0.attn.proj.bias": 256, "backbone.0.layers.1.blocks.0.norm2.weight": 256, "backbone.0.layers.1.blocks.0.norm2.bias": 256, "backbone.0.layers.1.blocks.0.mlp.fc1.weight": 262144, "backbone.0.layers.1.blocks.0.mlp.fc1.bias": 1024, "backbone.0.layers.1.blocks.0.mlp.fc2.weight": 262144, "backbone.0.layers.1.blocks.0.mlp.fc2.bias": 256, "backbone.0.layers.1.blocks.1.norm1.weight": 256, "backbone.0.layers.1.blocks.1.norm1.bias": 256, "backbone.0.layers.1.blocks.1.attn.relative_position_bias_table": 4232, "backbone.0.layers.1.blocks.1.attn.qkv.weight": 196608, "backbone.0.layers.1.blocks.1.attn.qkv.bias": 768, "backbone.0.layers.1.blocks.1.attn.proj.weight": 65536, "backbone.0.layers.1.blocks.1.attn.proj.bias": 256, "backbone.0.layers.1.blocks.1.norm2.weight": 256, "backbone.0.layers.1.blocks.1.norm2.bias": 256, "backbone.0.layers.1.blocks.1.mlp.fc1.weight": 262144, "backbone.0.layers.1.blocks.1.mlp.fc1.bias": 1024, "backbone.0.layers.1.blocks.1.mlp.fc2.weight": 262144, "backbone.0.layers.1.blocks.1.mlp.fc2.bias": 256, "backbone.0.layers.1.downsample.reduction.weight": 524288, "backbone.0.layers.1.downsample.norm.weight": 1024, "backbone.0.layers.1.downsample.norm.bias": 1024, "backbone.0.layers.2.blocks.0.norm1.weight": 512, "backbone.0.layers.2.blocks.0.norm1.bias": 512, "backbone.0.layers.2.blocks.0.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.0.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.0.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.0.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.0.attn.proj.bias": 512, "backbone.0.layers.2.blocks.0.norm2.weight": 512, 
"backbone.0.layers.2.blocks.0.norm2.bias": 512, "backbone.0.layers.2.blocks.0.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.0.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.0.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.0.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.1.norm1.weight": 512, "backbone.0.layers.2.blocks.1.norm1.bias": 512, "backbone.0.layers.2.blocks.1.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.1.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.1.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.1.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.1.attn.proj.bias": 512, "backbone.0.layers.2.blocks.1.norm2.weight": 512, "backbone.0.layers.2.blocks.1.norm2.bias": 512, "backbone.0.layers.2.blocks.1.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.1.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.1.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.1.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.2.norm1.weight": 512, "backbone.0.layers.2.blocks.2.norm1.bias": 512, "backbone.0.layers.2.blocks.2.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.2.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.2.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.2.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.2.attn.proj.bias": 512, "backbone.0.layers.2.blocks.2.norm2.weight": 512, "backbone.0.layers.2.blocks.2.norm2.bias": 512, "backbone.0.layers.2.blocks.2.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.2.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.2.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.2.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.3.norm1.weight": 512, "backbone.0.layers.2.blocks.3.norm1.bias": 512, "backbone.0.layers.2.blocks.3.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.3.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.3.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.3.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.3.attn.proj.bias": 512, "backbone.0.layers.2.blocks.3.norm2.weight": 512, "backbone.0.layers.2.blocks.3.norm2.bias": 512, "backbone.0.layers.2.blocks.3.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.3.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.3.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.3.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.4.norm1.weight": 512, "backbone.0.layers.2.blocks.4.norm1.bias": 512, "backbone.0.layers.2.blocks.4.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.4.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.4.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.4.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.4.attn.proj.bias": 512, "backbone.0.layers.2.blocks.4.norm2.weight": 512, "backbone.0.layers.2.blocks.4.norm2.bias": 512, "backbone.0.layers.2.blocks.4.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.4.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.4.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.4.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.5.norm1.weight": 512, "backbone.0.layers.2.blocks.5.norm1.bias": 512, "backbone.0.layers.2.blocks.5.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.5.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.5.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.5.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.5.attn.proj.bias": 512, "backbone.0.layers.2.blocks.5.norm2.weight": 512, "backbone.0.layers.2.blocks.5.norm2.bias": 512, 
"backbone.0.layers.2.blocks.5.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.5.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.5.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.5.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.6.norm1.weight": 512, "backbone.0.layers.2.blocks.6.norm1.bias": 512, "backbone.0.layers.2.blocks.6.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.6.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.6.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.6.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.6.attn.proj.bias": 512, "backbone.0.layers.2.blocks.6.norm2.weight": 512, "backbone.0.layers.2.blocks.6.norm2.bias": 512, "backbone.0.layers.2.blocks.6.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.6.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.6.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.6.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.7.norm1.weight": 512, "backbone.0.layers.2.blocks.7.norm1.bias": 512, "backbone.0.layers.2.blocks.7.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.7.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.7.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.7.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.7.attn.proj.bias": 512, "backbone.0.layers.2.blocks.7.norm2.weight": 512, "backbone.0.layers.2.blocks.7.norm2.bias": 512, "backbone.0.layers.2.blocks.7.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.7.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.7.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.7.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.8.norm1.weight": 512, "backbone.0.layers.2.blocks.8.norm1.bias": 512, "backbone.0.layers.2.blocks.8.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.8.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.8.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.8.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.8.attn.proj.bias": 512, "backbone.0.layers.2.blocks.8.norm2.weight": 512, "backbone.0.layers.2.blocks.8.norm2.bias": 512, "backbone.0.layers.2.blocks.8.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.8.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.8.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.8.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.9.norm1.weight": 512, "backbone.0.layers.2.blocks.9.norm1.bias": 512, "backbone.0.layers.2.blocks.9.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.9.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.9.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.9.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.9.attn.proj.bias": 512, "backbone.0.layers.2.blocks.9.norm2.weight": 512, "backbone.0.layers.2.blocks.9.norm2.bias": 512, "backbone.0.layers.2.blocks.9.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.9.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.9.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.9.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.10.norm1.weight": 512, "backbone.0.layers.2.blocks.10.norm1.bias": 512, "backbone.0.layers.2.blocks.10.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.10.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.10.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.10.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.10.attn.proj.bias": 512, "backbone.0.layers.2.blocks.10.norm2.weight": 512, "backbone.0.layers.2.blocks.10.norm2.bias": 512, "backbone.0.layers.2.blocks.10.mlp.fc1.weight": 
1048576, "backbone.0.layers.2.blocks.10.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.10.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.10.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.11.norm1.weight": 512, "backbone.0.layers.2.blocks.11.norm1.bias": 512, "backbone.0.layers.2.blocks.11.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.11.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.11.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.11.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.11.attn.proj.bias": 512, "backbone.0.layers.2.blocks.11.norm2.weight": 512, "backbone.0.layers.2.blocks.11.norm2.bias": 512, "backbone.0.layers.2.blocks.11.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.11.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.11.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.11.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.12.norm1.weight": 512, "backbone.0.layers.2.blocks.12.norm1.bias": 512, "backbone.0.layers.2.blocks.12.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.12.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.12.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.12.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.12.attn.proj.bias": 512, "backbone.0.layers.2.blocks.12.norm2.weight": 512, "backbone.0.layers.2.blocks.12.norm2.bias": 512, "backbone.0.layers.2.blocks.12.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.12.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.12.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.12.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.13.norm1.weight": 512, "backbone.0.layers.2.blocks.13.norm1.bias": 512, "backbone.0.layers.2.blocks.13.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.13.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.13.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.13.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.13.attn.proj.bias": 512, "backbone.0.layers.2.blocks.13.norm2.weight": 512, "backbone.0.layers.2.blocks.13.norm2.bias": 512, "backbone.0.layers.2.blocks.13.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.13.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.13.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.13.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.14.norm1.weight": 512, "backbone.0.layers.2.blocks.14.norm1.bias": 512, "backbone.0.layers.2.blocks.14.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.14.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.14.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.14.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.14.attn.proj.bias": 512, "backbone.0.layers.2.blocks.14.norm2.weight": 512, "backbone.0.layers.2.blocks.14.norm2.bias": 512, "backbone.0.layers.2.blocks.14.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.14.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.14.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.14.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.15.norm1.weight": 512, "backbone.0.layers.2.blocks.15.norm1.bias": 512, "backbone.0.layers.2.blocks.15.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.15.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.15.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.15.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.15.attn.proj.bias": 512, "backbone.0.layers.2.blocks.15.norm2.weight": 512, "backbone.0.layers.2.blocks.15.norm2.bias": 512, "backbone.0.layers.2.blocks.15.mlp.fc1.weight": 
1048576, "backbone.0.layers.2.blocks.15.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.15.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.15.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.16.norm1.weight": 512, "backbone.0.layers.2.blocks.16.norm1.bias": 512, "backbone.0.layers.2.blocks.16.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.16.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.16.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.16.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.16.attn.proj.bias": 512, "backbone.0.layers.2.blocks.16.norm2.weight": 512, "backbone.0.layers.2.blocks.16.norm2.bias": 512, "backbone.0.layers.2.blocks.16.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.16.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.16.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.16.mlp.fc2.bias": 512, "backbone.0.layers.2.blocks.17.norm1.weight": 512, "backbone.0.layers.2.blocks.17.norm1.bias": 512, "backbone.0.layers.2.blocks.17.attn.relative_position_bias_table": 8464, "backbone.0.layers.2.blocks.17.attn.qkv.weight": 786432, "backbone.0.layers.2.blocks.17.attn.qkv.bias": 1536, "backbone.0.layers.2.blocks.17.attn.proj.weight": 262144, "backbone.0.layers.2.blocks.17.attn.proj.bias": 512, "backbone.0.layers.2.blocks.17.norm2.weight": 512, "backbone.0.layers.2.blocks.17.norm2.bias": 512, "backbone.0.layers.2.blocks.17.mlp.fc1.weight": 1048576, "backbone.0.layers.2.blocks.17.mlp.fc1.bias": 2048, "backbone.0.layers.2.blocks.17.mlp.fc2.weight": 1048576, "backbone.0.layers.2.blocks.17.mlp.fc2.bias": 512, "backbone.0.layers.2.downsample.reduction.weight": 2097152, "backbone.0.layers.2.downsample.norm.weight": 2048, "backbone.0.layers.2.downsample.norm.bias": 2048, "backbone.0.layers.3.blocks.0.norm1.weight": 1024, "backbone.0.layers.3.blocks.0.norm1.bias": 1024, "backbone.0.layers.3.blocks.0.attn.relative_position_bias_table": 16928, "backbone.0.layers.3.blocks.0.attn.qkv.weight": 3145728, "backbone.0.layers.3.blocks.0.attn.qkv.bias": 3072, "backbone.0.layers.3.blocks.0.attn.proj.weight": 1048576, "backbone.0.layers.3.blocks.0.attn.proj.bias": 1024, "backbone.0.layers.3.blocks.0.norm2.weight": 1024, "backbone.0.layers.3.blocks.0.norm2.bias": 1024, "backbone.0.layers.3.blocks.0.mlp.fc1.weight": 4194304, "backbone.0.layers.3.blocks.0.mlp.fc1.bias": 4096, "backbone.0.layers.3.blocks.0.mlp.fc2.weight": 4194304, "backbone.0.layers.3.blocks.0.mlp.fc2.bias": 1024, "backbone.0.layers.3.blocks.1.norm1.weight": 1024, "backbone.0.layers.3.blocks.1.norm1.bias": 1024, "backbone.0.layers.3.blocks.1.attn.relative_position_bias_table": 16928, "backbone.0.layers.3.blocks.1.attn.qkv.weight": 3145728, "backbone.0.layers.3.blocks.1.attn.qkv.bias": 3072, "backbone.0.layers.3.blocks.1.attn.proj.weight": 1048576, "backbone.0.layers.3.blocks.1.attn.proj.bias": 1024, "backbone.0.layers.3.blocks.1.norm2.weight": 1024, "backbone.0.layers.3.blocks.1.norm2.bias": 1024, "backbone.0.layers.3.blocks.1.mlp.fc1.weight": 4194304, "backbone.0.layers.3.blocks.1.mlp.fc1.bias": 4096, "backbone.0.layers.3.blocks.1.mlp.fc2.weight": 4194304, "backbone.0.layers.3.blocks.1.mlp.fc2.bias": 1024, "backbone.0.norm1.weight": 256, "backbone.0.norm1.bias": 256, "backbone.0.norm2.weight": 512, "backbone.0.norm2.bias": 512, "backbone.0.norm3.weight": 1024, "backbone.0.norm3.bias": 1024 }[0m [32mINFO [0m [32m2024-03-29 17:46:28,634 | [34mparams after freezing: { "transformer.level_embed": 1024, "transformer.encoder.layers.0.self_attn.sampling_offsets.weight": 65536, 
"transformer.encoder.layers.0.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.0.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.0.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.0.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.0.self_attn.value_proj.bias": 256, "transformer.encoder.layers.0.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.0.self_attn.output_proj.bias": 256, "transformer.encoder.layers.0.norm1.weight": 256, "transformer.encoder.layers.0.norm1.bias": 256, "transformer.encoder.layers.0.linear1.weight": 524288, "transformer.encoder.layers.0.linear1.bias": 2048, "transformer.encoder.layers.0.linear2.weight": 524288, "transformer.encoder.layers.0.linear2.bias": 256, "transformer.encoder.layers.0.norm2.weight": 256, "transformer.encoder.layers.0.norm2.bias": 256, "transformer.encoder.layers.1.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.1.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.1.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.1.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.1.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.1.self_attn.value_proj.bias": 256, "transformer.encoder.layers.1.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.1.self_attn.output_proj.bias": 256, "transformer.encoder.layers.1.norm1.weight": 256, "transformer.encoder.layers.1.norm1.bias": 256, "transformer.encoder.layers.1.linear1.weight": 524288, "transformer.encoder.layers.1.linear1.bias": 2048, "transformer.encoder.layers.1.linear2.weight": 524288, "transformer.encoder.layers.1.linear2.bias": 256, "transformer.encoder.layers.1.norm2.weight": 256, "transformer.encoder.layers.1.norm2.bias": 256, "transformer.encoder.layers.2.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.2.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.2.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.2.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.2.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.2.self_attn.value_proj.bias": 256, "transformer.encoder.layers.2.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.2.self_attn.output_proj.bias": 256, "transformer.encoder.layers.2.norm1.weight": 256, "transformer.encoder.layers.2.norm1.bias": 256, "transformer.encoder.layers.2.linear1.weight": 524288, "transformer.encoder.layers.2.linear1.bias": 2048, "transformer.encoder.layers.2.linear2.weight": 524288, "transformer.encoder.layers.2.linear2.bias": 256, "transformer.encoder.layers.2.norm2.weight": 256, "transformer.encoder.layers.2.norm2.bias": 256, "transformer.encoder.layers.3.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.3.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.3.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.3.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.3.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.3.self_attn.value_proj.bias": 256, "transformer.encoder.layers.3.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.3.self_attn.output_proj.bias": 256, "transformer.encoder.layers.3.norm1.weight": 256, "transformer.encoder.layers.3.norm1.bias": 256, "transformer.encoder.layers.3.linear1.weight": 524288, "transformer.encoder.layers.3.linear1.bias": 2048, 
"transformer.encoder.layers.3.linear2.weight": 524288, "transformer.encoder.layers.3.linear2.bias": 256, "transformer.encoder.layers.3.norm2.weight": 256, "transformer.encoder.layers.3.norm2.bias": 256, "transformer.encoder.layers.4.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.4.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.4.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.4.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.4.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.4.self_attn.value_proj.bias": 256, "transformer.encoder.layers.4.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.4.self_attn.output_proj.bias": 256, "transformer.encoder.layers.4.norm1.weight": 256, "transformer.encoder.layers.4.norm1.bias": 256, "transformer.encoder.layers.4.linear1.weight": 524288, "transformer.encoder.layers.4.linear1.bias": 2048, "transformer.encoder.layers.4.linear2.weight": 524288, "transformer.encoder.layers.4.linear2.bias": 256, "transformer.encoder.layers.4.norm2.weight": 256, "transformer.encoder.layers.4.norm2.bias": 256, "transformer.encoder.layers.5.self_attn.sampling_offsets.weight": 65536, "transformer.encoder.layers.5.self_attn.sampling_offsets.bias": 256, "transformer.encoder.layers.5.self_attn.attention_weights.weight": 32768, "transformer.encoder.layers.5.self_attn.attention_weights.bias": 128, "transformer.encoder.layers.5.self_attn.value_proj.weight": 65536, "transformer.encoder.layers.5.self_attn.value_proj.bias": 256, "transformer.encoder.layers.5.self_attn.output_proj.weight": 65536, "transformer.encoder.layers.5.self_attn.output_proj.bias": 256, "transformer.encoder.layers.5.norm1.weight": 256, "transformer.encoder.layers.5.norm1.bias": 256, "transformer.encoder.layers.5.linear1.weight": 524288, "transformer.encoder.layers.5.linear1.bias": 2048, "transformer.encoder.layers.5.linear2.weight": 524288, "transformer.encoder.layers.5.linear2.bias": 256, "transformer.encoder.layers.5.norm2.weight": 256, "transformer.encoder.layers.5.norm2.bias": 256, "transformer.encoder.text_layers.0.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.0.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.0.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.0.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.0.linear1.weight": 262144, "transformer.encoder.text_layers.0.linear1.bias": 1024, "transformer.encoder.text_layers.0.linear2.weight": 262144, "transformer.encoder.text_layers.0.linear2.bias": 256, "transformer.encoder.text_layers.0.norm1.weight": 256, "transformer.encoder.text_layers.0.norm1.bias": 256, "transformer.encoder.text_layers.0.norm2.weight": 256, "transformer.encoder.text_layers.0.norm2.bias": 256, "transformer.encoder.text_layers.1.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.1.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.1.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.1.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.1.linear1.weight": 262144, "transformer.encoder.text_layers.1.linear1.bias": 1024, "transformer.encoder.text_layers.1.linear2.weight": 262144, "transformer.encoder.text_layers.1.linear2.bias": 256, "transformer.encoder.text_layers.1.norm1.weight": 256, "transformer.encoder.text_layers.1.norm1.bias": 256, "transformer.encoder.text_layers.1.norm2.weight": 256, "transformer.encoder.text_layers.1.norm2.bias": 256, 
"transformer.encoder.text_layers.2.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.2.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.2.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.2.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.2.linear1.weight": 262144, "transformer.encoder.text_layers.2.linear1.bias": 1024, "transformer.encoder.text_layers.2.linear2.weight": 262144, "transformer.encoder.text_layers.2.linear2.bias": 256, "transformer.encoder.text_layers.2.norm1.weight": 256, "transformer.encoder.text_layers.2.norm1.bias": 256, "transformer.encoder.text_layers.2.norm2.weight": 256, "transformer.encoder.text_layers.2.norm2.bias": 256, "transformer.encoder.text_layers.3.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.3.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.3.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.3.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.3.linear1.weight": 262144, "transformer.encoder.text_layers.3.linear1.bias": 1024, "transformer.encoder.text_layers.3.linear2.weight": 262144, "transformer.encoder.text_layers.3.linear2.bias": 256, "transformer.encoder.text_layers.3.norm1.weight": 256, "transformer.encoder.text_layers.3.norm1.bias": 256, "transformer.encoder.text_layers.3.norm2.weight": 256, "transformer.encoder.text_layers.3.norm2.bias": 256, "transformer.encoder.text_layers.4.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.4.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.4.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.4.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.4.linear1.weight": 262144, "transformer.encoder.text_layers.4.linear1.bias": 1024, "transformer.encoder.text_layers.4.linear2.weight": 262144, "transformer.encoder.text_layers.4.linear2.bias": 256, "transformer.encoder.text_layers.4.norm1.weight": 256, "transformer.encoder.text_layers.4.norm1.bias": 256, "transformer.encoder.text_layers.4.norm2.weight": 256, "transformer.encoder.text_layers.4.norm2.bias": 256, "transformer.encoder.text_layers.5.self_attn.in_proj_weight": 196608, "transformer.encoder.text_layers.5.self_attn.in_proj_bias": 768, "transformer.encoder.text_layers.5.self_attn.out_proj.weight": 65536, "transformer.encoder.text_layers.5.self_attn.out_proj.bias": 256, "transformer.encoder.text_layers.5.linear1.weight": 262144, "transformer.encoder.text_layers.5.linear1.bias": 1024, "transformer.encoder.text_layers.5.linear2.weight": 262144, "transformer.encoder.text_layers.5.linear2.bias": 256, "transformer.encoder.text_layers.5.norm1.weight": 256, "transformer.encoder.text_layers.5.norm1.bias": 256, "transformer.encoder.text_layers.5.norm2.weight": 256, "transformer.encoder.text_layers.5.norm2.bias": 256, "transformer.encoder.fusion_layers.0.gamma_v": 256, "transformer.encoder.fusion_layers.0.gamma_l": 256, "transformer.encoder.fusion_layers.0.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.0.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.0.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.0.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.0.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.values_v_proj.weight": 
262144, "transformer.encoder.fusion_layers.0.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.0.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.0.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.0.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.1.gamma_v": 256, "transformer.encoder.fusion_layers.1.gamma_l": 256, "transformer.encoder.fusion_layers.1.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.1.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.1.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.1.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.1.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.1.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.1.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.1.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.2.gamma_v": 256, "transformer.encoder.fusion_layers.2.gamma_l": 256, "transformer.encoder.fusion_layers.2.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.2.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.2.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.2.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.2.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.2.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.2.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.2.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.3.gamma_v": 256, "transformer.encoder.fusion_layers.3.gamma_l": 256, "transformer.encoder.fusion_layers.3.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.3.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.3.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.3.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.3.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.values_v_proj.weight": 262144, 
"transformer.encoder.fusion_layers.3.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.3.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.3.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.3.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.4.gamma_v": 256, "transformer.encoder.fusion_layers.4.gamma_l": 256, "transformer.encoder.fusion_layers.4.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.4.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.4.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.4.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.4.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.4.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.4.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.4.attn.out_l_proj.bias": 256, "transformer.encoder.fusion_layers.5.gamma_v": 256, "transformer.encoder.fusion_layers.5.gamma_l": 256, "transformer.encoder.fusion_layers.5.layer_norm_v.weight": 256, "transformer.encoder.fusion_layers.5.layer_norm_v.bias": 256, "transformer.encoder.fusion_layers.5.layer_norm_l.weight": 256, "transformer.encoder.fusion_layers.5.layer_norm_l.bias": 256, "transformer.encoder.fusion_layers.5.attn.v_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.v_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.l_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.l_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.values_v_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.values_v_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.values_l_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.values_l_proj.bias": 1024, "transformer.encoder.fusion_layers.5.attn.out_v_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.out_v_proj.bias": 256, "transformer.encoder.fusion_layers.5.attn.out_l_proj.weight": 262144, "transformer.encoder.fusion_layers.5.attn.out_l_proj.bias": 256, "transformer.decoder.layers.0.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.0.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.0.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.0.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.0.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.0.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.0.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.0.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.0.norm1.weight": 256, "transformer.decoder.layers.0.norm1.bias": 256, "transformer.decoder.layers.0.ca_text.in_proj_weight": 196608, 
"transformer.decoder.layers.0.ca_text.in_proj_bias": 768, "transformer.decoder.layers.0.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.0.ca_text.out_proj.bias": 256, "transformer.decoder.layers.0.catext_norm.weight": 256, "transformer.decoder.layers.0.catext_norm.bias": 256, "transformer.decoder.layers.0.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.0.self_attn.in_proj_bias": 768, "transformer.decoder.layers.0.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.0.self_attn.out_proj.bias": 256, "transformer.decoder.layers.0.norm2.weight": 256, "transformer.decoder.layers.0.norm2.bias": 256, "transformer.decoder.layers.0.linear1.weight": 524288, "transformer.decoder.layers.0.linear1.bias": 2048, "transformer.decoder.layers.0.linear2.weight": 524288, "transformer.decoder.layers.0.linear2.bias": 256, "transformer.decoder.layers.0.norm3.weight": 256, "transformer.decoder.layers.0.norm3.bias": 256, "transformer.decoder.layers.1.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.1.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.1.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.1.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.1.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.1.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.1.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.1.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.1.norm1.weight": 256, "transformer.decoder.layers.1.norm1.bias": 256, "transformer.decoder.layers.1.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.1.ca_text.in_proj_bias": 768, "transformer.decoder.layers.1.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.1.ca_text.out_proj.bias": 256, "transformer.decoder.layers.1.catext_norm.weight": 256, "transformer.decoder.layers.1.catext_norm.bias": 256, "transformer.decoder.layers.1.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.1.self_attn.in_proj_bias": 768, "transformer.decoder.layers.1.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.1.self_attn.out_proj.bias": 256, "transformer.decoder.layers.1.norm2.weight": 256, "transformer.decoder.layers.1.norm2.bias": 256, "transformer.decoder.layers.1.linear1.weight": 524288, "transformer.decoder.layers.1.linear1.bias": 2048, "transformer.decoder.layers.1.linear2.weight": 524288, "transformer.decoder.layers.1.linear2.bias": 256, "transformer.decoder.layers.1.norm3.weight": 256, "transformer.decoder.layers.1.norm3.bias": 256, "transformer.decoder.layers.2.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.2.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.2.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.2.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.2.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.2.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.2.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.2.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.2.norm1.weight": 256, "transformer.decoder.layers.2.norm1.bias": 256, "transformer.decoder.layers.2.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.2.ca_text.in_proj_bias": 768, "transformer.decoder.layers.2.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.2.ca_text.out_proj.bias": 256, 
"transformer.decoder.layers.2.catext_norm.weight": 256, "transformer.decoder.layers.2.catext_norm.bias": 256, "transformer.decoder.layers.2.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.2.self_attn.in_proj_bias": 768, "transformer.decoder.layers.2.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.2.self_attn.out_proj.bias": 256, "transformer.decoder.layers.2.norm2.weight": 256, "transformer.decoder.layers.2.norm2.bias": 256, "transformer.decoder.layers.2.linear1.weight": 524288, "transformer.decoder.layers.2.linear1.bias": 2048, "transformer.decoder.layers.2.linear2.weight": 524288, "transformer.decoder.layers.2.linear2.bias": 256, "transformer.decoder.layers.2.norm3.weight": 256, "transformer.decoder.layers.2.norm3.bias": 256, "transformer.decoder.layers.3.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.3.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.3.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.3.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.3.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.3.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.3.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.3.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.3.norm1.weight": 256, "transformer.decoder.layers.3.norm1.bias": 256, "transformer.decoder.layers.3.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.3.ca_text.in_proj_bias": 768, "transformer.decoder.layers.3.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.3.ca_text.out_proj.bias": 256, "transformer.decoder.layers.3.catext_norm.weight": 256, "transformer.decoder.layers.3.catext_norm.bias": 256, "transformer.decoder.layers.3.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.3.self_attn.in_proj_bias": 768, "transformer.decoder.layers.3.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.3.self_attn.out_proj.bias": 256, "transformer.decoder.layers.3.norm2.weight": 256, "transformer.decoder.layers.3.norm2.bias": 256, "transformer.decoder.layers.3.linear1.weight": 524288, "transformer.decoder.layers.3.linear1.bias": 2048, "transformer.decoder.layers.3.linear2.weight": 524288, "transformer.decoder.layers.3.linear2.bias": 256, "transformer.decoder.layers.3.norm3.weight": 256, "transformer.decoder.layers.3.norm3.bias": 256, "transformer.decoder.layers.4.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.4.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.4.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.4.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.4.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.4.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.4.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.4.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.4.norm1.weight": 256, "transformer.decoder.layers.4.norm1.bias": 256, "transformer.decoder.layers.4.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.4.ca_text.in_proj_bias": 768, "transformer.decoder.layers.4.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.4.ca_text.out_proj.bias": 256, "transformer.decoder.layers.4.catext_norm.weight": 256, "transformer.decoder.layers.4.catext_norm.bias": 256, "transformer.decoder.layers.4.self_attn.in_proj_weight": 196608, 
"transformer.decoder.layers.4.self_attn.in_proj_bias": 768, "transformer.decoder.layers.4.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.4.self_attn.out_proj.bias": 256, "transformer.decoder.layers.4.norm2.weight": 256, "transformer.decoder.layers.4.norm2.bias": 256, "transformer.decoder.layers.4.linear1.weight": 524288, "transformer.decoder.layers.4.linear1.bias": 2048, "transformer.decoder.layers.4.linear2.weight": 524288, "transformer.decoder.layers.4.linear2.bias": 256, "transformer.decoder.layers.4.norm3.weight": 256, "transformer.decoder.layers.4.norm3.bias": 256, "transformer.decoder.layers.5.cross_attn.sampling_offsets.weight": 65536, "transformer.decoder.layers.5.cross_attn.sampling_offsets.bias": 256, "transformer.decoder.layers.5.cross_attn.attention_weights.weight": 32768, "transformer.decoder.layers.5.cross_attn.attention_weights.bias": 128, "transformer.decoder.layers.5.cross_attn.value_proj.weight": 65536, "transformer.decoder.layers.5.cross_attn.value_proj.bias": 256, "transformer.decoder.layers.5.cross_attn.output_proj.weight": 65536, "transformer.decoder.layers.5.cross_attn.output_proj.bias": 256, "transformer.decoder.layers.5.norm1.weight": 256, "transformer.decoder.layers.5.norm1.bias": 256, "transformer.decoder.layers.5.ca_text.in_proj_weight": 196608, "transformer.decoder.layers.5.ca_text.in_proj_bias": 768, "transformer.decoder.layers.5.ca_text.out_proj.weight": 65536, "transformer.decoder.layers.5.ca_text.out_proj.bias": 256, "transformer.decoder.layers.5.catext_norm.weight": 256, "transformer.decoder.layers.5.catext_norm.bias": 256, "transformer.decoder.layers.5.self_attn.in_proj_weight": 196608, "transformer.decoder.layers.5.self_attn.in_proj_bias": 768, "transformer.decoder.layers.5.self_attn.out_proj.weight": 65536, "transformer.decoder.layers.5.self_attn.out_proj.bias": 256, "transformer.decoder.layers.5.norm2.weight": 256, "transformer.decoder.layers.5.norm2.bias": 256, "transformer.decoder.layers.5.linear1.weight": 524288, "transformer.decoder.layers.5.linear1.bias": 2048, "transformer.decoder.layers.5.linear2.weight": 524288, "transformer.decoder.layers.5.linear2.bias": 256, "transformer.decoder.layers.5.norm3.weight": 256, "transformer.decoder.layers.5.norm3.bias": 256, "transformer.decoder.norm.weight": 256, "transformer.decoder.norm.bias": 256, "transformer.decoder.ref_point_head.layers.0.weight": 131072, "transformer.decoder.ref_point_head.layers.0.bias": 256, "transformer.decoder.ref_point_head.layers.1.weight": 65536, "transformer.decoder.ref_point_head.layers.1.bias": 256, "transformer.decoder.bbox_embed.0.layers.0.weight": 65536, "transformer.decoder.bbox_embed.0.layers.0.bias": 256, "transformer.decoder.bbox_embed.0.layers.1.weight": 65536, "transformer.decoder.bbox_embed.0.layers.1.bias": 256, "transformer.decoder.bbox_embed.0.layers.2.weight": 1024, "transformer.decoder.bbox_embed.0.layers.2.bias": 4, "transformer.tgt_embed.weight": 230400, "transformer.enc_output.weight": 65536, "transformer.enc_output.bias": 256, "transformer.enc_output_norm.weight": 256, "transformer.enc_output_norm.bias": 256, "transformer.enc_out_bbox_embed.layers.0.weight": 65536, "transformer.enc_out_bbox_embed.layers.0.bias": 256, "transformer.enc_out_bbox_embed.layers.1.weight": 65536, "transformer.enc_out_bbox_embed.layers.1.bias": 256, "transformer.enc_out_bbox_embed.layers.2.weight": 1024, "transformer.enc_out_bbox_embed.layers.2.bias": 4, "feature_map_proj.weight": 458752, "feature_map_proj.bias": 256, "feature_map_encoder.layers.0.norm1.weight": 256, 
"feature_map_encoder.layers.0.norm1.bias": 256, "feature_map_encoder.layers.0.norm2.weight": 256, "feature_map_encoder.layers.0.norm2.bias": 256, "feature_map_encoder.layers.0.self_attn.in_proj_weight": 196608, "feature_map_encoder.layers.0.self_attn.in_proj_bias": 768, "feature_map_encoder.layers.0.self_attn.out_proj.weight": 65536, "feature_map_encoder.layers.0.self_attn.out_proj.bias": 256, "feature_map_encoder.layers.0.mlp.linear1.weight": 524288, "feature_map_encoder.layers.0.mlp.linear1.bias": 2048, "feature_map_encoder.layers.0.mlp.linear2.weight": 524288, "feature_map_encoder.layers.0.mlp.linear2.bias": 256, "feature_map_encoder.layers.1.norm1.weight": 256, "feature_map_encoder.layers.1.norm1.bias": 256, "feature_map_encoder.layers.1.norm2.weight": 256, "feature_map_encoder.layers.1.norm2.bias": 256, "feature_map_encoder.layers.1.self_attn.in_proj_weight": 196608, "feature_map_encoder.layers.1.self_attn.in_proj_bias": 768, "feature_map_encoder.layers.1.self_attn.out_proj.weight": 65536, "feature_map_encoder.layers.1.self_attn.out_proj.bias": 256, "feature_map_encoder.layers.1.mlp.linear1.weight": 524288, "feature_map_encoder.layers.1.mlp.linear1.bias": 2048, "feature_map_encoder.layers.1.mlp.linear2.weight": 524288, "feature_map_encoder.layers.1.mlp.linear2.bias": 256, "feature_map_encoder.layers.2.norm1.weight": 256, "feature_map_encoder.layers.2.norm1.bias": 256, "feature_map_encoder.layers.2.norm2.weight": 256, "feature_map_encoder.layers.2.norm2.bias": 256, "feature_map_encoder.layers.2.self_attn.in_proj_weight": 196608, "feature_map_encoder.layers.2.self_attn.in_proj_bias": 768, "feature_map_encoder.layers.2.self_attn.out_proj.weight": 65536, "feature_map_encoder.layers.2.self_attn.out_proj.bias": 256, "feature_map_encoder.layers.2.mlp.linear1.weight": 524288, "feature_map_encoder.layers.2.mlp.linear1.bias": 2048, "feature_map_encoder.layers.2.mlp.linear2.weight": 524288, "feature_map_encoder.layers.2.mlp.linear2.bias": 256, "feature_map_encoder.norm.weight": 256, "feature_map_encoder.norm.bias": 256, "feat_map.weight": 196608, "feat_map.bias": 256, "input_proj.0.0.weight": 65536, "input_proj.0.0.bias": 256, "input_proj.0.1.weight": 256, "input_proj.0.1.bias": 256, "input_proj.1.0.weight": 131072, "input_proj.1.0.bias": 256, "input_proj.1.1.weight": 256, "input_proj.1.1.bias": 256, "input_proj.2.0.weight": 262144, "input_proj.2.0.bias": 256, "input_proj.2.1.weight": 256, "input_proj.2.1.bias": 256, "input_proj.3.0.weight": 2359296, "input_proj.3.0.bias": 256, "input_proj.3.1.weight": 256, "input_proj.3.1.bias": 256 }[0m [36mDEBUG [0m [36m2024-03-29 17:46:28,636 | [34mbuild dataset ... 
...[0m [36mDEBUG [0m [36m2024-03-29 17:46:29,266 | [34mbuild dataset, done.[0m [36mDEBUG [0m [36m2024-03-29 17:46:29,267 | [34mnumber of training dataset: 1, samples: 3659[0m [32mINFO [0m [32m2024-03-29 17:46:29,889 | [34mIgnore keys: [][0m [32mINFO [0m [32m2024-03-29 17:46:30,040 | [34m_IncompatibleKeys(missing_keys=['feature_map_proj.weight', 'feature_map_proj.bias', 'feature_map_encoder.layers.0.norm1.weight', 'feature_map_encoder.layers.0.norm1.bias', 'feature_map_encoder.layers.0.norm2.weight', 'feature_map_encoder.layers.0.norm2.bias', 'feature_map_encoder.layers.0.self_attn.in_proj_weight', 'feature_map_encoder.layers.0.self_attn.in_proj_bias', 'feature_map_encoder.layers.0.self_attn.out_proj.weight', 'feature_map_encoder.layers.0.self_attn.out_proj.bias', 'feature_map_encoder.layers.0.mlp.linear1.weight', 'feature_map_encoder.layers.0.mlp.linear1.bias', 'feature_map_encoder.layers.0.mlp.linear2.weight', 'feature_map_encoder.layers.0.mlp.linear2.bias', 'feature_map_encoder.layers.1.norm1.weight', 'feature_map_encoder.layers.1.norm1.bias', 'feature_map_encoder.layers.1.norm2.weight', 'feature_map_encoder.layers.1.norm2.bias', 'feature_map_encoder.layers.1.self_attn.in_proj_weight', 'feature_map_encoder.layers.1.self_attn.in_proj_bias', 'feature_map_encoder.layers.1.self_attn.out_proj.weight', 'feature_map_encoder.layers.1.self_attn.out_proj.bias', 'feature_map_encoder.layers.1.mlp.linear1.weight', 'feature_map_encoder.layers.1.mlp.linear1.bias', 'feature_map_encoder.layers.1.mlp.linear2.weight', 'feature_map_encoder.layers.1.mlp.linear2.bias', 'feature_map_encoder.layers.2.norm1.weight', 'feature_map_encoder.layers.2.norm1.bias', 'feature_map_encoder.layers.2.norm2.weight', 'feature_map_encoder.layers.2.norm2.bias', 'feature_map_encoder.layers.2.self_attn.in_proj_weight', 'feature_map_encoder.layers.2.self_attn.in_proj_bias', 'feature_map_encoder.layers.2.self_attn.out_proj.weight', 'feature_map_encoder.layers.2.self_attn.out_proj.bias', 'feature_map_encoder.layers.2.mlp.linear1.weight', 'feature_map_encoder.layers.2.mlp.linear1.bias', 'feature_map_encoder.layers.2.mlp.linear2.weight', 'feature_map_encoder.layers.2.mlp.linear2.bias', 'feature_map_encoder.norm.weight', 'feature_map_encoder.norm.bias'], unexpected_keys=['label_enc.weight', 'bert.embeddings.position_ids'])[0m |
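The parameter listings above map each parameter name to its element count, and the closing _IncompatibleKeys record is the standard return value of a non-strict checkpoint load. A minimal PyTorch sketch of how such output is typically produced follows; the toy model and checkpoint keys are placeholders for illustration, not the actual modules or weights used in this run.

import json
import torch
from torch import nn

def param_sizes(model: nn.Module, trainable_only: bool = False) -> dict:
    # Map each parameter name to its element count, like the listings logged above.
    return {name: p.numel()
            for name, p in model.named_parameters()
            if p.requires_grad or not trainable_only}

# Placeholder stand-in for the detector; the real run builds a much larger model.
model = nn.Sequential(nn.Linear(256, 2048), nn.ReLU(), nn.Linear(2048, 256))
print(json.dumps(param_sizes(model)))                        # full parameter listing

for p in model[0].parameters():                              # freeze a sub-module ...
    p.requires_grad_(False)
print(json.dumps(param_sizes(model, trainable_only=True)))   # ... "params after freezing"

# A non-strict load reports keys the checkpoint lacks (missing_keys) and keys the
# model does not define (unexpected_keys), as in the final log record above.
state = {"0.weight": torch.zeros(2048, 256), "extra.bias": torch.zeros(1)}
print(model.load_state_dict(state, strict=False))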