Update model.safetensors.index.json
model.safetensors.index.json CHANGED (+96 -96)
@@ -455,8 +455,8 @@
 "vision_encoder.blocks.0.attn.proj.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.0.attn.q_norm.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.0.attn.qkv.weight": "model-00001-of-00013.safetensors",
-"vision_encoder.blocks.0.ls1.
-"vision_encoder.blocks.0.ls2.
+"vision_encoder.blocks.0.ls1.weight": "model-00001-of-00013.safetensors",
+"vision_encoder.blocks.0.ls2.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.0.mlp.fc1.bias": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.0.mlp.fc1.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.0.mlp.fc2.bias": "model-00001-of-00013.safetensors",
@@ -468,8 +468,8 @@
 "vision_encoder.blocks.1.attn.proj.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.1.attn.q_norm.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.1.attn.qkv.weight": "model-00001-of-00013.safetensors",
-"vision_encoder.blocks.1.ls1.
-"vision_encoder.blocks.1.ls2.
+"vision_encoder.blocks.1.ls1.weight": "model-00001-of-00013.safetensors",
+"vision_encoder.blocks.1.ls2.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.1.mlp.fc1.bias": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.1.mlp.fc1.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.1.mlp.fc2.bias": "model-00001-of-00013.safetensors",
@@ -481,8 +481,8 @@
 "vision_encoder.blocks.10.attn.proj.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.10.attn.q_norm.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.10.attn.qkv.weight": "model-00003-of-00013.safetensors",
-"vision_encoder.blocks.10.ls1.
-"vision_encoder.blocks.10.ls2.
+"vision_encoder.blocks.10.ls1.weight": "model-00003-of-00013.safetensors",
+"vision_encoder.blocks.10.ls2.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.10.mlp.fc1.bias": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.10.mlp.fc1.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.10.mlp.fc2.bias": "model-00003-of-00013.safetensors",
@@ -494,8 +494,8 @@
 "vision_encoder.blocks.11.attn.proj.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.11.attn.q_norm.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.11.attn.qkv.weight": "model-00003-of-00013.safetensors",
-"vision_encoder.blocks.11.ls1.
-"vision_encoder.blocks.11.ls2.
+"vision_encoder.blocks.11.ls1.weight": "model-00003-of-00013.safetensors",
+"vision_encoder.blocks.11.ls2.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.11.mlp.fc1.bias": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.11.mlp.fc1.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.11.mlp.fc2.bias": "model-00004-of-00013.safetensors",
@@ -507,8 +507,8 @@
 "vision_encoder.blocks.12.attn.proj.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.12.attn.q_norm.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.12.attn.qkv.weight": "model-00004-of-00013.safetensors",
-"vision_encoder.blocks.12.ls1.
-"vision_encoder.blocks.12.ls2.
+"vision_encoder.blocks.12.ls1.weight": "model-00004-of-00013.safetensors",
+"vision_encoder.blocks.12.ls2.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.12.mlp.fc1.bias": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.12.mlp.fc1.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.12.mlp.fc2.bias": "model-00004-of-00013.safetensors",
@@ -520,8 +520,8 @@
 "vision_encoder.blocks.13.attn.proj.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.13.attn.q_norm.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.13.attn.qkv.weight": "model-00004-of-00013.safetensors",
-"vision_encoder.blocks.13.ls1.
-"vision_encoder.blocks.13.ls2.
+"vision_encoder.blocks.13.ls1.weight": "model-00004-of-00013.safetensors",
+"vision_encoder.blocks.13.ls2.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.13.mlp.fc1.bias": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.13.mlp.fc1.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.13.mlp.fc2.bias": "model-00004-of-00013.safetensors",
@@ -533,8 +533,8 @@
 "vision_encoder.blocks.14.attn.proj.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.14.attn.q_norm.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.14.attn.qkv.weight": "model-00004-of-00013.safetensors",
-"vision_encoder.blocks.14.ls1.
-"vision_encoder.blocks.14.ls2.
+"vision_encoder.blocks.14.ls1.weight": "model-00004-of-00013.safetensors",
+"vision_encoder.blocks.14.ls2.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.14.mlp.fc1.bias": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.14.mlp.fc1.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.14.mlp.fc2.bias": "model-00004-of-00013.safetensors",
@@ -546,8 +546,8 @@
 "vision_encoder.blocks.15.attn.proj.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.15.attn.q_norm.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.15.attn.qkv.weight": "model-00004-of-00013.safetensors",
-"vision_encoder.blocks.15.ls1.
-"vision_encoder.blocks.15.ls2.
+"vision_encoder.blocks.15.ls1.weight": "model-00004-of-00013.safetensors",
+"vision_encoder.blocks.15.ls2.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.15.mlp.fc1.bias": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.15.mlp.fc1.weight": "model-00004-of-00013.safetensors",
 "vision_encoder.blocks.15.mlp.fc2.bias": "model-00005-of-00013.safetensors",
@@ -559,8 +559,8 @@
 "vision_encoder.blocks.16.attn.proj.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.16.attn.q_norm.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.16.attn.qkv.weight": "model-00005-of-00013.safetensors",
-"vision_encoder.blocks.16.ls1.
-"vision_encoder.blocks.16.ls2.
+"vision_encoder.blocks.16.ls1.weight": "model-00005-of-00013.safetensors",
+"vision_encoder.blocks.16.ls2.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.16.mlp.fc1.bias": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.16.mlp.fc1.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.16.mlp.fc2.bias": "model-00005-of-00013.safetensors",
@@ -572,8 +572,8 @@
 "vision_encoder.blocks.17.attn.proj.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.17.attn.q_norm.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.17.attn.qkv.weight": "model-00005-of-00013.safetensors",
-"vision_encoder.blocks.17.ls1.
-"vision_encoder.blocks.17.ls2.
+"vision_encoder.blocks.17.ls1.weight": "model-00005-of-00013.safetensors",
+"vision_encoder.blocks.17.ls2.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.17.mlp.fc1.bias": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.17.mlp.fc1.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.17.mlp.fc2.bias": "model-00005-of-00013.safetensors",
@@ -585,8 +585,8 @@
 "vision_encoder.blocks.18.attn.proj.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.18.attn.q_norm.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.18.attn.qkv.weight": "model-00005-of-00013.safetensors",
-"vision_encoder.blocks.18.ls1.
-"vision_encoder.blocks.18.ls2.
+"vision_encoder.blocks.18.ls1.weight": "model-00005-of-00013.safetensors",
+"vision_encoder.blocks.18.ls2.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.18.mlp.fc1.bias": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.18.mlp.fc1.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.18.mlp.fc2.bias": "model-00005-of-00013.safetensors",
@@ -598,8 +598,8 @@
 "vision_encoder.blocks.19.attn.proj.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.19.attn.q_norm.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.19.attn.qkv.weight": "model-00005-of-00013.safetensors",
-"vision_encoder.blocks.19.ls1.
-"vision_encoder.blocks.19.ls2.
+"vision_encoder.blocks.19.ls1.weight": "model-00005-of-00013.safetensors",
+"vision_encoder.blocks.19.ls2.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.19.mlp.fc1.bias": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.19.mlp.fc1.weight": "model-00005-of-00013.safetensors",
 "vision_encoder.blocks.19.mlp.fc2.bias": "model-00006-of-00013.safetensors",
@@ -611,8 +611,8 @@
 "vision_encoder.blocks.2.attn.proj.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.2.attn.q_norm.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.2.attn.qkv.weight": "model-00001-of-00013.safetensors",
-"vision_encoder.blocks.2.ls1.
-"vision_encoder.blocks.2.ls2.
+"vision_encoder.blocks.2.ls1.weight": "model-00001-of-00013.safetensors",
+"vision_encoder.blocks.2.ls2.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.2.mlp.fc1.bias": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.2.mlp.fc1.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.2.mlp.fc2.bias": "model-00001-of-00013.safetensors",
@@ -624,8 +624,8 @@
 "vision_encoder.blocks.20.attn.proj.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.20.attn.q_norm.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.20.attn.qkv.weight": "model-00006-of-00013.safetensors",
-"vision_encoder.blocks.20.ls1.
-"vision_encoder.blocks.20.ls2.
+"vision_encoder.blocks.20.ls1.weight": "model-00006-of-00013.safetensors",
+"vision_encoder.blocks.20.ls2.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.20.mlp.fc1.bias": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.20.mlp.fc1.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.20.mlp.fc2.bias": "model-00006-of-00013.safetensors",
@@ -637,8 +637,8 @@
 "vision_encoder.blocks.21.attn.proj.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.21.attn.q_norm.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.21.attn.qkv.weight": "model-00006-of-00013.safetensors",
-"vision_encoder.blocks.21.ls1.
-"vision_encoder.blocks.21.ls2.
+"vision_encoder.blocks.21.ls1.weight": "model-00006-of-00013.safetensors",
+"vision_encoder.blocks.21.ls2.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.21.mlp.fc1.bias": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.21.mlp.fc1.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.21.mlp.fc2.bias": "model-00006-of-00013.safetensors",
@@ -650,8 +650,8 @@
 "vision_encoder.blocks.22.attn.proj.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.22.attn.q_norm.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.22.attn.qkv.weight": "model-00006-of-00013.safetensors",
-"vision_encoder.blocks.22.ls1.
-"vision_encoder.blocks.22.ls2.
+"vision_encoder.blocks.22.ls1.weight": "model-00006-of-00013.safetensors",
+"vision_encoder.blocks.22.ls2.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.22.mlp.fc1.bias": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.22.mlp.fc1.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.22.mlp.fc2.bias": "model-00006-of-00013.safetensors",
@@ -663,8 +663,8 @@
 "vision_encoder.blocks.23.attn.proj.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.23.attn.q_norm.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.23.attn.qkv.weight": "model-00006-of-00013.safetensors",
-"vision_encoder.blocks.23.ls1.
-"vision_encoder.blocks.23.ls2.
+"vision_encoder.blocks.23.ls1.weight": "model-00006-of-00013.safetensors",
+"vision_encoder.blocks.23.ls2.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.23.mlp.fc1.bias": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.23.mlp.fc1.weight": "model-00006-of-00013.safetensors",
 "vision_encoder.blocks.23.mlp.fc2.bias": "model-00007-of-00013.safetensors",
@@ -676,8 +676,8 @@
 "vision_encoder.blocks.24.attn.proj.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.24.attn.q_norm.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.24.attn.qkv.weight": "model-00007-of-00013.safetensors",
-"vision_encoder.blocks.24.ls1.
-"vision_encoder.blocks.24.ls2.
+"vision_encoder.blocks.24.ls1.weight": "model-00007-of-00013.safetensors",
+"vision_encoder.blocks.24.ls2.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.24.mlp.fc1.bias": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.24.mlp.fc1.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.24.mlp.fc2.bias": "model-00007-of-00013.safetensors",
@@ -689,8 +689,8 @@
 "vision_encoder.blocks.25.attn.proj.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.25.attn.q_norm.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.25.attn.qkv.weight": "model-00007-of-00013.safetensors",
-"vision_encoder.blocks.25.ls1.
-"vision_encoder.blocks.25.ls2.
+"vision_encoder.blocks.25.ls1.weight": "model-00007-of-00013.safetensors",
+"vision_encoder.blocks.25.ls2.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.25.mlp.fc1.bias": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.25.mlp.fc1.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.25.mlp.fc2.bias": "model-00007-of-00013.safetensors",
@@ -702,8 +702,8 @@
 "vision_encoder.blocks.26.attn.proj.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.26.attn.q_norm.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.26.attn.qkv.weight": "model-00007-of-00013.safetensors",
-"vision_encoder.blocks.26.ls1.
-"vision_encoder.blocks.26.ls2.
+"vision_encoder.blocks.26.ls1.weight": "model-00007-of-00013.safetensors",
+"vision_encoder.blocks.26.ls2.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.26.mlp.fc1.bias": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.26.mlp.fc1.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.26.mlp.fc2.bias": "model-00007-of-00013.safetensors",
@@ -715,8 +715,8 @@
 "vision_encoder.blocks.27.attn.proj.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.27.attn.q_norm.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.27.attn.qkv.weight": "model-00007-of-00013.safetensors",
-"vision_encoder.blocks.27.ls1.
-"vision_encoder.blocks.27.ls2.
+"vision_encoder.blocks.27.ls1.weight": "model-00007-of-00013.safetensors",
+"vision_encoder.blocks.27.ls2.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.27.mlp.fc1.bias": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.27.mlp.fc1.weight": "model-00007-of-00013.safetensors",
 "vision_encoder.blocks.27.mlp.fc2.bias": "model-00008-of-00013.safetensors",
@@ -728,8 +728,8 @@
 "vision_encoder.blocks.28.attn.proj.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.28.attn.q_norm.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.28.attn.qkv.weight": "model-00008-of-00013.safetensors",
-"vision_encoder.blocks.28.ls1.
-"vision_encoder.blocks.28.ls2.
+"vision_encoder.blocks.28.ls1.weight": "model-00008-of-00013.safetensors",
+"vision_encoder.blocks.28.ls2.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.28.mlp.fc1.bias": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.28.mlp.fc1.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.28.mlp.fc2.bias": "model-00008-of-00013.safetensors",
@@ -741,8 +741,8 @@
 "vision_encoder.blocks.29.attn.proj.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.29.attn.q_norm.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.29.attn.qkv.weight": "model-00008-of-00013.safetensors",
-"vision_encoder.blocks.29.ls1.
-"vision_encoder.blocks.29.ls2.
+"vision_encoder.blocks.29.ls1.weight": "model-00008-of-00013.safetensors",
+"vision_encoder.blocks.29.ls2.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.29.mlp.fc1.bias": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.29.mlp.fc1.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.29.mlp.fc2.bias": "model-00008-of-00013.safetensors",
@@ -754,8 +754,8 @@
 "vision_encoder.blocks.3.attn.proj.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.3.attn.q_norm.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.3.attn.qkv.weight": "model-00001-of-00013.safetensors",
-"vision_encoder.blocks.3.ls1.
-"vision_encoder.blocks.3.ls2.
+"vision_encoder.blocks.3.ls1.weight": "model-00001-of-00013.safetensors",
+"vision_encoder.blocks.3.ls2.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.3.mlp.fc1.bias": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.3.mlp.fc1.weight": "model-00001-of-00013.safetensors",
 "vision_encoder.blocks.3.mlp.fc2.bias": "model-00002-of-00013.safetensors",
@@ -767,8 +767,8 @@
 "vision_encoder.blocks.30.attn.proj.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.30.attn.q_norm.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.30.attn.qkv.weight": "model-00008-of-00013.safetensors",
-"vision_encoder.blocks.30.ls1.
-"vision_encoder.blocks.30.ls2.
+"vision_encoder.blocks.30.ls1.weight": "model-00008-of-00013.safetensors",
+"vision_encoder.blocks.30.ls2.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.30.mlp.fc1.bias": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.30.mlp.fc1.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.30.mlp.fc2.bias": "model-00008-of-00013.safetensors",
@@ -780,8 +780,8 @@
 "vision_encoder.blocks.31.attn.proj.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.31.attn.q_norm.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.31.attn.qkv.weight": "model-00008-of-00013.safetensors",
-"vision_encoder.blocks.31.ls1.
-"vision_encoder.blocks.31.ls2.
+"vision_encoder.blocks.31.ls1.weight": "model-00008-of-00013.safetensors",
+"vision_encoder.blocks.31.ls2.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.31.mlp.fc1.bias": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.31.mlp.fc1.weight": "model-00008-of-00013.safetensors",
 "vision_encoder.blocks.31.mlp.fc2.bias": "model-00009-of-00013.safetensors",
@@ -793,8 +793,8 @@
 "vision_encoder.blocks.32.attn.proj.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.32.attn.q_norm.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.32.attn.qkv.weight": "model-00009-of-00013.safetensors",
-"vision_encoder.blocks.32.ls1.
-"vision_encoder.blocks.32.ls2.
+"vision_encoder.blocks.32.ls1.weight": "model-00009-of-00013.safetensors",
+"vision_encoder.blocks.32.ls2.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.32.mlp.fc1.bias": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.32.mlp.fc1.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.32.mlp.fc2.bias": "model-00009-of-00013.safetensors",
@@ -806,8 +806,8 @@
 "vision_encoder.blocks.33.attn.proj.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.33.attn.q_norm.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.33.attn.qkv.weight": "model-00009-of-00013.safetensors",
-"vision_encoder.blocks.33.ls1.
-"vision_encoder.blocks.33.ls2.
+"vision_encoder.blocks.33.ls1.weight": "model-00009-of-00013.safetensors",
+"vision_encoder.blocks.33.ls2.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.33.mlp.fc1.bias": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.33.mlp.fc1.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.33.mlp.fc2.bias": "model-00009-of-00013.safetensors",
@@ -819,8 +819,8 @@
 "vision_encoder.blocks.34.attn.proj.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.34.attn.q_norm.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.34.attn.qkv.weight": "model-00009-of-00013.safetensors",
-"vision_encoder.blocks.34.ls1.
-"vision_encoder.blocks.34.ls2.
+"vision_encoder.blocks.34.ls1.weight": "model-00009-of-00013.safetensors",
+"vision_encoder.blocks.34.ls2.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.34.mlp.fc1.bias": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.34.mlp.fc1.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.34.mlp.fc2.bias": "model-00009-of-00013.safetensors",
@@ -832,8 +832,8 @@
 "vision_encoder.blocks.35.attn.proj.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.35.attn.q_norm.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.35.attn.qkv.weight": "model-00009-of-00013.safetensors",
-"vision_encoder.blocks.35.ls1.
-"vision_encoder.blocks.35.ls2.
+"vision_encoder.blocks.35.ls1.weight": "model-00009-of-00013.safetensors",
+"vision_encoder.blocks.35.ls2.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.35.mlp.fc1.bias": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.35.mlp.fc1.weight": "model-00009-of-00013.safetensors",
 "vision_encoder.blocks.35.mlp.fc2.bias": "model-00010-of-00013.safetensors",
@@ -845,8 +845,8 @@
 "vision_encoder.blocks.36.attn.proj.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.36.attn.q_norm.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.36.attn.qkv.weight": "model-00010-of-00013.safetensors",
-"vision_encoder.blocks.36.ls1.
-"vision_encoder.blocks.36.ls2.
+"vision_encoder.blocks.36.ls1.weight": "model-00010-of-00013.safetensors",
+"vision_encoder.blocks.36.ls2.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.36.mlp.fc1.bias": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.36.mlp.fc1.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.36.mlp.fc2.bias": "model-00010-of-00013.safetensors",
@@ -858,8 +858,8 @@
 "vision_encoder.blocks.37.attn.proj.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.37.attn.q_norm.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.37.attn.qkv.weight": "model-00010-of-00013.safetensors",
-"vision_encoder.blocks.37.ls1.
-"vision_encoder.blocks.37.ls2.
+"vision_encoder.blocks.37.ls1.weight": "model-00010-of-00013.safetensors",
+"vision_encoder.blocks.37.ls2.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.37.mlp.fc1.bias": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.37.mlp.fc1.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.37.mlp.fc2.bias": "model-00010-of-00013.safetensors",
@@ -871,8 +871,8 @@
 "vision_encoder.blocks.38.attn.proj.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.38.attn.q_norm.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.38.attn.qkv.weight": "model-00010-of-00013.safetensors",
-"vision_encoder.blocks.38.ls1.
-"vision_encoder.blocks.38.ls2.
+"vision_encoder.blocks.38.ls1.weight": "model-00010-of-00013.safetensors",
+"vision_encoder.blocks.38.ls2.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.38.mlp.fc1.bias": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.38.mlp.fc1.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.38.mlp.fc2.bias": "model-00010-of-00013.safetensors",
@@ -884,8 +884,8 @@
 "vision_encoder.blocks.39.attn.proj.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.39.attn.q_norm.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.39.attn.qkv.weight": "model-00010-of-00013.safetensors",
-"vision_encoder.blocks.39.ls1.
-"vision_encoder.blocks.39.ls2.
+"vision_encoder.blocks.39.ls1.weight": "model-00010-of-00013.safetensors",
+"vision_encoder.blocks.39.ls2.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.39.mlp.fc1.bias": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.39.mlp.fc1.weight": "model-00010-of-00013.safetensors",
 "vision_encoder.blocks.39.mlp.fc2.bias": "model-00011-of-00013.safetensors",
@@ -897,8 +897,8 @@
 "vision_encoder.blocks.4.attn.proj.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.4.attn.q_norm.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.4.attn.qkv.weight": "model-00002-of-00013.safetensors",
-"vision_encoder.blocks.4.ls1.
-"vision_encoder.blocks.4.ls2.
+"vision_encoder.blocks.4.ls1.weight": "model-00002-of-00013.safetensors",
+"vision_encoder.blocks.4.ls2.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.4.mlp.fc1.bias": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.4.mlp.fc1.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.4.mlp.fc2.bias": "model-00002-of-00013.safetensors",
@@ -910,8 +910,8 @@
 "vision_encoder.blocks.40.attn.proj.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.40.attn.q_norm.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.40.attn.qkv.weight": "model-00011-of-00013.safetensors",
-"vision_encoder.blocks.40.ls1.
-"vision_encoder.blocks.40.ls2.
+"vision_encoder.blocks.40.ls1.weight": "model-00011-of-00013.safetensors",
+"vision_encoder.blocks.40.ls2.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.40.mlp.fc1.bias": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.40.mlp.fc1.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.40.mlp.fc2.bias": "model-00011-of-00013.safetensors",
@@ -923,8 +923,8 @@
 "vision_encoder.blocks.41.attn.proj.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.41.attn.q_norm.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.41.attn.qkv.weight": "model-00011-of-00013.safetensors",
-"vision_encoder.blocks.41.ls1.
-"vision_encoder.blocks.41.ls2.
+"vision_encoder.blocks.41.ls1.weight": "model-00011-of-00013.safetensors",
+"vision_encoder.blocks.41.ls2.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.41.mlp.fc1.bias": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.41.mlp.fc1.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.41.mlp.fc2.bias": "model-00011-of-00013.safetensors",
@@ -936,8 +936,8 @@
 "vision_encoder.blocks.42.attn.proj.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.42.attn.q_norm.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.42.attn.qkv.weight": "model-00011-of-00013.safetensors",
-"vision_encoder.blocks.42.ls1.
-"vision_encoder.blocks.42.ls2.
+"vision_encoder.blocks.42.ls1.weight": "model-00011-of-00013.safetensors",
+"vision_encoder.blocks.42.ls2.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.42.mlp.fc1.bias": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.42.mlp.fc1.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.42.mlp.fc2.bias": "model-00011-of-00013.safetensors",
@@ -949,8 +949,8 @@
 "vision_encoder.blocks.43.attn.proj.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.43.attn.q_norm.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.43.attn.qkv.weight": "model-00011-of-00013.safetensors",
-"vision_encoder.blocks.43.ls1.
-"vision_encoder.blocks.43.ls2.
+"vision_encoder.blocks.43.ls1.weight": "model-00011-of-00013.safetensors",
+"vision_encoder.blocks.43.ls2.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.43.mlp.fc1.bias": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.43.mlp.fc1.weight": "model-00011-of-00013.safetensors",
 "vision_encoder.blocks.43.mlp.fc2.bias": "model-00012-of-00013.safetensors",
@@ -962,8 +962,8 @@
 "vision_encoder.blocks.44.attn.proj.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.44.attn.q_norm.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.44.attn.qkv.weight": "model-00012-of-00013.safetensors",
-"vision_encoder.blocks.44.ls1.
-"vision_encoder.blocks.44.ls2.
+"vision_encoder.blocks.44.ls1.weight": "model-00012-of-00013.safetensors",
+"vision_encoder.blocks.44.ls2.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.44.mlp.fc1.bias": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.44.mlp.fc1.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.44.mlp.fc2.bias": "model-00012-of-00013.safetensors",
@@ -975,8 +975,8 @@
 "vision_encoder.blocks.45.attn.proj.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.45.attn.q_norm.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.45.attn.qkv.weight": "model-00012-of-00013.safetensors",
-"vision_encoder.blocks.45.ls1.
-"vision_encoder.blocks.45.ls2.
+"vision_encoder.blocks.45.ls1.weight": "model-00012-of-00013.safetensors",
+"vision_encoder.blocks.45.ls2.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.45.mlp.fc1.bias": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.45.mlp.fc1.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.45.mlp.fc2.bias": "model-00012-of-00013.safetensors",
@@ -988,8 +988,8 @@
 "vision_encoder.blocks.46.attn.proj.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.46.attn.q_norm.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.46.attn.qkv.weight": "model-00012-of-00013.safetensors",
-"vision_encoder.blocks.46.ls1.
-"vision_encoder.blocks.46.ls2.
+"vision_encoder.blocks.46.ls1.weight": "model-00012-of-00013.safetensors",
+"vision_encoder.blocks.46.ls2.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.46.mlp.fc1.bias": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.46.mlp.fc1.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.46.mlp.fc2.bias": "model-00012-of-00013.safetensors",
@@ -1001,8 +1001,8 @@
 "vision_encoder.blocks.47.attn.proj.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.47.attn.q_norm.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.47.attn.qkv.weight": "model-00012-of-00013.safetensors",
-"vision_encoder.blocks.47.ls1.
-"vision_encoder.blocks.47.ls2.
+"vision_encoder.blocks.47.ls1.weight": "model-00012-of-00013.safetensors",
+"vision_encoder.blocks.47.ls2.weight": "model-00013-of-00013.safetensors",
 "vision_encoder.blocks.47.mlp.fc1.bias": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.47.mlp.fc1.weight": "model-00012-of-00013.safetensors",
 "vision_encoder.blocks.47.mlp.fc2.bias": "model-00013-of-00013.safetensors",
@@ -1014,8 +1014,8 @@
 "vision_encoder.blocks.5.attn.proj.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.5.attn.q_norm.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.5.attn.qkv.weight": "model-00002-of-00013.safetensors",
-"vision_encoder.blocks.5.ls1.
-"vision_encoder.blocks.5.ls2.
+"vision_encoder.blocks.5.ls1.weight": "model-00002-of-00013.safetensors",
+"vision_encoder.blocks.5.ls2.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.5.mlp.fc1.bias": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.5.mlp.fc1.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.5.mlp.fc2.bias": "model-00002-of-00013.safetensors",
@@ -1027,8 +1027,8 @@
 "vision_encoder.blocks.6.attn.proj.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.6.attn.q_norm.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.6.attn.qkv.weight": "model-00002-of-00013.safetensors",
-"vision_encoder.blocks.6.ls1.
-"vision_encoder.blocks.6.ls2.
+"vision_encoder.blocks.6.ls1.weight": "model-00002-of-00013.safetensors",
+"vision_encoder.blocks.6.ls2.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.6.mlp.fc1.bias": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.6.mlp.fc1.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.6.mlp.fc2.bias": "model-00002-of-00013.safetensors",
@@ -1040,8 +1040,8 @@
 "vision_encoder.blocks.7.attn.proj.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.7.attn.q_norm.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.7.attn.qkv.weight": "model-00002-of-00013.safetensors",
-"vision_encoder.blocks.7.ls1.
-"vision_encoder.blocks.7.ls2.
+"vision_encoder.blocks.7.ls1.weight": "model-00002-of-00013.safetensors",
+"vision_encoder.blocks.7.ls2.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.7.mlp.fc1.bias": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.7.mlp.fc1.weight": "model-00002-of-00013.safetensors",
 "vision_encoder.blocks.7.mlp.fc2.bias": "model-00003-of-00013.safetensors",
@@ -1053,8 +1053,8 @@
 "vision_encoder.blocks.8.attn.proj.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.8.attn.q_norm.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.8.attn.qkv.weight": "model-00003-of-00013.safetensors",
-"vision_encoder.blocks.8.ls1.
-"vision_encoder.blocks.8.ls2.
+"vision_encoder.blocks.8.ls1.weight": "model-00003-of-00013.safetensors",
+"vision_encoder.blocks.8.ls2.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.8.mlp.fc1.bias": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.8.mlp.fc1.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.8.mlp.fc2.bias": "model-00003-of-00013.safetensors",
@@ -1066,8 +1066,8 @@
 "vision_encoder.blocks.9.attn.proj.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.9.attn.q_norm.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.9.attn.qkv.weight": "model-00003-of-00013.safetensors",
-"vision_encoder.blocks.9.ls1.
-"vision_encoder.blocks.9.ls2.
+"vision_encoder.blocks.9.ls1.weight": "model-00003-of-00013.safetensors",
+"vision_encoder.blocks.9.ls2.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.9.mlp.fc1.bias": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.9.mlp.fc1.weight": "model-00003-of-00013.safetensors",
 "vision_encoder.blocks.9.mlp.fc2.bias": "model-00003-of-00013.safetensors",
|
| 982 |
"vision_encoder.blocks.45.mlp.fc2.bias": "model-00012-of-00013.safetensors",
|
|
|
|
| 988 |
"vision_encoder.blocks.46.attn.proj.weight": "model-00012-of-00013.safetensors",
|
| 989 |
"vision_encoder.blocks.46.attn.q_norm.weight": "model-00012-of-00013.safetensors",
|
| 990 |
"vision_encoder.blocks.46.attn.qkv.weight": "model-00012-of-00013.safetensors",
|
| 991 |
+
"vision_encoder.blocks.46.ls1.weight": "model-00012-of-00013.safetensors",
|
| 992 |
+
"vision_encoder.blocks.46.ls2.weight": "model-00012-of-00013.safetensors",
|
| 993 |
"vision_encoder.blocks.46.mlp.fc1.bias": "model-00012-of-00013.safetensors",
|
| 994 |
"vision_encoder.blocks.46.mlp.fc1.weight": "model-00012-of-00013.safetensors",
|
| 995 |
"vision_encoder.blocks.46.mlp.fc2.bias": "model-00012-of-00013.safetensors",
|
|
|
|
| 1001 |
"vision_encoder.blocks.47.attn.proj.weight": "model-00012-of-00013.safetensors",
|
| 1002 |
"vision_encoder.blocks.47.attn.q_norm.weight": "model-00012-of-00013.safetensors",
|
| 1003 |
"vision_encoder.blocks.47.attn.qkv.weight": "model-00012-of-00013.safetensors",
|
| 1004 |
+
"vision_encoder.blocks.47.ls1.weight": "model-00012-of-00013.safetensors",
|
| 1005 |
+
"vision_encoder.blocks.47.ls2.weight": "model-00013-of-00013.safetensors",
|
| 1006 |
"vision_encoder.blocks.47.mlp.fc1.bias": "model-00012-of-00013.safetensors",
|
| 1007 |
"vision_encoder.blocks.47.mlp.fc1.weight": "model-00012-of-00013.safetensors",
|
| 1008 |
"vision_encoder.blocks.47.mlp.fc2.bias": "model-00013-of-00013.safetensors",
|
|
|
|
| 1014 |
"vision_encoder.blocks.5.attn.proj.weight": "model-00002-of-00013.safetensors",
|
| 1015 |
"vision_encoder.blocks.5.attn.q_norm.weight": "model-00002-of-00013.safetensors",
|
| 1016 |
"vision_encoder.blocks.5.attn.qkv.weight": "model-00002-of-00013.safetensors",
|
| 1017 |
+
"vision_encoder.blocks.5.ls1.weight": "model-00002-of-00013.safetensors",
|
| 1018 |
+
"vision_encoder.blocks.5.ls2.weight": "model-00002-of-00013.safetensors",
|
| 1019 |
"vision_encoder.blocks.5.mlp.fc1.bias": "model-00002-of-00013.safetensors",
|
| 1020 |
"vision_encoder.blocks.5.mlp.fc1.weight": "model-00002-of-00013.safetensors",
|
| 1021 |
"vision_encoder.blocks.5.mlp.fc2.bias": "model-00002-of-00013.safetensors",
|
|
|
|
| 1027 |
"vision_encoder.blocks.6.attn.proj.weight": "model-00002-of-00013.safetensors",
|
| 1028 |
"vision_encoder.blocks.6.attn.q_norm.weight": "model-00002-of-00013.safetensors",
|
| 1029 |
"vision_encoder.blocks.6.attn.qkv.weight": "model-00002-of-00013.safetensors",
|
| 1030 |
+
"vision_encoder.blocks.6.ls1.weight": "model-00002-of-00013.safetensors",
|
| 1031 |
+
"vision_encoder.blocks.6.ls2.weight": "model-00002-of-00013.safetensors",
|
| 1032 |
"vision_encoder.blocks.6.mlp.fc1.bias": "model-00002-of-00013.safetensors",
|
| 1033 |
"vision_encoder.blocks.6.mlp.fc1.weight": "model-00002-of-00013.safetensors",
|
| 1034 |
"vision_encoder.blocks.6.mlp.fc2.bias": "model-00002-of-00013.safetensors",
|
|
|
|
| 1040 |
"vision_encoder.blocks.7.attn.proj.weight": "model-00002-of-00013.safetensors",
|
| 1041 |
"vision_encoder.blocks.7.attn.q_norm.weight": "model-00002-of-00013.safetensors",
|
| 1042 |
"vision_encoder.blocks.7.attn.qkv.weight": "model-00002-of-00013.safetensors",
|
| 1043 |
+
"vision_encoder.blocks.7.ls1.weight": "model-00002-of-00013.safetensors",
|
| 1044 |
+
"vision_encoder.blocks.7.ls2.weight": "model-00003-of-00013.safetensors",
|
| 1045 |
"vision_encoder.blocks.7.mlp.fc1.bias": "model-00002-of-00013.safetensors",
|
| 1046 |
"vision_encoder.blocks.7.mlp.fc1.weight": "model-00002-of-00013.safetensors",
|
| 1047 |
"vision_encoder.blocks.7.mlp.fc2.bias": "model-00003-of-00013.safetensors",
|
|
|
|
| 1053 |
"vision_encoder.blocks.8.attn.proj.weight": "model-00003-of-00013.safetensors",
|
| 1054 |
"vision_encoder.blocks.8.attn.q_norm.weight": "model-00003-of-00013.safetensors",
|
| 1055 |
"vision_encoder.blocks.8.attn.qkv.weight": "model-00003-of-00013.safetensors",
|
| 1056 |
+
"vision_encoder.blocks.8.ls1.weight": "model-00003-of-00013.safetensors",
|
| 1057 |
+
"vision_encoder.blocks.8.ls2.weight": "model-00003-of-00013.safetensors",
|
| 1058 |
"vision_encoder.blocks.8.mlp.fc1.bias": "model-00003-of-00013.safetensors",
|
| 1059 |
"vision_encoder.blocks.8.mlp.fc1.weight": "model-00003-of-00013.safetensors",
|
| 1060 |
"vision_encoder.blocks.8.mlp.fc2.bias": "model-00003-of-00013.safetensors",
|
|
|
|
| 1066 |
"vision_encoder.blocks.9.attn.proj.weight": "model-00003-of-00013.safetensors",
|
| 1067 |
"vision_encoder.blocks.9.attn.q_norm.weight": "model-00003-of-00013.safetensors",
|
| 1068 |
"vision_encoder.blocks.9.attn.qkv.weight": "model-00003-of-00013.safetensors",
|
| 1069 |
+
"vision_encoder.blocks.9.ls1.weight": "model-00003-of-00013.safetensors",
|
| 1070 |
+
"vision_encoder.blocks.9.ls2.weight": "model-00003-of-00013.safetensors",
|
| 1071 |
"vision_encoder.blocks.9.mlp.fc1.bias": "model-00003-of-00013.safetensors",
|
| 1072 |
"vision_encoder.blocks.9.mlp.fc1.weight": "model-00003-of-00013.safetensors",
|
| 1073 |
"vision_encoder.blocks.9.mlp.fc2.bias": "model-00003-of-00013.safetensors",
|
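For context, the entries above form the "weight_map" of a sharded safetensors index: each tensor name maps to the shard file that stores it, which is how the renamed ls1.weight / ls2.weight keys are resolved at load time. The sketch below is illustrative only, not part of this commit; it assumes the standard Hugging Face layout where model.safetensors.index.json sits next to the shard files, and load_tensor is a hypothetical helper name.

import json
from safetensors import safe_open

# Read the index; its "weight_map" maps tensor names to shard filenames.
with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

def load_tensor(name):
    # Resolve the tensor to its shard, then read only that tensor from it.
    shard = weight_map[name]  # e.g. "model-00010-of-00013.safetensors"
    with safe_open(shard, framework="pt") as f:
        return f.get_tensor(name)

# The renamed layer-scale keys resolve like any other entry:
ls1 = load_tensor("vision_encoder.blocks.37.ls1.weight")

Per-tensor resolution through the index matters here because some blocks straddle shard boundaries: for example, vision_encoder.blocks.39.ls2.weight lives in model-00011-of-00013.safetensors while most of block 39 stays in model-00010-of-00013.safetensors.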