{
  "architectures": [
    "H2OVLChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_h2ovl_chat.H2OVLChatConfig",
    "AutoModel": "modeling_h2ovl_chat.H2OVLChatModel",
    "AutoModelForCausalLM": "modeling_h2ovl_chat.H2OVLChatModel"
  },

  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "max_dynamic_patch": 6,
  "min_dynamic_patch": 1,
  "model_type": "h2ovl_chat",
  "pad2square": false,
  "ps_version": "v2",
  "select_layer": -1,
  "template": "h2ogpt2",
  "torch_dtype": "bfloat16",
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "use_msac": true,

  "llm_config": {
    "_name_or_path": "h2oai/h2o-danube2-1.8b-chat",
    "model_type": "mistral",
    "architectures": [
      "MistralForCausalLM"
    ],
    "attention_dropout": 0.0,
    "torch_dtype": "bfloat16",
    "use_bfloat16": true,
    "hidden_size": 2560,
    "intermediate_size": 6912,
    "num_hidden_layers": 24,
    "num_attention_heads": 32,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-05,
    "hidden_act": "silu",
    "bos_token_id": 1,
    "eos_token_id": 2,
    "pad_token_id": 0,
    "vocab_size": 32010,
    "add_cross_attention": false,
    "return_dict": true,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "prefix": null,
    "rope_theta": 10000,
    "sep_token_id": null,
    "sliding_window": null,
    "tie_word_embeddings": false,
    "tie_encoder_decoder": false,
    "torchscript": false,
    "use_cache": true,
    "transformers_version": "4.44.0"
  },

  "vision_config": {
    "architectures": [
      "InternVisionModel"
    ],
    "hidden_size": 1024,
    "image_size": 448,
    "intermediate_size": 4096,
    "model_type": "intern_vit_6b",
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "qk_normalization": false,
    "qkv_bias": true,
    "return_dict": true,
    "torch_dtype": "bfloat16",
    "use_bfloat16": true,
    "use_flash_attn": true
  }
}
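
Because "auto_map" points at custom configuration/modeling modules shipped alongside this config (configuration_h2ovl_chat.py, modeling_h2ovl_chat.py), the checkpoint can only be loaded through transformers with trust_remote_code=True. Below is a minimal loading sketch; the repo id is an assumption (this file does not name the hosting repo), so substitute the checkpoint that actually ships this config.json.

# Minimal loading sketch, assuming a hypothetical repo id; the dtype matches
# the "torch_dtype": "bfloat16" entry in the config above.
import torch
from transformers import AutoModel, AutoTokenizer

MODEL_ID = "h2oai/h2ovl-mississippi-2b"  # assumption: replace with the real repo id

model = AutoModel.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # per "torch_dtype" in this config
    trust_remote_code=True,      # required: "auto_map" resolves to custom code in the repo
).eval()
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)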