# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Qwen2VL model configuration"""

import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class Qwen2VLVisionConfig(PretrainedConfig):
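    r"""
    Configuration for the Qwen2-VL vision encoder. The argument descriptions below are inferred from the
    parameter names and their defaults (which match the released Qwen2-VL checkpoints); see the upstream
    documentation for authoritative definitions.

    Args:
        depth (`int`, *optional*, defaults to 32):
            Number of transformer blocks in the vision encoder.
        embed_dim (`int`, *optional*, defaults to 1280):
            Dimension of the patch embeddings inside the vision encoder.
        hidden_size (`int`, *optional*, defaults to 3584):
            Dimension of the visual features after projection to the language model.
        hidden_act (`str`, *optional*, defaults to `"quick_gelu"`):
            Activation function used in the vision encoder MLP blocks.
        mlp_ratio (`int`, *optional*, defaults to 4):
            Ratio of the MLP hidden dimension to `embed_dim`.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads in each vision attention layer.
        in_channels (`int`, *optional*, defaults to 3):
            Number of input image channels.
        patch_size (`int`, *optional*, defaults to 14):
            Spatial size of each image patch.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            Factor by which neighboring patches are merged spatially.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            Number of video frames grouped into one temporal patch.
    """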
    model_type = "qwen2_vl"

    def __init__(
        self,
        depth=32,
        embed_dim=1280,
        hidden_size=3584,
        hidden_act="quick_gelu",
        mlp_ratio=4,
        num_heads=16,
        in_channels=3,
        patch_size=14,
        spatial_merge_size=2,
        temporal_patch_size=2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.depth = depth
        self.embed_dim = embed_dim
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # The checkpoint stores the vision settings nested inside the full qwen2_vl config,
        # so pull out the "vision_config" sub-dict before building this class.
        if config_dict.get("model_type") == "qwen2_vl":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
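

# Illustrative usage (added sketch, not part of the upstream module): thanks to the
# `from_pretrained` override above, the vision sub-config can be loaded directly from a
# full Qwen2-VL checkpoint; the nested "vision_config" entry is extracted from the
# top-level qwen2_vl config dict. Kept as a comment because it needs checkpoint files:
#
#   vision_config = Qwen2VLVisionConfig.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
#   print(vision_config.embed_dim)  # expected 1280 given the defaults above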


class Qwen2VLConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen2VLModel`]. It is used to instantiate a
    Qwen2-VL model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    Qwen2-VL-7B-Instruct [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 152064):
            Vocabulary size of the Qwen2VL model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Qwen2VLModel`].
        hidden_size (`int`, *optional*, defaults to 8192):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 29568):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 80):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings.
        use_sliding_window (`bool`, *optional*, defaults to `False`):
            Whether to use sliding window attention.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention (SWA) window size. If not specified, will default to `4096`.
        max_window_layers (`int`, *optional*, defaults to 80):
            The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top
            use full attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        vision_config (`Dict`, *optional*):
            The config for the visual encoder initialization.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is
            an experimental feature, subject to breaking API changes in future versions.

    ```python
    >>> from transformers import Qwen2VLForConditionalGeneration, Qwen2VLConfig

    >>> # Initializing a Qwen2VL style configuration
    >>> configuration = Qwen2VLConfig()

    >>> # Initializing a model from the Qwen2-VL-7B style configuration
    >>> model = Qwen2VLForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen2_vl"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=152064,
        hidden_size=8192,
        intermediate_size=29568,
        num_hidden_layers=80,
        num_attention_heads=64,
        num_key_value_heads=8,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=1000000.0,
        use_sliding_window=False,
        sliding_window=4096,
        max_window_layers=80,
        attention_dropout=0.0,
        vision_config=None,
        rope_scaling=None,
        **kwargs,
    ):
        if isinstance(vision_config, dict):
            self.vision_config = Qwen2VLVisionConfig(**vision_config)
        elif vision_config is None:
            self.vision_config = Qwen2VLVisionConfig()

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.rope_scaling = rope_scaling

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
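

if __name__ == "__main__":
    # Minimal local sketch (added for illustration, not part of the upstream module):
    # passing `vision_config` as a plain dict shows how __init__ above converts it into a
    # Qwen2VLVisionConfig instance; omitting it falls back entirely to the defaults.
    config = Qwen2VLConfig(vision_config={"depth": 32, "embed_dim": 1280, "num_heads": 16})
    print(type(config.vision_config).__name__)  # Qwen2VLVisionConfig
    print(config.vision_config.embed_dim)       # 1280
    print(config.num_key_value_heads)           # 8 (GQA with 64 attention heads)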