Changyao committed on
Commit
09a1811
·
verified ·
1 Parent(s): 8158771

Upload folder using huggingface_hub

Browse files
added_tokens.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</box>": 151669,
3
+ "</img>": 151672,
4
+ "</quad>": 151674,
5
+ "</ref>": 151676,
6
+ "</think>": 151668,
7
+ "</tool_call>": 151658,
8
+ "</tool_response>": 151666,
9
+ "<IMG_CONTEXT>": 151671,
10
+ "<IMG_FRAME_BREAK>": 151680,
11
+ "<IMG_LINE_BREAK>": 151679,
12
+ "<box>": 151670,
13
+ "<img>": 151673,
14
+ "<img_uncond>": 151678,
15
+ "<quad>": 151675,
16
+ "<ref>": 151677,
17
+ "<think>": 151667,
18
+ "<tool_call>": 151657,
19
+ "<tool_response>": 151665,
20
+ "<|box_end|>": 151649,
21
+ "<|box_start|>": 151648,
22
+ "<|endoftext|>": 151643,
23
+ "<|file_sep|>": 151664,
24
+ "<|fim_middle|>": 151660,
25
+ "<|fim_pad|>": 151662,
26
+ "<|fim_prefix|>": 151659,
27
+ "<|fim_suffix|>": 151661,
28
+ "<|im_end|>": 151645,
29
+ "<|im_start|>": 151644,
30
+ "<|image_pad|>": 151655,
31
+ "<|object_ref_end|>": 151647,
32
+ "<|object_ref_start|>": 151646,
33
+ "<|quad_end|>": 151651,
34
+ "<|quad_start|>": 151650,
35
+ "<|repo_name|>": 151663,
36
+ "<|video_pad|>": 151656,
37
+ "<|vision_end|>": 151653,
38
+ "<|vision_pad|>": 151654,
39
+ "<|vision_start|>": 151652
40
+ }
config.json ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_commit_hash": null,
3
+ "anyres_image_size": true,
4
+ "architectures": [
5
+ "NaViL"
6
+ ],
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_navil_chat.NaViLChatConfig",
9
+ "AutoModel": "modeling_navil_chat.NaViL",
10
+ "AutoModelForCausalLM": "modeling_navil_chat.NaViL"
11
+ },
12
+ "downsample_ratio": 0.5,
13
+ "force_image_size": 32,
14
+ "llm_config": {
15
+ "_attn_implementation_autoset": true,
16
+ "_name_or_path": "./pretrained/Qwen3-8B",
17
+ "add_cross_attention": false,
18
+ "architectures": [
19
+ "Qwen3VEForCausalLM"
20
+ ],
21
+ "attention_bias": false,
22
+ "attention_dropout": 0.0,
23
+ "bad_words_ids": null,
24
+ "begin_suppress_tokens": null,
25
+ "bos_token_id": 151643,
26
+ "chunk_size_feed_forward": 0,
27
+ "cross_attention_hidden_size": null,
28
+ "decoder_start_token_id": null,
29
+ "diversity_penalty": 0.0,
30
+ "do_sample": false,
31
+ "early_stopping": false,
32
+ "encoder_no_repeat_ngram_size": 0,
33
+ "eos_token_id": 151645,
34
+ "exponential_decay_length_penalty": null,
35
+ "finetuning_task": null,
36
+ "forced_bos_token_id": null,
37
+ "forced_eos_token_id": null,
38
+ "head_dim": 128,
39
+ "hidden_act": "silu",
40
+ "hidden_size": 4096,
41
+ "id2label": {
42
+ "0": "LABEL_0",
43
+ "1": "LABEL_1"
44
+ },
45
+ "initializer_range": 0.02,
46
+ "intermediate_size": 12288,
47
+ "is_decoder": false,
48
+ "is_encoder_decoder": false,
49
+ "label2id": {
50
+ "LABEL_0": 0,
51
+ "LABEL_1": 1
52
+ },
53
+ "length_penalty": 1.0,
54
+ "max_length": 20,
55
+ "max_position_embeddings": 40960,
56
+ "max_window_layers": 36,
57
+ "min_length": 0,
58
+ "model_type": "qwen3",
59
+ "mrope_section": [
60
+ 16,
61
+ 24,
62
+ 24
63
+ ],
64
+ "no_repeat_ngram_size": 0,
65
+ "num_attention_heads": 32,
66
+ "num_beam_groups": 1,
67
+ "num_beams": 1,
68
+ "num_hidden_layers": 36,
69
+ "num_key_value_heads": 8,
70
+ "num_return_sequences": 1,
71
+ "output_attentions": false,
72
+ "output_hidden_states": false,
73
+ "output_scores": false,
74
+ "pad_token_id": null,
75
+ "prefix": null,
76
+ "problem_type": null,
77
+ "pruned_heads": {},
78
+ "remove_invalid_values": false,
79
+ "repetition_penalty": 1.0,
80
+ "return_dict": true,
81
+ "return_dict_in_generate": false,
82
+ "rms_norm_eps": 1e-06,
83
+ "rope_scaling": null,
84
+ "rope_theta": 1000000,
85
+ "sep_token_id": null,
86
+ "sliding_window": null,
87
+ "suppress_tokens": null,
88
+ "task_specific_params": null,
89
+ "temperature": 1.0,
90
+ "tf_legacy_loss": false,
91
+ "tie_encoder_decoder": false,
92
+ "tie_word_embeddings": false,
93
+ "tokenizer_class": null,
94
+ "top_k": 50,
95
+ "top_p": 1.0,
96
+ "torch_dtype": "bfloat16",
97
+ "torchscript": false,
98
+ "transformers_version": "4.51.0",
99
+ "typical_p": 1.0,
100
+ "use_bfloat16": false,
101
+ "use_cache": false,
102
+ "use_mrope": false,
103
+ "use_sliding_window": false,
104
+ "vocab_size": 151681
105
+ },
106
+ "max_dynamic_patch": 24576,
107
+ "min_dynamic_patch": 256,
108
+ "model_type": "navil_chat",
109
+ "pad2square": false,
110
+ "ps_version": "v2",
111
+ "scale_downsample_ratio": 0.7071,
112
+ "select_layer": -1,
113
+ "template": "qwen3-chat",
114
+ "torch_dtype": "bfloat16",
115
+ "transformers_version": null,
116
+ "use_backbone_lora": 0,
117
+ "use_llm_lora": 0,
118
+ "vision_config": {
119
+ "_attn_implementation_autoset": true,
120
+ "_name_or_path": "",
121
+ "add_cross_attention": false,
122
+ "architectures": [
123
+ "NaViLVisionModelAnyRes"
124
+ ],
125
+ "attention_dropout": 0.0,
126
+ "bad_words_ids": null,
127
+ "begin_suppress_tokens": null,
128
+ "bos_token_id": null,
129
+ "chunk_size_feed_forward": 0,
130
+ "cross_attention_hidden_size": null,
131
+ "decoder_start_token_id": null,
132
+ "diversity_penalty": 0.0,
133
+ "do_sample": false,
134
+ "downsample_ratio": 0.5,
135
+ "drop_path_rate": 0.0,
136
+ "dropout": 0.0,
137
+ "early_stopping": false,
138
+ "encoder_no_repeat_ngram_size": 0,
139
+ "eos_token_id": null,
140
+ "exponential_decay_length_penalty": null,
141
+ "finetuning_task": null,
142
+ "forced_bos_token_id": null,
143
+ "forced_eos_token_id": null,
144
+ "fullatt_block_indexes": null,
145
+ "hidden_act": "gelu",
146
+ "hidden_size": 1792,
147
+ "id2label": {
148
+ "0": "LABEL_0",
149
+ "1": "LABEL_1"
150
+ },
151
+ "image_size": 32,
152
+ "initializer_factor": 1.0,
153
+ "initializer_range": 0.02,
154
+ "intermediate_size": 7168,
155
+ "is_decoder": false,
156
+ "is_encoder_decoder": false,
157
+ "label2id": {
158
+ "LABEL_0": 0,
159
+ "LABEL_1": 1
160
+ },
161
+ "layer_norm_eps": 1e-06,
162
+ "length_penalty": 1.0,
163
+ "max_length": 20,
164
+ "min_length": 0,
165
+ "model_type": "navil_vit",
166
+ "no_repeat_ngram_size": 0,
167
+ "norm_type": "layer_norm",
168
+ "num_attention_heads": 28,
169
+ "num_beam_groups": 1,
170
+ "num_beams": 1,
171
+ "num_channels": 3,
172
+ "num_hidden_layers": 32,
173
+ "num_return_sequences": 1,
174
+ "output_attentions": false,
175
+ "output_hidden_states": false,
176
+ "output_scores": false,
177
+ "pad_token_id": null,
178
+ "patch_size": 16,
179
+ "prefix": null,
180
+ "problem_type": null,
181
+ "pruned_heads": {},
182
+ "qk_normalization": false,
183
+ "qkv_bias": true,
184
+ "remove_invalid_values": false,
185
+ "repetition_penalty": 1.0,
186
+ "return_dict": true,
187
+ "return_dict_in_generate": false,
188
+ "sep_token_id": null,
189
+ "suppress_tokens": null,
190
+ "task_specific_params": null,
191
+ "temperature": 1.0,
192
+ "tf_legacy_loss": false,
193
+ "tie_encoder_decoder": false,
194
+ "tie_word_embeddings": true,
195
+ "tokenizer_class": null,
196
+ "top_k": 50,
197
+ "top_p": 1.0,
198
+ "torch_dtype": "bfloat16",
199
+ "torchscript": false,
200
+ "transformers_version": "4.51.0",
201
+ "typical_p": 1.0,
202
+ "use_bfloat16": true,
203
+ "use_flash_attn": true,
204
+ "vision_fullatt_block_indexes": null,
205
+ "window_size": 8
206
+ }
207
+ }
configuration_navil_chat.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import copy
8
+
9
+ from transformers import AutoConfig, LlamaConfig
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ from .configuration_navil_vit import NaViLVisionConfig
14
+
15
+ from .configuration_qwen3 import Qwen3VEConfig
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+
20
class NaViLChatConfig(PretrainedConfig):
    """Composite configuration for the NaViL chat (vision-language) model.

    Couples a vision-encoder configuration (``NaViLVisionConfig``) with an
    LLM configuration (only the ``Qwen3VEForCausalLM`` architecture is
    supported) plus the multimodal glue parameters: dynamic patch limits,
    downsampling ratios, pixel-shuffle version and the chat prompt template.
    """

    model_type = 'navil_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            pad2square=False,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            anyres_image_size=True,
            scale_downsample_ratio=0.7071,
            ps_version='v1',
            min_dynamic_patch=256,
            max_dynamic_patch=24576,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the NaViLVisionConfig with default values.')

        if llm_config is None:
            # Fix: the previous default ({'architectures': ['InternLM2VEForCausalLM']})
            # was rejected by the architecture check below, so constructing this
            # config without an llm_config always raised ValueError. Default to
            # the only supported architecture instead.
            llm_config = {'architectures': ['Qwen3VEForCausalLM']}
            logger.info('llm_config is None. Initializing the llm_config with default values (`Qwen3VEForCausalLM`).')

        self.vision_config = NaViLVisionConfig(**vision_config)
        # Keep the vision tower in sync with the model-level downsampling.
        self.vision_config.downsample_ratio = downsample_ratio

        # Robustness: tolerate llm_config dicts without an 'architectures' key
        # instead of raising an opaque KeyError.
        architectures = llm_config.get('architectures') or [None]
        if architectures[0] == 'Qwen3VEForCausalLM':
            self.llm_config = Qwen3VEConfig(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(architectures[0]))

        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.pad2square = pad2square
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template

        self.anyres_image_size = anyres_image_size
        self.scale_downsample_ratio = scale_downsample_ratio
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default
        [`~PretrainedConfig.to_dict`] so the nested sub-configs are serialized
        recursively.

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up
            this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['pad2square'] = self.pad2square
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template

        output['anyres_image_size'] = self.anyres_image_size
        output['scale_downsample_ratio'] = self.scale_downsample_ratio
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
configuration_navil_vit.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import os
8
+ from typing import Union
9
+
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+
16
class NaViLVisionConfig(PretrainedConfig):
    r"""
    Configuration for the NaViL vision encoder (``NaViLVisionModelAnyRes``).

    Stores the vision-transformer hyperparameters — patch/image geometry,
    hidden sizes, attention layout, normalization and regularization knobs —
    plus the any-resolution extras (``downsample_ratio``,
    ``fullatt_block_indexes``, ``window_size``).

    Configuration objects inherit from [`PretrainedConfig`]; see the
    [`PretrainedConfig`] documentation for the common options.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether a bias term is added to the QKV projections.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads per layer.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the feed-forward layer.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether queries and keys are normalized in self-attention.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of transformer layers.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use the flash attention implementation.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function; `"gelu"`, `"relu"`, `"selu"`
            and `"gelu_new"` are supported.
        norm_type (`str`, *optional*, defaults to `"rms_norm"`):
            Normalization layer variant.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            Epsilon used by the normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for fully connected layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic-depth drop rate.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio for attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Std of the truncated-normal weight initializer.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            Factor for layer scale.
        downsample_ratio (`float`, *optional*, defaults to 1.0):
            Spatial downsampling applied to the vision features.
        fullatt_block_indexes (*optional*):
            Indexes of blocks using full (non-windowed) attention.
        window_size (`int`, *optional*, defaults to 8):
            Window size for windowed attention.
    """

    model_type = 'navil_vit'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        downsample_ratio=1.0,
        fullatt_block_indexes=None,
        window_size=8,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Input geometry.
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        # Transformer dimensions.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Attention / normalization options.
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.layer_norm_eps = layer_norm_eps
        # Regularization.
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.attention_dropout = attention_dropout
        # Initialization.
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        # Any-resolution extras.
        self.downsample_ratio = downsample_ratio
        self.fullatt_block_indexes = fullatt_block_indexes
        self.window_size = window_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        # Load the raw config dict; a composite checkpoint nests the vision
        # settings under 'vision_config', so unwrap that level if present.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        loaded_type = config_dict.get('model_type')
        if loaded_type is not None and hasattr(cls, 'model_type') and loaded_type != cls.model_type:
            logger.warning(
                f'You are using a model of type {loaded_type} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
configuration_qwen3.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Qwen3 model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.modeling_rope_utils import rope_config_validation
19
+ from transformers.utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
class Qwen3Config(PretrainedConfig):
    r"""
    Configuration class for [`Qwen3Model`]. Instantiating it with the default
    arguments yields a configuration similar to
    Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B).

    Configuration objects inherit from [`PretrainedConfig`]; see the
    [`PretrainedConfig`] documentation for the common options.

    Args:
        vocab_size (`int`, *optional*, defaults to 151936):
            Vocabulary size — the number of distinct tokens representable by
            `inputs_ids` passed to [`Qwen3Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads per layer.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            Number of key/value heads for Grouped Query Attention. Equal to
            `num_attention_heads` gives MHA, 1 gives MQA, anything between is
            GQA (see https://arxiv.org/pdf/2305.13245.pdf). Defaults to `32`.
        head_dim (`int`, *optional*, defaults to 128):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length the model may be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Std of the truncated-normal weight initializer.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model returns the last key/value attentions. Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether input and output word embeddings are tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            Base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            RoPE scaling configuration. `rope_type` selects the variant
            ('default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3');
            the remaining keys (`factor`, `original_max_position_embeddings`,
            `attention_factor`, `beta_fast`, `beta_slow`, `short_factor`,
            `long_factor`, `low_freq_factor`, `high_freq_factor`) depend on
            the chosen variant — see the transformers RoPE utilities for the
            per-variant semantics. A legacy `type` key is accepted and mapped
            to `rope_type`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the Q/K/V/output projections.
        use_sliding_window (`bool`, *optional*, defaults to `False`):
            Whether to use sliding window attention.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention (SWA) window size.
        max_window_layers (`int`, *optional*, defaults to 28):
            Number of bottom layers that use SWA; the top layers use full
            attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio for attention probabilities.

    ```python
    >>> from transformers import Qwen3Model, Qwen3Config

    >>> # Initializing a Qwen3 style configuration
    >>> configuration = Qwen3Config()

    >>> # Initializing a model from the Qwen3-8B style configuration
    >>> model = Qwen3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen3"
    keys_to_ignore_at_inference = ["past_key_values"]

    # Default tensor parallel plan for base model `Qwen3`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        intermediate_size=22016,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        head_dim=128,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        use_sliding_window=False,
        sliding_window=4096,
        max_window_layers=28,
        attention_dropout=0.0,
        **kwargs,
    ):
        # Backward compatibility: older configs may pass None for the KV heads.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        # Model dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Sliding-window attention settings; `use_sliding_window` is checked
        # in the modeling code, so `sliding_window` is stored unconditionally.
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers
        # Rotary embeddings and attention options.
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        # Validate the rotary position embedding parameters.
        # BC: if there is a legacy 'type' field, mirror it into 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        # NOTE(review): flash attention is force-enabled here, overriding any
        # attn_implementation supplied by the caller — presumably intentional
        # for this checkpoint; confirm before changing.
        self._attn_implementation = "flash_attention_2"
211
+
212
+ class Qwen3VEConfig(PretrainedConfig):
213
+ r"""
214
+ This is the configuration class to store the configuration of a [`Qwen3Model`]. It is used to instantiate a
215
+ Qwen3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
216
+ with the defaults will yield a similar configuration to that of
217
+ Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B).
218
+
219
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
220
+ documentation from [`PretrainedConfig`] for more information.
221
+
222
+
223
+ Args:
224
+ vocab_size (`int`, *optional*, defaults to 151936):
225
+ Vocabulary size of the Qwen3 model. Defines the number of different tokens that can be represented by the
226
+ `inputs_ids` passed when calling [`Qwen3Model`]
227
+ hidden_size (`int`, *optional*, defaults to 4096):
228
+ Dimension of the hidden representations.
229
+ intermediate_size (`int`, *optional*, defaults to 22016):
230
+ Dimension of the MLP representations.
231
+ num_hidden_layers (`int`, *optional*, defaults to 32):
232
+ Number of hidden layers in the Transformer encoder.
233
+ num_attention_heads (`int`, *optional*, defaults to 32):
234
+ Number of attention heads for each attention layer in the Transformer encoder.
235
+ num_key_value_heads (`int`, *optional*, defaults to 32):
236
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
237
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
238
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
239
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
240
+ by meanpooling all the original heads within that group. For more details checkout [this
241
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
242
+ head_dim (`int`, *optional*, defaults to 128):
243
+ The attention head dimension.
244
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
245
+ The non-linear activation function (function or string) in the decoder.
246
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
247
+ The maximum sequence length that this model might ever be used with.
248
+ initializer_range (`float`, *optional*, defaults to 0.02):
249
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
250
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
251
+ The epsilon used by the rms normalization layers.
252
+ use_cache (`bool`, *optional*, defaults to `True`):
253
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
254
+ relevant if `config.is_decoder=True`.
255
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
256
+ Whether the model's input and output word embeddings should be tied.
257
+ rope_theta (`float`, *optional*, defaults to 10000.0):
258
+ The base period of the RoPE embeddings.
259
+ rope_scaling (`Dict`, *optional*):
260
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
261
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
262
+ accordingly.
263
+ Expected contents:
264
+ `rope_type` (`str`):
265
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
266
+ 'llama3'], with 'default' being the original RoPE implementation.
267
+ `factor` (`float`, *optional*):
268
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
269
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
270
+ original maximum pre-trained length.
271
+ `original_max_position_embeddings` (`int`, *optional*):
272
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
273
+ pretraining.
274
+ `attention_factor` (`float`, *optional*):
275
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
276
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
277
+ `factor` field to infer the suggested value.
278
+ `beta_fast` (`float`, *optional*):
279
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
280
+ ramp function. If unspecified, it defaults to 32.
281
+ `beta_slow` (`float`, *optional*):
282
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
283
+ ramp function. If unspecified, it defaults to 1.
284
+ `short_factor` (`List[float]`, *optional*):
285
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
286
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
287
+ size divided by the number of attention heads divided by 2
288
+ `long_factor` (`List[float]`, *optional*):
289
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<
290
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
291
+ size divided by the number of attention heads divided by 2
292
+ `low_freq_factor` (`float`, *optional*):
293
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
294
+ `high_freq_factor` (`float`, *optional*):
295
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
296
+ attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
297
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
298
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
299
+ Whether to use sliding window attention.
300
+ sliding_window (`int`, *optional*, defaults to 4096):
301
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
302
+ max_window_layers (`int`, *optional*, defaults to 28):
303
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
304
+ attention_dropout (`float`, *optional*, defaults to 0.0):
305
+ The dropout ratio for the attention probabilities.
306
+
307
+ ```python
308
+ >>> from transformers import Qwen3Model, Qwen3Config
309
+
310
+ >>> # Initializing a Qwen3 style configuration
311
+ >>> configuration = Qwen3Config()
312
+
313
+ >>> # Initializing a model from the Qwen3-8B style configuration
314
+ >>> model = Qwen3Model(configuration)
315
+
316
+ >>> # Accessing the model configuration
317
+ >>> configuration = model.config
318
+ ```"""
319
+
320
+ model_type = "qwen3"
321
+ keys_to_ignore_at_inference = ["past_key_values"]
322
+
323
+ # Default tensor parallel plan for base model `Qwen3`
324
+ base_model_tp_plan = {
325
+ "layers.*.self_attn.q_proj": "colwise",
326
+ "layers.*.self_attn.k_proj": "colwise",
327
+ "layers.*.self_attn.v_proj": "colwise",
328
+ "layers.*.self_attn.o_proj": "rowwise",
329
+ "layers.*.mlp.gate_proj": "colwise",
330
+ "layers.*.mlp.up_proj": "colwise",
331
+ "layers.*.mlp.down_proj": "rowwise",
332
+ }
333
+ base_model_pp_plan = {
334
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
335
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
336
+ "norm": (["hidden_states"], ["hidden_states"]),
337
+ }
338
+
339
+ def __init__(
340
+ self,
341
+ vocab_size=151936,
342
+ hidden_size=4096,
343
+ intermediate_size=22016,
344
+ num_hidden_layers=32,
345
+ num_attention_heads=32,
346
+ num_key_value_heads=32,
347
+ head_dim=128,
348
+ hidden_act="silu",
349
+ max_position_embeddings=32768,
350
+ initializer_range=0.02,
351
+ rms_norm_eps=1e-6,
352
+ use_cache=True,
353
+ tie_word_embeddings=False,
354
+ rope_theta=10000.0,
355
+ rope_scaling=None,
356
+ attention_bias=False,
357
+ use_sliding_window=False,
358
+ sliding_window=4096,
359
+ max_window_layers=28,
360
+ attention_dropout=0.0,
361
+ **kwargs,
362
+ ):
363
+ self.vocab_size = vocab_size
364
+ self.max_position_embeddings = max_position_embeddings
365
+ self.hidden_size = hidden_size
366
+ self.intermediate_size = intermediate_size
367
+ self.num_hidden_layers = num_hidden_layers
368
+ self.num_attention_heads = num_attention_heads
369
+ self.use_sliding_window = use_sliding_window
370
+ self.sliding_window = sliding_window # we check `use_sliding_window` in the modeling code
371
+ self.max_window_layers = max_window_layers
372
+
373
+ # for backward compatibility
374
+ if num_key_value_heads is None:
375
+ num_key_value_heads = num_attention_heads
376
+
377
+ self.num_key_value_heads = num_key_value_heads
378
+ self.head_dim = head_dim
379
+ self.hidden_act = hidden_act
380
+ self.initializer_range = initializer_range
381
+ self.rms_norm_eps = rms_norm_eps
382
+ self.use_cache = use_cache
383
+ self.rope_theta = rope_theta
384
+ self.rope_scaling = rope_scaling
385
+ self.attention_bias = attention_bias
386
+ self.attention_dropout = attention_dropout
387
+ # Validate the correctness of rotary position embeddings parameters
388
+ # BC: if there is a 'type' field, move it to 'rope_type'.
389
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
390
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
391
+ rope_config_validation(self)
392
+
393
+ super().__init__(
394
+ tie_word_embeddings=tie_word_embeddings,
395
+ **kwargs,
396
+ )
397
+ self._attn_implementation = "flash_attention_2"
398
+
399
+ __all__ = ["Qwen3Config", "Qwen3VEConfig"]
constants.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
2
+ IMG_START_TOKEN = '<img>'
3
+ IMG_END_TOKEN = '</img>'
4
+ IMG_LINE_BREAK_TOKEN = '<IMG_LINE_BREAK>'
5
+ IMG_FRAME_BREAK_TOKEN = '<IMG_FRAME_BREAK>'
6
+ QUAD_START_TOKEN = '<quad>'
7
+ QUAD_END_TOKEN = '</quad>'
8
+ REF_START_TOKEN = '<ref>'
9
+ REF_END_TOKEN = '</ref>'
10
+ BOX_START_TOKEN = '<box>'
11
+ BOX_END_TOKEN = '</box>'
12
+
13
+ IMG_UNCOND_TOKEN = '<img_uncond>'
14
+
15
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
16
+ IMAGENET_STD = (0.229, 0.224, 0.225)
17
+ CLIP_MEAN = (0.4814546, 0.4578275, 0.40821073)
18
+ CLIP_STD = (0.2686295, 0.2613025, 0.2757711)
19
+ SIGLIP_MEAN = (0.5, 0.5, 0.5)
20
+ SIGLIP_STD = (0.5, 0.5, 0.5)
21
+ VAE_MEAN = (0.5, 0.5, 0.5)
22
+ VAE_STD = (0.5, 0.5, 0.5)
23
+
24
+ SPECIAL_TOKEN_LIST = [
25
+ BOX_END_TOKEN, BOX_START_TOKEN,
26
+ IMG_CONTEXT_TOKEN, IMG_END_TOKEN,
27
+ IMG_START_TOKEN, QUAD_END_TOKEN,
28
+ QUAD_START_TOKEN, REF_END_TOKEN,
29
+ REF_START_TOKEN, IMG_UNCOND_TOKEN,
30
+ IMG_LINE_BREAK_TOKEN, IMG_FRAME_BREAK_TOKEN,
31
+ ]
conversation.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Conversation prompt templates.
3
+
4
+ We kindly request that you import fastchat instead of copying this file if you wish to use it.
5
+ If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
6
+ """
7
+
8
+ import dataclasses
9
+ from enum import IntEnum, auto
10
+ from typing import Any, Dict, List, Tuple, Union
11
+
12
+
13
+ class SeparatorStyle(IntEnum):
14
+ """Separator styles."""
15
+
16
+ ADD_COLON_SINGLE = auto()
17
+ ADD_COLON_TWO = auto()
18
+ ADD_COLON_SPACE_SINGLE = auto()
19
+ NO_COLON_SINGLE = auto()
20
+ NO_COLON_TWO = auto()
21
+ ADD_NEW_LINE_SINGLE = auto()
22
+ LLAMA2 = auto()
23
+ CHATGLM = auto()
24
+ CHATML = auto()
25
+ CHATINTERN = auto()
26
+ DOLLY = auto()
27
+ RWKV = auto()
28
+ PHOENIX = auto()
29
+ ROBIN = auto()
30
+ FALCON_CHAT = auto()
31
+ CHATGLM3 = auto()
32
+ INTERNVL_ZH = auto()
33
+ MPT = auto()
34
+
35
+
36
+ @dataclasses.dataclass
37
+ class Conversation:
38
+ """A class that manages prompt templates and keeps all conversation history."""
39
+
40
+ # The name of this template
41
+ name: str
42
+ # The template of the system prompt
43
+ system_template: str = '{system_message}'
44
+ # The system message
45
+ system_message: str = ''
46
+ # The names of two roles
47
+ roles: Tuple[str] = ('USER', 'ASSISTANT')
48
+ # All messages. Each item is (role, message).
49
+ messages: List[List[str]] = ()
50
+ # The number of few shot examples
51
+ offset: int = 0
52
+ # The separator style and configurations
53
+ sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
54
+ sep: str = '\n'
55
+ sep2: str = None
56
+ # Stop criteria (the default one is EOS token)
57
+ stop_str: Union[str, List[str]] = None
58
+ # Stops generation if meeting any token in this list
59
+ stop_token_ids: List[int] = None
60
+
61
+ def get_prompt(self) -> str:
62
+ """Get the prompt for generation."""
63
+ system_prompt = self.system_template.format(system_message=self.system_message)
64
+ if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
65
+ ret = system_prompt + self.sep
66
+ for role, message in self.messages:
67
+ if message:
68
+ ret += role + ': ' + message + self.sep
69
+ else:
70
+ ret += role + ':'
71
+ return ret
72
+ elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
73
+ seps = [self.sep, self.sep2]
74
+ ret = system_prompt + seps[0]
75
+ for i, (role, message) in enumerate(self.messages):
76
+ if message:
77
+ ret += role + ': ' + message + seps[i % 2]
78
+ else:
79
+ ret += role + ':'
80
+ return ret
81
+ elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
82
+ ret = system_prompt + self.sep
83
+ for role, message in self.messages:
84
+ if message:
85
+ ret += role + ': ' + message + self.sep
86
+ else:
87
+ ret += role + ': ' # must be end with a space
88
+ return ret
89
+ elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
90
+ ret = '' if system_prompt == '' else system_prompt + self.sep
91
+ for role, message in self.messages:
92
+ if message:
93
+ ret += role + '\n' + message + self.sep
94
+ else:
95
+ ret += role + '\n'
96
+ return ret
97
+ elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
98
+ ret = system_prompt
99
+ for role, message in self.messages:
100
+ if message:
101
+ ret += role + message + self.sep
102
+ else:
103
+ ret += role
104
+ return ret
105
+ elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
106
+ seps = [self.sep, self.sep2]
107
+ ret = system_prompt
108
+ for i, (role, message) in enumerate(self.messages):
109
+ if message:
110
+ ret += role + message + seps[i % 2]
111
+ else:
112
+ ret += role
113
+ return ret
114
+ elif self.sep_style == SeparatorStyle.RWKV:
115
+ ret = system_prompt
116
+ for i, (role, message) in enumerate(self.messages):
117
+ if message:
118
+ ret += (
119
+ role
120
+ + ': '
121
+ + message.replace('\r\n', '\n').replace('\n\n', '\n')
122
+ )
123
+ ret += '\n\n'
124
+ else:
125
+ ret += role + ':'
126
+ return ret
127
+ elif self.sep_style == SeparatorStyle.LLAMA2:
128
+ seps = [self.sep, self.sep2]
129
+ if self.system_message:
130
+ ret = system_prompt
131
+ else:
132
+ ret = '[INST] '
133
+ for i, (role, message) in enumerate(self.messages):
134
+ tag = self.roles[i % 2]
135
+ if message:
136
+ if i == 0:
137
+ ret += message + ' '
138
+ else:
139
+ ret += tag + ' ' + message + seps[i % 2]
140
+ else:
141
+ ret += tag
142
+ return ret
143
+ elif self.sep_style == SeparatorStyle.CHATGLM:
144
+ # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
145
+ # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
146
+ round_add_n = 1 if self.name == 'chatglm2' else 0
147
+ if system_prompt:
148
+ ret = system_prompt + self.sep
149
+ else:
150
+ ret = ''
151
+
152
+ for i, (role, message) in enumerate(self.messages):
153
+ if i % 2 == 0:
154
+ ret += f'[Round {i//2 + round_add_n}]{self.sep}'
155
+
156
+ if message:
157
+ ret += f'{role}:{message}{self.sep}'
158
+ else:
159
+ ret += f'{role}:'
160
+ return ret
161
+ elif self.sep_style == SeparatorStyle.CHATML:
162
+ ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
163
+ for role, message in self.messages:
164
+ if message:
165
+ ret += role + '\n' + message + self.sep + '\n'
166
+ else:
167
+ ret += role + '\n'
168
+ return ret
169
+ elif self.sep_style == SeparatorStyle.CHATGLM3:
170
+ ret = ''
171
+ if self.system_message:
172
+ ret += system_prompt
173
+ for role, message in self.messages:
174
+ if message:
175
+ ret += role + '\n' + ' ' + message
176
+ else:
177
+ ret += role
178
+ return ret
179
+ elif self.sep_style == SeparatorStyle.CHATINTERN:
180
+ # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
181
+ seps = [self.sep, self.sep2]
182
+ ret = system_prompt
183
+ for i, (role, message) in enumerate(self.messages):
184
+ # if i % 2 == 0:
185
+ # ret += "<s>"
186
+ if message:
187
+ ret += role + ':' + message + seps[i % 2] + '\n'
188
+ else:
189
+ ret += role + ':'
190
+ return ret
191
+ elif self.sep_style == SeparatorStyle.DOLLY:
192
+ seps = [self.sep, self.sep2]
193
+ ret = system_prompt
194
+ for i, (role, message) in enumerate(self.messages):
195
+ if message:
196
+ ret += role + ':\n' + message + seps[i % 2]
197
+ if i % 2 == 1:
198
+ ret += '\n\n'
199
+ else:
200
+ ret += role + ':\n'
201
+ return ret
202
+ elif self.sep_style == SeparatorStyle.PHOENIX:
203
+ ret = system_prompt
204
+ for role, message in self.messages:
205
+ if message:
206
+ ret += role + ': ' + '<s>' + message + '</s>'
207
+ else:
208
+ ret += role + ': ' + '<s>'
209
+ return ret
210
+ elif self.sep_style == SeparatorStyle.ROBIN:
211
+ ret = system_prompt + self.sep
212
+ for role, message in self.messages:
213
+ if message:
214
+ ret += role + ':\n' + message + self.sep
215
+ else:
216
+ ret += role + ':\n'
217
+ return ret
218
+ elif self.sep_style == SeparatorStyle.FALCON_CHAT:
219
+ ret = ''
220
+ if self.system_message:
221
+ ret += system_prompt + self.sep
222
+ for role, message in self.messages:
223
+ if message:
224
+ ret += role + ': ' + message + self.sep
225
+ else:
226
+ ret += role + ':'
227
+
228
+ return ret
229
+ elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
230
+ seps = [self.sep2, self.sep]
231
+ ret = self.system_message + seps[0]
232
+ for i, (role, message) in enumerate(self.messages):
233
+ if message:
234
+ ret += role + ': ' + message + seps[i % 2]
235
+ else:
236
+ ret += role + ':'
237
+ return ret
238
+ elif self.sep_style == SeparatorStyle.MPT:
239
+ ret = system_prompt + self.sep
240
+ for role, message in self.messages:
241
+ if message:
242
+ if type(message) is tuple:
243
+ message, _, _ = message
244
+ ret += role + message + self.sep
245
+ else:
246
+ ret += role
247
+ return ret
248
+ else:
249
+ raise ValueError(f'Invalid style: {self.sep_style}')
250
+
251
+ def set_system_message(self, system_message: str):
252
+ """Set the system message."""
253
+ self.system_message = system_message
254
+
255
+ def append_message(self, role: str, message: str):
256
+ """Append a new message."""
257
+ self.messages.append([role, message])
258
+
259
+ def update_last_message(self, message: str):
260
+ """Update the last output.
261
+
262
+ The last message is typically set to be None when constructing the prompt,
263
+ so we need to update it in-place after getting the response from a model.
264
+ """
265
+ self.messages[-1][1] = message
266
+
267
+ def to_gradio_chatbot(self):
268
+ """Convert the conversation to gradio chatbot format."""
269
+ ret = []
270
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
271
+ if i % 2 == 0:
272
+ ret.append([msg, None])
273
+ else:
274
+ ret[-1][-1] = msg
275
+ return ret
276
+
277
+ def to_openai_api_messages(self):
278
+ """Convert the conversation to OpenAI chat completion format."""
279
+ ret = [{'role': 'system', 'content': self.system_message}]
280
+
281
+ for i, (_, msg) in enumerate(self.messages[self.offset :]):
282
+ if i % 2 == 0:
283
+ ret.append({'role': 'user', 'content': msg})
284
+ else:
285
+ if msg is not None:
286
+ ret.append({'role': 'assistant', 'content': msg})
287
+ return ret
288
+
289
+ def copy(self):
290
+ return Conversation(
291
+ name=self.name,
292
+ system_template=self.system_template,
293
+ system_message=self.system_message,
294
+ roles=self.roles,
295
+ messages=[[x, y] for x, y in self.messages],
296
+ offset=self.offset,
297
+ sep_style=self.sep_style,
298
+ sep=self.sep,
299
+ sep2=self.sep2,
300
+ stop_str=self.stop_str,
301
+ stop_token_ids=self.stop_token_ids,
302
+ )
303
+
304
+ def dict(self):
305
+ return {
306
+ 'template_name': self.name,
307
+ 'system_message': self.system_message,
308
+ 'roles': self.roles,
309
+ 'messages': self.messages,
310
+ 'offset': self.offset,
311
+ }
312
+
313
+
314
+ # A global registry for all conversation templates
315
+ conv_templates: Dict[str, Conversation] = {}
316
+
317
+
318
+ def register_conv_template(template: Conversation, override: bool = False):
319
+ """Register a new conversation template."""
320
+ if not override:
321
+ assert (
322
+ template.name not in conv_templates
323
+ ), f'{template.name} has been registered.'
324
+
325
+ conv_templates[template.name] = template
326
+
327
+
328
+ def get_conv_template(name: str) -> Conversation:
329
+ """Get a conversation template."""
330
+ return conv_templates[name].copy()
331
+
332
+
333
+ # InternVL-Chat-V1-1 template
334
+ register_conv_template(
335
+ Conversation(
336
+ name='internvl_zh',
337
+ system_template='',
338
+ roles=('<human>', '<bot>'),
339
+ sep_style=SeparatorStyle.INTERNVL_ZH,
340
+ sep='</s>',
341
+ sep2=' ',
342
+ )
343
+ )
344
+
345
+
346
+ # Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
347
+ # is that during training, the preprocessing function for the Hermes-2 template doesn't add
348
+ # <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
349
+ # Therefore, they are completely equivalent during inference.
350
+ register_conv_template(
351
+ Conversation(
352
+ name='Hermes-2',
353
+ system_template='<|im_start|>system\n{system_message}',
354
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
355
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
356
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
357
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
358
+ sep_style=SeparatorStyle.MPT,
359
+ sep='<|im_end|>',
360
+ stop_token_ids=[
361
+ 2,
362
+ 6,
363
+ 7,
364
+ 8,
365
+ ],
366
+ stop_str='<|endoftext|>',
367
+ )
368
+ )
369
+
370
+ register_conv_template(
371
+ Conversation(
372
+ name='Hermes-2-imgen',
373
+ system_template='<|im_start|>system\n{system_message}',
374
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
375
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
376
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
377
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
378
+ sep_style=SeparatorStyle.MPT,
379
+ sep='<|im_end|>',
380
+ stop_token_ids=[
381
+ 2,
382
+ 6,
383
+ 7,
384
+ 8,
385
+ ],
386
+ stop_str='<|endoftext|>',
387
+ )
388
+ )
389
+
390
+ register_conv_template(
391
+ Conversation(
392
+ name='internlm2-chat',
393
+ system_template='<|im_start|>system\n{system_message}',
394
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
395
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
396
+ system_message='你���由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
397
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
398
+ sep_style=SeparatorStyle.MPT,
399
+ sep='<|im_end|>',
400
+ stop_token_ids=[
401
+ 2,
402
+ 92543,
403
+ 92542
404
+ ]
405
+ )
406
+ )
407
+
408
+ register_conv_template(
409
+ Conversation(
410
+ name='internlm2-chat-imgen',
411
+ system_template='<|im_start|>system\n{system_message}',
412
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
413
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
414
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
415
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
416
+ sep_style=SeparatorStyle.MPT,
417
+ sep='<|im_end|>',
418
+ stop_token_ids=[
419
+ 2,
420
+ 92543,
421
+ 92542
422
+ ]
423
+ )
424
+ )
425
+
426
+ register_conv_template(
427
+ Conversation(
428
+ name='phi3-chat',
429
+ system_template='<|system|>\n{system_message}',
430
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
431
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
432
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
433
+ roles=('<|user|>\n', '<|assistant|>\n'),
434
+ sep_style=SeparatorStyle.MPT,
435
+ sep='<|end|>',
436
+ stop_token_ids=[
437
+ 2,
438
+ 32000,
439
+ 32007
440
+ ]
441
+ )
442
+ )
443
+
444
+ register_conv_template(
445
+ Conversation(
446
+ name='qwen3-chat',
447
+ system_template='<|im_start|>system\n{system_message}',
448
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
449
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
450
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
451
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n<think>\n\n</think>\n\n'),
452
+ sep_style=SeparatorStyle.MPT,
453
+ sep='<|im_end|>',
454
+ stop_token_ids=[
455
+ 2,
456
+ 92543,
457
+ 92542
458
+ ]
459
+ )
460
+ )
generation_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "transformers_version": "4.51.0"
4
+ }
image_processing_qwen2_vl.py ADDED
@@ -0,0 +1,510 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """Image processor class for Qwen2-VL."""
21
+
22
+ import math
23
+ from typing import Dict, List, Optional, Union
24
+
25
+ import numpy as np
26
+
27
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
28
+ from transformers.image_transforms import (
29
+ convert_to_rgb,
30
+ resize,
31
+ to_channel_dimension_format,
32
+ pad
33
+ )
34
+ from transformers.image_utils import (
35
+ OPENAI_CLIP_MEAN,
36
+ OPENAI_CLIP_STD,
37
+ ChannelDimension,
38
+ ImageInput,
39
+ PILImageResampling,
40
+ VideoInput,
41
+ get_image_size,
42
+ infer_channel_dimension_format,
43
+ is_scaled_image,
44
+ is_valid_image,
45
+ make_list_of_images,
46
+ to_numpy_array,
47
+ valid_images,
48
+ validate_preprocess_arguments,
49
+ )
50
+ from transformers.utils import TensorType, is_vision_available, logging
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+
56
+ if is_vision_available():
57
+ from PIL import Image
58
+
59
+
60
+ def make_batched_images(images) -> List[List[ImageInput]]:
61
+ """
62
+ Accepts images in list or nested list format, and makes a list of images for preprocessing.
63
+
64
+ Args:
65
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
66
+ The input image.
67
+
68
+ Returns:
69
+ list: A list of images.
70
+ """
71
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
72
+ return [img for img_list in images for img in img_list]
73
+
74
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
75
+ return images
76
+
77
+ elif is_valid_image(images):
78
+ return [images]
79
+
80
+ raise ValueError(f"Could not make batched images from {images}")
81
+
82
+
83
+ # Copied from transformers.models.llava_next_video.image_processing_llava_next_video.make_batched_videos
84
+ def make_batched_videos(videos) -> List[VideoInput]:
85
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
86
+ return videos
87
+
88
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
89
+ if isinstance(videos[0], Image.Image):
90
+ return [videos]
91
+ elif len(videos[0].shape) == 4:
92
+ return [list(video) for video in videos]
93
+
94
+ elif is_valid_image(videos) and len(videos.shape) == 4:
95
+ return [list(videos)]
96
+
97
+ raise ValueError(f"Could not make batched video from {videos}")
98
+
99
+
100
+ def smart_resize(
101
+ height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
102
+ ):
103
+ """Rescales the image so that the following conditions are met:
104
+
105
+ 1. Both dimensions (height and width) are divisible by 'factor'.
106
+
107
+ 2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
108
+
109
+ 3. The aspect ratio of the image is maintained as closely as possible.
110
+
111
+ """
112
+ if height < factor or width < factor:
113
+ raise ValueError(f"height:{height} or width:{width} must be larger than factor:{factor}")
114
+ elif max(height, width) / min(height, width) > 200:
115
+ raise ValueError(
116
+ f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
117
+ )
118
+ h_bar = round(height / factor) * factor
119
+ w_bar = round(width / factor) * factor
120
+ if h_bar * w_bar > max_pixels:
121
+ beta = math.sqrt((height * width) / max_pixels)
122
+ h_bar = math.floor(height / beta / factor) * factor
123
+ w_bar = math.floor(width / beta / factor) * factor
124
+ elif h_bar * w_bar < min_pixels:
125
+ beta = math.sqrt(min_pixels / (height * width))
126
+ h_bar = math.ceil(height * beta / factor) * factor
127
+ w_bar = math.ceil(width * beta / factor) * factor
128
+ return h_bar, w_bar
129
+
130
+
131
+ class Qwen2VLImageProcessor(BaseImageProcessor):
132
+ r"""
133
+ Constructs a Qwen2-VL image processor that dynamically resizes images based on the original images.
134
+
135
+ Args:
136
+ do_resize (`bool`, *optional*, defaults to `True`):
137
+ Whether to resize the image's (height, width) dimensions.
138
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
139
+ Resampling filter to use when resizing the image.
140
+ do_rescale (`bool`, *optional*, defaults to `True`):
141
+ Whether to rescale the image by the specified scale `rescale_factor`.
142
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
143
+ Scale factor to use if rescaling the image.
144
+ do_normalize (`bool`, *optional*, defaults to `True`):
145
+ Whether to normalize the image.
146
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
147
+ Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
148
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
149
+ Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
150
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
151
+ Whether to convert the image to RGB.
152
+ min_pixels (`int`, *optional*, defaults to `56 * 56`):
153
+ The min pixels of the image to resize the image.
154
+ max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
155
+ The max pixels of the image to resize the image.
156
+ patch_size (`int`, *optional*, defaults to 14):
157
+ The spacial patch size of the vision encoder.
158
+ temporal_patch_size (`int`, *optional*, defaults to 2):
159
+ The temporal patch size of the vision encoder.
160
+ merge_size (`int`, *optional*, defaults to 2):
161
+ The merge size of the vision encoder to llm encoder.
162
+ """
163
+
164
+ model_input_names = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw"]
165
+
166
+ def __init__(
167
+ self,
168
+ do_resize: bool = True,
169
+ do_pad: bool = False,
170
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
171
+ do_rescale: bool = True,
172
+ rescale_factor: Union[int, float] = 1 / 255,
173
+ do_normalize: bool = True,
174
+ image_mean: Optional[Union[float, List[float]]] = None,
175
+ image_std: Optional[Union[float, List[float]]] = None,
176
+ do_convert_rgb: bool = True,
177
+ min_pixels: int = 56 * 56,
178
+ max_pixels: int = 28 * 28 * 1280,
179
+ patch_size: int = 14,
180
+ temporal_patch_size: int = 2,
181
+ merge_size: int = 2,
182
+ **kwargs,
183
+ ) -> None:
184
+ super().__init__(**kwargs)
185
+ self.do_resize = do_resize
186
+ self.do_pad = do_pad
187
+ self.resample = resample
188
+ self.do_rescale = do_rescale
189
+ self.rescale_factor = rescale_factor
190
+ self.do_normalize = do_normalize
191
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
192
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
193
+ self.min_pixels = min_pixels
194
+ self.max_pixels = max_pixels
195
+ self.patch_size = patch_size
196
+ self.temporal_patch_size = temporal_patch_size
197
+ self.merge_size = merge_size
198
+ self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
199
+ self.do_convert_rgb = do_convert_rgb
200
+
201
+ def _preprocess(
202
+ self,
203
+ images: Union[ImageInput, VideoInput],
204
+ do_resize: bool = None,
205
+ do_pad: bool = None,
206
+ resample: PILImageResampling = None,
207
+ do_rescale: bool = None,
208
+ rescale_factor: float = None,
209
+ do_normalize: bool = None,
210
+ image_mean: Optional[Union[float, List[float]]] = None,
211
+ image_std: Optional[Union[float, List[float]]] = None,
212
+ do_convert_rgb: bool = None,
213
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
214
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
215
+ min_pixels: int = None,
216
+ max_pixels: int = None,
217
+ ):
218
+ """
219
+ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
220
+
221
+ Args:
222
+ images (`ImageInput`):
223
+ Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
224
+ vision_info (`List[Dict]`, *optional*):
225
+ Optional list of dictionaries containing additional information about vision inputs.
226
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
227
+ Whether to resize the image.
228
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
229
+ Whether to pad the image.
230
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
231
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
232
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
233
+ Whether to rescale the image.
234
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
235
+ Scale factor to use if rescaling the image.
236
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
237
+ Whether to normalize the image.
238
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
239
+ Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
240
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
241
+ Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
242
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
243
+ Whether to convert the image to RGB.
244
+ data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
245
+ The channel dimension format for the output image. Can be one of:
246
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
247
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
248
+ - Unset: Use the channel dimension format of the input image.
249
+ input_data_format (`ChannelDimension` or `str`, *optional*):
250
+ The channel dimension format for the input image. Can be one of:
251
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
252
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
253
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
254
+ """
255
+
256
+ min_pixels = min_pixels if min_pixels is not None else self.min_pixels
257
+ max_pixels = max_pixels if max_pixels is not None else self.max_pixels
258
+
259
+ images = make_list_of_images(images)
260
+
261
+ if do_convert_rgb:
262
+ images = [convert_to_rgb(image) for image in images]
263
+
264
+ # All transformations expect numpy arrays.
265
+ images = [to_numpy_array(image) for image in images]
266
+
267
+ if is_scaled_image(images[0]) and do_rescale:
268
+ logger.warning_once(
269
+ "It looks like you are trying to rescale already rescaled images. If the input"
270
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
271
+ )
272
+ if input_data_format is None:
273
+ # We assume that all images have the same channel dimension format.
274
+ input_data_format = infer_channel_dimension_format(images[0])
275
+
276
+ assert not (do_resize and do_pad), "Only one of `do_resize` and `do_pad` can be set to `True`."
277
+
278
+ height, width = get_image_size(images[0], channel_dim=input_data_format)
279
+ resized_height, resized_width = height, width
280
+ processed_images = []
281
+ for image in images:
282
+ if do_resize:
283
+ resized_height, resized_width = smart_resize(
284
+ height,
285
+ width,
286
+ factor=self.patch_size * self.merge_size,
287
+ min_pixels=min_pixels,
288
+ max_pixels=max_pixels,
289
+ )
290
+ image = resize(
291
+ image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
292
+ )
293
+ elif do_pad:
294
+ # 1. resize the image s.t. the total number of pixels is within the range [min_pixels, max_pixels] while maintaining the aspect ratio
295
+ resized_height, resized_width = smart_resize(
296
+ height,
297
+ width,
298
+ factor=1,
299
+ min_pixels=min_pixels,
300
+ max_pixels=max_pixels,
301
+ )
302
+ image = resize(
303
+ image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
304
+ )
305
+ # 2. pad the image to the nearest multiple of patch_size * merge_size
306
+ pad_height = math.ceil(resized_height / (self.patch_size * self.merge_size)) * self.patch_size * self.merge_size
307
+ pad_width = math.ceil(resized_width / (self.patch_size * self.merge_size)) * self.patch_size * self.merge_size
308
+ image = pad(
309
+ image,
310
+ padding=((0, pad_height - resized_height), (0, pad_width - resized_width)),
311
+ constant_values=0,
312
+ input_data_format=input_data_format,
313
+ data_format=input_data_format,
314
+ )
315
+ resized_height, resized_width = pad_height, pad_width
316
+
317
+ if do_rescale:
318
+ image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
319
+
320
+ if do_normalize:
321
+ image = self.normalize(
322
+ image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
323
+ )
324
+
325
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
326
+ processed_images.append(image)
327
+
328
+ patches = np.array(processed_images)
329
+ if data_format == ChannelDimension.LAST:
330
+ patches = patches.transpose(0, 3, 1, 2)
331
+ if patches.shape[0] == 1:
332
+ patches = np.tile(patches, (self.temporal_patch_size, 1, 1, 1))
333
+ channel = patches.shape[1]
334
+ grid_t = patches.shape[0] // self.temporal_patch_size
335
+ grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
336
+ patches = patches.reshape(
337
+ grid_t,
338
+ self.temporal_patch_size,
339
+ channel,
340
+ grid_h // self.merge_size,
341
+ self.merge_size,
342
+ self.patch_size,
343
+ grid_w // self.merge_size,
344
+ self.merge_size,
345
+ self.patch_size,
346
+ )
347
+ patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
348
+ flatten_patches = patches.reshape(
349
+ grid_t * grid_h * grid_w, channel * self.temporal_patch_size * self.patch_size * self.patch_size
350
+ )
351
+
352
+ return flatten_patches, (grid_t, grid_h, grid_w)
353
+
354
+ def preprocess(
355
+ self,
356
+ images: ImageInput,
357
+ videos: VideoInput = None,
358
+ do_resize: bool = None,
359
+ do_pad: bool = None,
360
+ size: Dict[str, int] = None,
361
+ resample: PILImageResampling = None,
362
+ do_rescale: bool = None,
363
+ rescale_factor: float = None,
364
+ do_normalize: bool = None,
365
+ image_mean: Optional[Union[float, List[float]]] = None,
366
+ image_std: Optional[Union[float, List[float]]] = None,
367
+ do_convert_rgb: bool = None,
368
+ return_tensors: Optional[Union[str, TensorType]] = None,
369
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
370
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
371
+ min_pixels: int = None,
372
+ max_pixels: int = None,
373
+ ):
374
+ """
375
+ Args:
376
+ images (`ImageInput`):
377
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
378
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
379
+ videos (`VideoInput`):
380
+ Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
381
+ passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
382
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
383
+ Whether to resize the image.
384
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
385
+ Whether to pad the image.
386
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
387
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
388
+ the longest edge resized to keep the input aspect ratio.
389
+ resample (`int`, *optional*, defaults to `self.resample`):
390
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
391
+ has an effect if `do_resize` is set to `True`.
392
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
393
+ Whether to rescale the image.
394
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
395
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
396
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
397
+ Whether to normalize the image.
398
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
399
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
400
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
401
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
402
+ `True`.
403
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
404
+ Whether to convert the image to RGB.
405
+ return_tensors (`str` or `TensorType`, *optional*):
406
+ The type of tensors to return. Can be one of:
407
+ - Unset: Return a list of `np.ndarray`.
408
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
409
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
410
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
411
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
412
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
413
+ The channel dimension format for the output image. Can be one of:
414
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
415
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
416
+ - Unset: Use the channel dimension format of the input image.
417
+ input_data_format (`ChannelDimension` or `str`, *optional*):
418
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
419
+ from the input image. Can be one of:
420
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
421
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
422
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
423
+
424
+ """
425
+ do_resize = do_resize if do_resize is not None else self.do_resize
426
+ do_pad = do_pad if do_pad is not None else self.do_pad
427
+ size = size if size is not None else self.size
428
+ resample = resample if resample is not None else self.resample
429
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
430
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
431
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
432
+ image_mean = image_mean if image_mean is not None else self.image_mean
433
+ image_std = image_std if image_std is not None else self.image_std
434
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
435
+ min_pixels = min_pixels if min_pixels is not None else self.min_pixels
436
+ max_pixels = max_pixels if max_pixels is not None else self.max_pixels
437
+
438
+ if images is not None:
439
+ images = make_batched_images(images)
440
+ if videos is not None:
441
+ videos = make_batched_videos(videos)
442
+
443
+ if images is not None and not valid_images(images):
444
+ raise ValueError(
445
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
446
+ "torch.Tensor, tf.Tensor or jax.ndarray."
447
+ )
448
+
449
+ validate_preprocess_arguments(
450
+ rescale_factor=rescale_factor,
451
+ do_normalize=do_normalize,
452
+ image_mean=image_mean,
453
+ image_std=image_std,
454
+ do_resize=do_resize,
455
+ # do_pad=do_pad,
456
+ size=size,
457
+ resample=resample,
458
+ )
459
+
460
+ if images is not None:
461
+ pixel_values, vision_grid_thws = [], []
462
+ for image in images:
463
+ patches, image_grid_thw = self._preprocess(
464
+ image,
465
+ do_resize=do_resize,
466
+ resample=resample,
467
+ do_rescale=do_rescale,
468
+ do_pad=do_pad,
469
+ rescale_factor=rescale_factor,
470
+ do_normalize=do_normalize,
471
+ image_mean=image_mean,
472
+ image_std=image_std,
473
+ data_format=data_format,
474
+ do_convert_rgb=do_convert_rgb,
475
+ input_data_format=input_data_format,
476
+ min_pixels=min_pixels,
477
+ max_pixels=max_pixels,
478
+ )
479
+ pixel_values.extend(patches)
480
+ vision_grid_thws.append(image_grid_thw)
481
+ pixel_values = np.array(pixel_values)
482
+ vision_grid_thws = np.array(vision_grid_thws)
483
+ data = {"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws}
484
+
485
+ if videos is not None:
486
+ pixel_values, vision_grid_thws = [], []
487
+ for images in videos:
488
+ patches, video_grid_thw = self._preprocess(
489
+ images,
490
+ do_resize=do_resize,
491
+ do_pad=do_pad,
492
+ resample=resample,
493
+ do_rescale=do_rescale,
494
+ rescale_factor=rescale_factor,
495
+ do_normalize=do_normalize,
496
+ image_mean=image_mean,
497
+ image_std=image_std,
498
+ data_format=data_format,
499
+ do_convert_rgb=do_convert_rgb,
500
+ input_data_format=input_data_format,
501
+ min_pixels=min_pixels,
502
+ max_pixels=max_pixels,
503
+ )
504
+ pixel_values.extend(patches)
505
+ vision_grid_thws.append(video_grid_thw)
506
+ pixel_values = np.array(pixel_values)
507
+ vision_grid_thws = np.array(vision_grid_thws)
508
+ data = {"pixel_values_videos": pixel_values, "video_grid_thw": vision_grid_thws}
509
+
510
+ return BatchFeature(data=data, tensor_type=return_tensors)
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bbf7885e7752468f4299e694ca938d90fb9e8885bbf03e25d5bf162ef956a7e
3
+ size 4954887984
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4025923f43ac16c4eeaf66362b5f77fbf92ac1f45345bb0ff36a3ecb2c771c24
3
+ size 4982970768
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa090d1a6cbba8af0778567458b81fef489f01aebb6f84bbd8f07719384482ab
3
+ size 4949401448
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e44246a62e6fdec97b8e2cfd4caf5459198be25e394f762a55813a2a2f58563
3
+ size 4932622216
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2e744af620121041b1b1026a9967b5ba3698c29e081822d40a66b5ccef4a296
3
+ size 4999750184
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f32342820a24f9c69cb5ecd7f2feaf349bdcaf71b61bb7cd8e1270bf92f5d8c6
3
+ size 4932622208
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5033d4a84e2aaf665e8d17fda3b2a8c778c704f033b0e6ebe1090109fbca358
3
+ size 3079885360
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_navil_chat.py ADDED
@@ -0,0 +1,582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import os
8
+ import warnings
9
+ from typing import Any, List, Optional, Tuple, Union
10
+ import copy
11
+
12
+ from dataclasses import dataclass
13
+
14
+ import torch
15
+ import torch.distributed as dist
16
+ from torch import nn
17
+ from torch.nn import CrossEntropyLoss
18
+
19
+ import transformers
20
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
21
+ LlamaTokenizer, Qwen2ForCausalLM)
22
+ from transformers.modeling_utils import PreTrainedModel
23
+ from transformers.utils import ModelOutput, logging
24
+ from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm
25
+
26
+ from .configuration_navil_chat import NaViLChatConfig
27
+ from .modeling_navil_vit_anyres import NaViLVisionModelAnyRes
28
+
29
+ from .conversation import get_conv_template
30
+ # from navil.model.internlm2.modeling_internlm2_ve import InternLM2VEForCausalLM
31
+ from .modeling_qwen3_ve import Qwen3VEForCausalLM
32
+ # from navil.model.internlm2.modeling_internlm2_ve import InternLM2RMSNorm
33
+ from .image_processing_qwen2_vl import Qwen2VLImageProcessor
34
+ from .constants import (
35
+ SPECIAL_TOKEN_LIST,
36
+ IMG_CONTEXT_TOKEN, IMG_END_TOKEN, IMG_START_TOKEN, IMG_UNCOND_TOKEN,
37
+ VAE_MEAN, VAE_STD,
38
+ )
39
+ from .modular_intern_vit import (
40
+ InternVisionFlashAttention2,
41
+ InternVisionSdpaAttention,
42
+ InternMLP,
43
+ NORM2FN,
44
+ InternVisionRotaryEmbedding,
45
+ )
46
+
47
+ logger = logging.get_logger(__name__)
48
+ logger.setLevel(logging.INFO)
49
+
50
+
51
+ def version_cmp(v1, v2, op='eq'):
52
+ import operator
53
+
54
+ from packaging import version
55
+ op_func = getattr(operator, op)
56
+ return op_func(version.parse(v1), version.parse(v2))
57
+
58
+
59
+
60
+ @dataclass
61
+ class CausalLMOutputWithPast(ModelOutput):
62
+ """
63
+ Base class for causal language model (or autoregressive) outputs.
64
+
65
+ Args:
66
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
67
+ Language modeling loss (for next-token prediction).
68
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
69
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
70
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
71
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
72
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
73
+
74
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
75
+ `past_key_values` input) to speed up sequential decoding.
76
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
77
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
78
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
79
+
80
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
81
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
82
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
83
+ sequence_length)`.
84
+
85
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
86
+ heads.
87
+ """
88
+
89
+ loss: Optional[torch.FloatTensor] = None
90
+ logits: torch.FloatTensor = None
91
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
92
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
93
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
94
+
95
+ log_dict: Optional[dict] = None
96
+
97
+
98
+ class NaViL(PreTrainedModel):
99
+ config_class = NaViLChatConfig
100
+ main_input_name = 'pixel_values'
101
+ _no_split_modules = ['NaViLVisionModelAnyRes', 'InternLM2DecoderLayer', 'Qwen3DecoderLayer']
102
+ _supports_flash_attn_2 = True
103
+
104
+ def __init__(self, config: NaViLChatConfig, vision_model=None, language_model=None):
105
+ super().__init__(config)
106
+ self.config = config
107
+
108
+ assert version_cmp(transformers.__version__, '4.51.0', 'ge')
109
+ image_size = config.force_image_size or config.vision_config.image_size
110
+ patch_size = config.vision_config.patch_size
111
+ self.patch_size = patch_size
112
+ self.select_layer = config.select_layer
113
+ self.template = config.template
114
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
115
+ self.downsample_ratio = config.downsample_ratio
116
+ self.patch_aspect_ratio = 1.0
117
+ self.ps_version = config.ps_version
118
+ self.llm_arch_name = config.llm_config.architectures[0]
119
+
120
+ logger.info(f'init - image_size: {image_size}, patch_size: {patch_size}, num_image_token: {self.num_image_token}')
121
+ logger.info(f'ps_version: {self.ps_version}')
122
+ if vision_model is not None:
123
+ self.vision_model = vision_model
124
+ else:
125
+ self.vision_model = NaViLVisionModelAnyRes(config.vision_config)
126
+ if language_model is not None:
127
+ self.language_model = language_model
128
+ else:
129
+ llm_config = config.llm_config
130
+ if config.llm_config.architectures[0] == 'Qwen3VEForCausalLM':
131
+ self.language_model = Qwen3VEForCausalLM(llm_config)
132
+ else:
133
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
134
+
135
+ vit_hidden_size = config.vision_config.hidden_size
136
+ llm_hidden_size = config.llm_config.hidden_size
137
+
138
+ self.mlp1 = nn.Sequential(
139
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
140
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
141
+ nn.GELU(),
142
+ nn.Linear(llm_hidden_size, llm_hidden_size)
143
+ )
144
+
145
+ self.img_context_token_id = None
146
+ self.img_start_token_id = None
147
+ self.img_end_token_id = None
148
+ self.img_uncond_token_id = None
149
+ self.img_line_break_token_id = None
150
+ self.img_frame_break_token_id = None
151
+ self.pad_token_id = None
152
+ self.conv_template = get_conv_template(self.template)
153
+ if hasattr(config, 'system_message'):
154
+ self.system_message = config.system_message
155
+ else:
156
+ self.system_message = self.conv_template.system_message
157
+
158
+ min_pixels = config.min_dynamic_patch * (patch_size ** 2)
159
+ max_pixels = config.max_dynamic_patch * (patch_size ** 2)
160
+ down_sample_ratio = config.vision_config.downsample_ratio
161
+ self.image_processor = Qwen2VLImageProcessor(
162
+ do_resize=False,
163
+ do_pad=True,
164
+ do_rescale=True,
165
+ do_normalize=True,
166
+ image_mean=VAE_MEAN,
167
+ image_std=VAE_STD,
168
+ min_pixels=min_pixels,
169
+ max_pixels=max_pixels,
170
+ patch_size=patch_size,
171
+ temporal_patch_size=1,
172
+ merge_size=int(1.0 / down_sample_ratio),
173
+ )
174
+
175
+ ##### ---- Special token embeddings ---- #####
176
+ self.special_token_embedding = nn.Embedding(len(SPECIAL_TOKEN_LIST), config.llm_config.hidden_size)
177
+ self.special_token_list = copy.deepcopy(SPECIAL_TOKEN_LIST)
178
+ self.special_token_id_list = None # Remember to initialize this in the training script after tokenizer is loaded
179
+
180
+ self.group = None # Distributed group. Remember to set this in the training script
181
+
182
+ def init_special_token_ids(self, tokenizer):
183
+ special_token_id_list = []
184
+ for token in SPECIAL_TOKEN_LIST:
185
+ special_token_id_list.append(tokenizer.convert_tokens_to_ids(token))
186
+ self.special_token_id_list = special_token_id_list
187
+
188
+ self.img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
189
+ self.img_start_token_id = tokenizer.convert_tokens_to_ids(IMG_START_TOKEN)
190
+ self.img_end_token_id = tokenizer.convert_tokens_to_ids(IMG_END_TOKEN)
191
+ self.img_uncond_token_id = tokenizer.convert_tokens_to_ids(IMG_UNCOND_TOKEN)
192
+
193
+ def replace_img_special_tokens(self, input_embeds, input_ids):
194
+ assert self.special_token_id_list is not None, "model's special_token_id_list is not initialized"
195
+ for i, token_id in enumerate(self.special_token_id_list):
196
+ token_pos = input_ids == token_id
197
+ input_embeds[token_pos] = input_embeds[token_pos] * 0.0 + self.special_token_embedding.weight[i]
198
+
199
+ return input_embeds
200
+
201
+ def _init_weights(self, module):
202
+ if isinstance(module, nn.Linear):
203
+ module.weight.data.normal_(mean=0.0, std=0.02)
204
+ if module.bias is not None:
205
+ module.bias.data.zero_()
206
+ elif isinstance(module, nn.Embedding):
207
+ module.weight.data.normal_(mean=0.0, std=0.02)
208
+ elif isinstance(module, (nn.LayerNorm, Qwen2RMSNorm)):
209
+ if hasattr(module, 'bias') and module.bias is not None:
210
+ module.bias.data.zero_()
211
+ if module.weight is not None:
212
+ module.weight.data.fill_(1.0)
213
+
214
+ def forward(
215
+ self,
216
+ pixel_values: torch.FloatTensor,
217
+ input_ids: torch.LongTensor = None,
218
+ attention_mask: Optional[torch.Tensor] = None,
219
+ position_ids: Optional[torch.LongTensor] = None,
220
+ image_flags: Optional[torch.LongTensor] = None,
221
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
222
+ labels: Optional[torch.LongTensor] = None,
223
+ use_cache: Optional[bool] = None,
224
+ output_attentions: Optional[bool] = None,
225
+ output_hidden_states: Optional[bool] = None,
226
+ return_dict: Optional[bool] = None,
227
+ generation_modality: Optional[int] = 0,
228
+ statistics: Optional[torch.LongTensor] = None,
229
+ loss_weight: Optional[List] = None,
230
+ loss_reduction_all_gather: Optional[bool] = False,
231
+ padding_type: Optional[str] = None,
232
+ type_ids: Optional[torch.LongTensor] = None,
233
+ image_grid_thw: Optional[torch.LongTensor] = None,
234
+ video_grid_thw: Optional[torch.LongTensor] = None,
235
+ rope_deltas: Optional[torch.LongTensor] = None,
236
+ # cache_position: Optional[torch.LongTensor] = None,
237
+ second_per_grid_ts: Optional[torch.Tensor] = None,
238
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
239
+ ignore_flag = False
240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
241
+
242
+ image_flags = image_flags.squeeze(-1)
243
+
244
+ input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
245
+ input_embeds = self.replace_img_special_tokens(input_embeds, input_ids)
246
+
247
+ if video_grid_thw is not None:
248
+ grid_thw = video_grid_thw
249
+ else:
250
+ grid_thw = image_grid_thw
251
+ vit_embeds, vit_embeds_ori = self.extract_feature(pixel_values, grid_thw)
252
+ vit_embeds = vit_embeds[image_flags == 1]
253
+ vit_embeds_ori = vit_embeds_ori[image_flags == 1]
254
+ vit_batch_size = image_flags.sum().item()
255
+
256
+ log_dict_keys = [
257
+ "text_loss", "text_acc1",
258
+ ]
259
+ log_dict = {k: torch.tensor(0.0, device=self.device) for k in log_dict_keys}
260
+ return_feature_scale = True
261
+
262
+ B, N, C = input_embeds.shape
263
+ selected = (input_ids == self.img_context_token_id)
264
+ try:
265
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
266
+ # ignore_flag = False
267
+ except Exception as e:
268
+ vit_embeds = vit_embeds.reshape(-1, C)
269
+ print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
270
+ f'vit_embeds.shape={vit_embeds.shape}', force=True)
271
+ n_token = selected.sum()
272
+ if n_token > vit_embeds.shape[0]:
273
+ selected = selected.view(-1, selected.shape[-1]) # 确保是 [B, N] 形状
274
+ batch_size = selected.shape[0]
275
+ max_visual_tokens = vit_embeds.shape[0] // batch_size # 每个批次可用的视觉特征数量
276
+ for i in range(batch_size):
277
+ # 获取当前批次中的图像标记位置
278
+ curr_selected = selected[i]
279
+ # 只保留前 max_visual_tokens 个标记位置
280
+ curr_indices = torch.where(curr_selected)[0][:max_visual_tokens]
281
+ # 更新选择标记
282
+ selected[i] = False
283
+ selected[i, curr_indices] = True
284
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
285
+ ignore_flag = True
286
+
287
+ # input_embeds = input_embeds.reshape(B, N, C)
288
+ visual_token_mask = (selected + (input_ids == self.img_start_token_id))
289
+
290
+ outputs = self.language_model(
291
+ inputs_embeds=input_embeds,
292
+ attention_mask=attention_mask,
293
+ position_ids=position_ids,
294
+ past_key_values=past_key_values,
295
+ use_cache=use_cache,
296
+ output_attentions=output_attentions,
297
+ output_hidden_states=output_hidden_states,
298
+ return_dict=return_dict,
299
+ visual_token_mask=visual_token_mask,
300
+ generation_modality=generation_modality,
301
+ padding_type=padding_type, # or self.train_padding_type,
302
+ skip_lm_head=False, # imgen
303
+ return_feature_scale=return_feature_scale,
304
+ )
305
+ logits = outputs.logits # B, N, C
306
+
307
+ if labels is not None and loss_weight is not None:
308
+ loss_weight = torch.tensor(loss_weight, dtype=torch.float32, device=labels.device)
309
+ # Shift so that tokens < n predict n
310
+ shift_logits = logits[..., :-1, :].contiguous()
311
+ shift_labels = labels[..., 1:].contiguous()
312
+ shift_weights = loss_weight[..., 1:].contiguous()
313
+ # Flatten the tokens
314
+ loss_fct = CrossEntropyLoss(reduction='none')
315
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
316
+ shift_labels = shift_labels.view(-1)
317
+ shift_weights = shift_weights.view(-1)
318
+ # Enable model parallelism
319
+ shift_labels = shift_labels.to(shift_logits.device)
320
+ shift_weights = shift_weights.to(shift_logits.device)
321
+ loss = loss_fct(shift_logits, shift_labels)
322
+
323
+ shift_weights_sum = shift_weights.sum()
324
+ if loss_reduction_all_gather:
325
+ dist.all_reduce(shift_weights_sum, op=dist.ReduceOp.AVG, group=self.group)
326
+
327
+ pred_ids = shift_logits.argmax(dim=-1)
328
+ pred_acc = 100.0 * ((shift_labels == pred_ids) * (shift_labels != -100)).sum() / (shift_labels != -100).sum()
329
+
330
+ log_dict.update({
331
+ "text_loss": ((loss * shift_weights).sum() / shift_weights_sum).detach(),
332
+ "text_acc1": pred_acc
333
+ })
334
+
335
+ loss = loss * shift_weights
336
+ loss = loss.sum() / shift_weights_sum
337
+
338
+ if ignore_flag:
339
+ loss = loss * 0.0
340
+
341
+ elif labels is not None:
342
+ # To reduce gpu memory, remove the image parts of the logits and labels
343
+ shift_selected = (input_ids == self.img_context_token_id)[..., :-1]
344
+ shift_logits = logits[..., :-1, :][~shift_selected]
345
+ shift_labels = labels[..., 1:][~shift_selected]
346
+
347
+ # Shift so that tokens < n predict n
348
+ # shift_logits = logits[..., :-1, :].contiguous()
349
+ # shift_labels = labels[..., 1:].contiguous()
350
+ # Flatten the tokens
351
+ loss_fct = CrossEntropyLoss()
352
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
353
+ shift_labels = shift_labels.view(-1)
354
+ # Enable model parallelism
355
+ shift_labels = shift_labels.to(shift_logits.device)
356
+ loss = loss_fct(shift_logits, shift_labels)
357
+
358
+ pred_ids = shift_logits.argmax(dim=-1)
359
+ pred_acc = 100.0 * ((shift_labels == pred_ids) * (shift_labels != -100)).sum() / (shift_labels != -100).sum()
360
+
361
+ log_dict.update({
362
+ "text_loss": loss.mean().detach(),
363
+ "text_acc1": pred_acc
364
+ })
365
+
366
+ if ignore_flag:
367
+ loss = loss * 0.0
368
+
369
+ if not return_dict:
370
+ output = (logits,) + outputs[1:]
371
+ return (loss,) + output if loss is not None else output
372
+
373
+ if return_feature_scale:
374
+ log_dict["feature_scale"] = {
375
+ "image": outputs.feature_scale[0],
376
+ "text": outputs.feature_scale[1],
377
+ }
378
+
379
+ return CausalLMOutputWithPast(
380
+ loss=loss,
381
+ logits=logits,
382
+ past_key_values=outputs.past_key_values,
383
+ hidden_states=outputs.hidden_states,
384
+ attentions=outputs.attentions,
385
+ log_dict=log_dict
386
+ )
387
+
388
+ def extract_feature(self, pixel_values, grid_thw=None):
389
+
390
+ if grid_thw is not None:
391
+ grid_thw = grid_thw.to(pixel_values.device)
392
+
393
+ vit_embeds = self.vision_model(
394
+ pixel_values=pixel_values,
395
+ output_hidden_states=False,
396
+ return_dict=True,
397
+ grid_thw=grid_thw
398
+ ).last_hidden_state
399
+
400
+ vit_embeds = pixel_shuffle_v2(vit_embeds, scale_factor=self.downsample_ratio, patch_aspect_ratio=self.patch_aspect_ratio)
401
+
402
+ vit_embeds_after_mlp = self.mlp1(vit_embeds)
403
+
404
+ return vit_embeds_after_mlp, vit_embeds
405
+
406
+ def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
407
+ num_patches_list=None, num_scales: list = [2],
408
+ IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
409
+ IMG_LINE_BREAK_TOKEN='<IMG_LINE_BREAK>', IMG_FRAME_BREAK_TOKEN='<IMG_FRAME_BREAK>',
410
+ anyres_image_size=True,
411
+ verbose=False,
412
+ ):
413
+
414
+ if history is None and pixel_values is not None and '<image>' not in question:
415
+ question = '<image>\n' * len(num_scales) + question
416
+
417
+ if num_patches_list is None:
418
+ assert not anyres_image_size, "Please provide `num_patches_list` when anyres_image_size is True."
419
+ num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
420
+ assert pixel_values is None or anyres_image_size or len(pixel_values) == sum(num_patches_list)
421
+
422
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
423
+ self.img_context_token_id = img_context_token_id
424
+ img_start_token_id = tokenizer.convert_tokens_to_ids(IMG_START_TOKEN)
425
+ self.img_start_token_id = img_start_token_id
426
+ self.img_line_break_token_id = tokenizer.convert_tokens_to_ids(IMG_LINE_BREAK_TOKEN)
427
+ self.img_frame_break_token_id = tokenizer.convert_tokens_to_ids(IMG_FRAME_BREAK_TOKEN)
428
+
429
+ template = get_conv_template(self.template)
430
+ template.system_message = self.system_message
431
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
432
+
433
+ history = [] if history is None else history
434
+ for (old_question, old_answer) in history:
435
+ template.append_message(template.roles[0], old_question)
436
+ template.append_message(template.roles[1], old_answer)
437
+ template.append_message(template.roles[0], question)
438
+ template.append_message(template.roles[1], None)
439
+ query = template.get_prompt()
440
+
441
+ if verbose and pixel_values is not None:
442
+ image_bs = pixel_values.shape[0]
443
+ print(f'dynamic ViT batch size: {image_bs}')
444
+
445
+ if anyres_image_size:
446
+ merge_size = int(1.0 / self.downsample_ratio)
447
+ for image_idx in range(len(num_scales)):
448
+ num_scales_prev = sum(num_scales[:image_idx])
449
+ num_scale = num_scales[image_idx]
450
+ _num_image_token_list = num_patches_list[num_scales_prev:num_scales_prev + num_scale]
451
+ image_tokens = f"{IMG_START_TOKEN}"
452
+ for i in range(len(_num_image_token_list)):
453
+ _image_tokens = ""
454
+ t, h, w = _num_image_token_list[i][0], _num_image_token_list[i][1] // merge_size, _num_image_token_list[i][2] // merge_size
455
+ for _ in range(t):
456
+ for _ in range(h):
457
+ _image_tokens += f"{IMG_CONTEXT_TOKEN * w}{IMG_LINE_BREAK_TOKEN}"
458
+ _image_tokens += f"{IMG_FRAME_BREAK_TOKEN}"
459
+ image_tokens += _image_tokens
460
+ image_tokens += f"{IMG_END_TOKEN}"
461
+ query = query.replace('<image>', image_tokens, 1)
462
+ else:
463
+ for num_patches in num_patches_list:
464
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
465
+ query = query.replace('<image>', image_tokens, 1)
466
+
467
+ model_inputs = tokenizer(query, return_tensors='pt')
468
+ input_ids = model_inputs['input_ids'].cuda()
469
+ attention_mask = model_inputs['attention_mask'].cuda()
470
+ generation_config['eos_token_id'] = eos_token_id
471
+ generation_output = self.generate(
472
+ pixel_values=pixel_values,
473
+ input_ids=input_ids,
474
+ attention_mask=attention_mask,
475
+ image_grid_thw=num_patches_list,
476
+ **generation_config
477
+ )
478
+ response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
479
+ response = response.split(template.sep)[0].strip()
480
+ # fix for InternLM2-base (textvqa)
481
+ response = response.replace("<|im_end|", "")
482
+ response = response.replace("<|im_end", "")
483
+ response = response.replace("<|im", "")
484
+ history.append((question, response))
485
+ if return_history:
486
+ return response, history
487
+ else:
488
+ query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
489
+ query_to_print = query_to_print.replace(IMG_LINE_BREAK_TOKEN, '')
490
+ query_to_print = query_to_print.replace(IMG_FRAME_BREAK_TOKEN, '')
491
+ query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
492
+ if verbose:
493
+ print(query_to_print, response)
494
+
495
+ return response
496
+
497
+ @torch.no_grad()
498
+ def generate(
499
+ self,
500
+ pixel_values: Optional[torch.FloatTensor] = None,
501
+ input_ids: Optional[torch.FloatTensor] = None,
502
+ attention_mask: Optional[torch.LongTensor] = None,
503
+ visual_features: Optional[torch.FloatTensor] = None,
504
+ generation_config: Optional[GenerationConfig] = None,
505
+ output_hidden_states: Optional[bool] = None,
506
+ return_dict: Optional[bool] = None,
507
+ image_grid_thw: Optional[torch.LongTensor] = None,
508
+ **generate_kwargs,
509
+ ) -> torch.LongTensor:
510
+
511
+ assert self.img_context_token_id is not None
512
+
513
+ grid_thw = image_grid_thw
514
+
515
+ if pixel_values is not None:
516
+ if visual_features is not None:
517
+ vit_embeds = visual_features
518
+ else:
519
+ vit_embeds, vit_embeds_ori = self.extract_feature(pixel_values, grid_thw)
520
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
521
+ input_embeds = self.replace_img_special_tokens(input_embeds, input_ids)
522
+ B, N, C = input_embeds.shape
523
+ # input_embeds = input_embeds.reshape(B * N, C)
524
+
525
+ # input_ids = input_ids.reshape(B * N)
526
+ selected = (input_ids == self.img_context_token_id) # B, N
527
+ assert selected.sum() != 0
528
+ input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
529
+
530
+ # input_embeds = input_embeds.reshape(B, N, C)
531
+ else:
532
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
533
+ input_embeds = self.replace_img_special_tokens(input_embeds, input_ids)
534
+ selected = None
535
+
536
+ # input_embeds = self.replace_special_tokens(input_embeds, input_ids)
537
+ visual_token_mask = selected + (input_ids == self.img_start_token_id) if selected is not None else None
538
+
539
+ position_ids = None
540
+ generate_kwargs['position_ids'] = position_ids
541
+
542
+ outputs = self.language_model.generate(
543
+ inputs_embeds=input_embeds,
544
+ attention_mask=attention_mask,
545
+ generation_config=generation_config,
546
+ output_hidden_states=output_hidden_states,
547
+ # return_dict=return_dict,
548
+ use_cache=True,
549
+ visual_token_mask=visual_token_mask,
550
+ **generate_kwargs,
551
+ )
552
+
553
+ return outputs
554
+
555
+
556
def pixel_shuffle_v2(x, scale_factor=0.5, patch_aspect_ratio=1.0):
    """Space-to-depth ("pixel shuffle") that trades spatial tokens for channels.

    Accepts either ``(N, L, C)`` (assumed square, ``L == H * W``) or
    ``(N, H, W, C)`` input and returns ``(N, L * scale_factor**2,
    C / scale_factor**2)``. ``patch_aspect_ratio`` skews the shuffle so the
    height and width axes are merged at different rates while keeping the
    overall token reduction identical.
    """
    if x.ndim == 3:
        batch, seq_len, channels = x.size()
        side = int(seq_len ** 0.5)
        # (N, L, C) -> (N, H, W, C), assuming a square grid.
        x = x.reshape(batch, side, side, channels)

    batch, height, width, channels = x.size()

    sqrt_ar = patch_aspect_ratio ** 0.5
    h_factor = scale_factor * sqrt_ar
    w_factor = scale_factor / sqrt_ar

    # Fold part of the width axis into the channel axis.
    x = x.reshape(batch, height, int(width * w_factor), int(channels / w_factor))
    # Swap H and W so the next reshape folds height instead.
    x = x.permute(0, 2, 1, 3).contiguous()
    # Fold part of the height axis into the channel axis as well.
    x = x.reshape(
        batch,
        int(width * w_factor),
        int(height * h_factor),
        int(channels / (w_factor * h_factor)),
    )
    # Restore (N, H', W', C') axis order.
    x = x.permute(0, 2, 1, 3).contiguous()
    # Collapse the spatial axes into a single token axis.
    return x.reshape(
        batch,
        int(height * h_factor * width * w_factor),
        int(channels / (h_factor * w_factor)),
    )
modeling_navil_vit_anyres.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ from typing import Optional, Tuple, Union
7
+ from functools import partial
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint
12
+
13
+ from einops import rearrange
14
+ from timm.models.layers import DropPath
15
+ from torch import nn
16
+ from transformers.activations import ACT2FN
17
+ from transformers.modeling_outputs import (BaseModelOutput,
18
+ BaseModelOutputWithPooling)
19
+ from transformers.modeling_utils import PreTrainedModel
20
+ from transformers.utils import logging
21
+
22
+ from .configuration_navil_vit import NaViLVisionConfig
23
+ from .modular_intern_vit import (
24
+ InternVisionFlashAttention2,
25
+ InternVisionSdpaAttention,
26
+ InternMLP,
27
+ NORM2FN,
28
+ InternVisionRotaryEmbedding,
29
+ )
30
+
31
+ try:
32
+ # from .flash_attention import FlashAttention
33
+ from flash_attn import flash_attn_varlen_func
34
+ from flash_attn.layers.rotary import apply_rotary_emb
35
+ has_flash_attn = True
36
+ except:
37
+ print('FlashAttention is not installed.')
38
+ has_flash_attn = False
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
class NaViLVisionEmbeddingsAnyRes(nn.Module):
    """Patchify variable-resolution images into ViT token embeddings.

    There is no position embedding here; positional information is injected
    later by the encoder's rotary embeddings.
    """

    def __init__(self, config: NaViLVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        # Number of patches merged into one LLM token along each axis.
        self.merge_size = int(1.0 / config.downsample_ratio)

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        """Embed pixels via the patch conv and flatten per-sample.

        Fix: cast the input to the conv weight dtype — `target_dtype` was
        previously computed but never applied, so mixed-precision inputs
        would error inside the convolution. Also removed an unused
        ``batch_size, _, height, width`` unpack.
        """
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(target_dtype))  # [*, embed_dim, H', W']
        # Flatten every non-batch dim into one axis.
        return patch_embeds.flatten(1)
65
+
66
+
67
class NaViLVisionEncoderLayerAnyRes(nn.Module):
    """Pre-norm ViT block (attention + MLP) with LayerScale and DropPath.

    Operates on packed variable-length sequences: ``cu_seqlens`` delimits the
    per-image (or per-window) segments inside the flat token sequence.
    """

    def __init__(self, config: NaViLVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        # Prefer FlashAttention when the package is importable.
        if has_flash_attn:
            self.attn = InternVisionFlashAttention2(config)
        else:
            self.attn = InternVisionSdpaAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        # LayerScale gains, initialised to `initializer_factor`.
        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens,
        rotary_pos_emb
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states: packed token embeddings of shape (seq, embed_dim).
            cu_seqlens: cumulative sequence lengths delimiting segments.
            rotary_pos_emb: per-token rotary position embeddings.
        """
        # Attention sub-block with residual, LayerScale, stochastic depth.
        attn_out = self.attn(
            self.norm1(hidden_states),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
        )
        hidden_states = hidden_states + self.drop_path1(attn_out * self.ls1)

        # MLP sub-block, same residual structure.
        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states
107
+
108
+
109
class NaViLVisionEncoderAnyRes(nn.Module):
    """Stack of ``config.num_hidden_layers`` ViT blocks for packed any-resolution input.

    Tokens from all images arrive as one flat sequence; ``grid_thw`` (one
    ``(t, h, w)`` row per image, in patch units) drives the 2-D rotary
    position embeddings and the Qwen2-VL-style windowed attention.
    """

    def __init__(self, config: NaViLVisionConfig):
        super().__init__()
        self.config = config
        # Stochastic-depth rates increasing linearly with depth.
        dpr = torch.linspace(0, config.drop_path_rate, config.num_hidden_layers).tolist()
        self.layers = nn.ModuleList(
            [NaViLVisionEncoderLayerAnyRes(config, rate) for rate in dpr])
        self.gradient_checkpointing = True

        head_dim = config.hidden_size // config.num_attention_heads
        self.rotary_pos_emb = InternVisionRotaryEmbedding(head_dim // 2)

        self.merge_size = int(1.0 / config.downsample_ratio)
        self.merge_unit = self.merge_size * self.merge_size
        self.patch_size = config.patch_size
        # Layers listed here attend over whole images; the rest use windows.
        self.fullatt_block_indexes = config.fullatt_block_indexes
        self.window_size = config.window_size

    def rot_pos_emb(self, grid_thw):
        """Build per-token (row, col) rotary embeddings, merge-block-major order."""
        pos_ids = []
        for t, h, w in grid_thw:
            # Row indices, regrouped so each merge_size x merge_size block of
            # patches (one future LLM token) is contiguous.
            hpos = torch.arange(h).unsqueeze(1).expand(-1, w)
            hpos = hpos.reshape(
                h // self.merge_size, self.merge_size,
                w // self.merge_size, self.merge_size,
            ).permute(0, 2, 1, 3).flatten()

            # Column indices with the same regrouping.
            wpos = torch.arange(w).unsqueeze(0).expand(h, -1)
            wpos = wpos.reshape(
                h // self.merge_size, self.merge_size,
                w // self.merge_size, self.merge_size,
            ).permute(0, 2, 1, 3).flatten()

            pos_ids.append(torch.stack([hpos, wpos], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        max_grid_size = grid_thw[:, 1:].max()
        rotary_full = self.rotary_pos_emb(max_grid_size)
        return rotary_full[pos_ids].flatten(1)

    def get_window_index(self, grid_thw):
        """Compute the permutation grouping merged tokens into attention windows.

        Returns the permutation over merged-token indices and the cumulative
        window lengths (in patch-token units) used as ``cu_seqlens`` by the
        windowed attention layers.
        """
        window_index: list = []
        cu_window_seqlens: list = [0]
        base_index = 0
        win = self.window_size // self.merge_size
        assert win > 0

        for grid_t, grid_h, grid_w in grid_thw:
            llm_h, llm_w = grid_h // self.merge_size, grid_w // self.merge_size
            index = torch.arange(grid_t * llm_h * llm_w).reshape(grid_t, llm_h, llm_w)
            # Pad the merged grid up to a multiple of the window size.
            pad_h = win - llm_h % win
            pad_w = win - llm_w % win
            num_win_h = (llm_h + pad_h) // win
            num_win_w = (llm_w + pad_w) // win
            padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
            padded = padded.reshape(grid_t, num_win_h, win, num_win_w, win)
            padded = padded.permute(0, 1, 3, 2, 4).reshape(
                grid_t, num_win_h * num_win_w, win, win)
            # Real (non-padding) token count per window.
            seqlens = (padded != -100).sum([2, 3]).reshape(-1)
            padded = padded.reshape(-1)
            kept = padded[padded != -100]
            window_index.append(kept + base_index)
            cu = seqlens.cumsum(0) * self.merge_unit + cu_window_seqlens[-1]
            cu_window_seqlens.extend(cu.tolist())
            base_index += (grid_t * llm_h * llm_w).item()
        window_index = torch.cat(window_index, dim=0)

        return window_index, cu_window_seqlens

    def forward(
        self,
        inputs_embeds,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        grid_thw: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""Run the encoder over a packed token sequence.

        Args:
            inputs_embeds: flat ``(seq_len, hidden)`` patch embeddings.
            output_hidden_states: also collect every layer's hidden states.
            return_dict: return a ``BaseModelOutput`` instead of a tuple.
            grid_thw: per-image ``(t, h, w)`` grid sizes in patch units.

        NOTE(review): tokens are permuted into window order here and are not
        permuted back before returning — confirm the caller accounts for this.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        all_hidden_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        rotary_pos_emb = self.rot_pos_emb(grid_thw)
        window_index, cu_window_seqlens = self.get_window_index(grid_thw)
        cu_window_seqlens = torch.tensor(
            cu_window_seqlens,
            device=hidden_states.device,
            # FA2 requires int32; ONNX export requires grid_thw's dtype.
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)

        # Reorder tokens (and their rotary embeddings) into window order,
        # keeping each merge unit of patches contiguous.
        seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len // self.merge_unit, self.merge_unit, -1)
        hidden_states = hidden_states[window_index, :, :].reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.merge_unit, self.merge_unit, -1)
        rotary_pos_emb = rotary_pos_emb[window_index, :, :].reshape(seq_len, -1)

        # Cumulative per-frame lengths for the full-attention layers.
        # Select dtype based on the following factors:
        # - FA2 requires that cu_seqlens_q must have dtype int32
        # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
        # See https://github.com/huggingface/transformers/pull/34852 for more information
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

        for layer_idx, layer in enumerate(self.layers):
            # Full attention for the configured layers, windows otherwise.
            full_attention = (self.fullatt_block_indexes is None) or (layer_idx in self.fullatt_block_indexes)
            cu_seqlens_now = cu_seqlens if full_attention else cu_window_seqlens
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                hidden_states = torch.utils.checkpoint.checkpoint(
                    partial(layer, cu_seqlens=cu_seqlens_now, rotary_pos_emb=rotary_pos_emb),
                    hidden_states)
            else:
                hidden_states = layer(
                    hidden_states,
                    cu_seqlens=cu_seqlens_now,
                    rotary_pos_emb=rotary_pos_emb,
                )

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states
        )
288
+
289
+
290
class NaViLVisionModelAnyRes(PreTrainedModel):
    """Any-resolution vision tower: patch embedding + packed ViT encoder.

    The returned last hidden state is regrouped to
    ``(-1, merge_size, merge_size, hidden)`` so each row holds the block of
    patches that collapses into one LLM token.
    """

    main_input_name = 'pixel_values'
    config_class = NaViLVisionConfig
    _no_split_modules = ['NaViLVisionEncoderLayerAnyRes']

    def __init__(self, config: NaViLVisionConfig):
        super().__init__(config)
        self.config = config

        self.merge_size = int(1.0 / config.downsample_ratio)
        self.embeddings = NaViLVisionEmbeddingsAnyRes(config)
        self.encoder = NaViLVisionEncoderAnyRes(config)

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_embeds: Optional[torch.FloatTensor] = None,
        grid_thw: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Encode pixels (or precomputed patch embeddings) into ViT features."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            # Caller already patchified the image.
            hidden_states = pixel_embeds
        elif len(pixel_values.shape) == 4:
            hidden_states = self.embeddings(pixel_values)
        else:
            raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            grid_thw=grid_thw
        )
        # NOTE(review): with return_dict=False the encoder returns a plain
        # tuple, so this attribute access would fail — confirm callers always
        # use return_dict=True here.
        last_hidden_state = encoder_outputs.last_hidden_state

        # Group tokens per merge unit: (-1, merge, merge, hidden).
        last_hidden_state = last_hidden_state.unsqueeze(1).reshape(
            -1, self.merge_size, self.merge_size, last_hidden_state.shape[-1])

        if not return_dict:
            return (last_hidden_state, ) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=None,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
modeling_qwen3_ve.py ADDED
@@ -0,0 +1,1629 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/qwen3/modular_qwen3.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_qwen3.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from functools import partial
23
+ from typing import Callable, Optional, Tuple, Union
24
+
25
+ import torch
26
+ from torch import logit, nn
27
+
28
+ from transformers.activations import ACT2FN
29
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
30
+ from transformers.generation import GenerationMixin
31
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
32
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
33
+ from transformers.modeling_outputs import (
34
+ BaseModelOutputWithPast,
35
+ CausalLMOutputWithPast,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutputWithPast,
38
+ TokenClassifierOutput,
39
+ )
40
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
41
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
42
+ from transformers.processing_utils import Unpack
43
+ from transformers.utils import (
44
+ LossKwargs,
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ can_return_tuple,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from transformers.utils.deprecation import deprecate_kwarg
53
+ from .configuration_qwen3 import Qwen3VEConfig
54
+
55
+
56
# Module-level logger shared by the warnings emitted throughout this file.
logger = logging.get_logger(__name__)

# Names presumably consumed by the `add_*_docstrings` / `replace_return_docstrings`
# decorators imported above — their use sites are not visible in this chunk.
_CHECKPOINT_FOR_DOC = "Qwen/Qwen3-8B"
_CONFIG_FOR_DOC = "Qwen3VEConfig"
60
+
61
+
62
class Qwen3RMSNorm(nn.Module):
    """Root-mean-square layer norm (equivalent to T5LayerNorm).

    Normalizes by the RMS of the last dimension — no mean subtraction, no
    bias — then applies a learned per-channel gain. Statistics are computed
    in float32 for stability and the result is cast back to the input dtype.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learned per-channel gain, initialized to the identity scaling.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        # Small constant added under the square root to avoid division by zero.
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        original_dtype = hidden_states.dtype
        upcast = hidden_states.to(torch.float32)
        mean_square = upcast.pow(2).mean(-1, keepdim=True)
        normalized = upcast * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(original_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
80
+
81
+
82
class Qwen3MLP(nn.Module):
    """Gated feed-forward block: down_proj(act(gate_proj(x)) * up_proj(x)).

    All three projections are bias-free. The activation function is looked
    up in ACT2FN by name via ``config.hidden_act``.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # Two parallel input projections: one gated by the activation, one linear.
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        # Projection back down to the model width.
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(gated)
96
+
97
+
98
def rotate_half(x):
    """Rotate the last dimension by half: (x1, x2) -> (-x2, x1).

    Used by the rotary position embedding, which treats channel pairs as
    2-D coordinates being rotated.
    """
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    return torch.cat((-second_half, first_half), dim=-1)
103
+
104
+
105
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply rotary position embeddings to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): Cosine half of the rotary embedding,
            shape [batch, seq_len, head_dim].
        sin (`torch.Tensor`): Sine half of the rotary embedding, same shape.
        position_ids (`torch.Tensor`, *optional*): Deprecated and unused;
            kept only for call-site compatibility.
        unsqueeze_dim (`int`, *optional*, defaults to 1): Axis along which
            cos/sin are unsqueezed so that they broadcast against q and k —
            use 1 for [batch, heads, seq, head_dim] layouts and 2 for
            [batch, seq, heads, head_dim] layouts.

    Returns:
        `tuple(torch.Tensor)`: the rotated query and key tensors.
    """
    # Insert the head axis so cos/sin broadcast over every attention head.
    cos_b = cos.unsqueeze(unsqueeze_dim)
    sin_b = sin.unsqueeze(unsqueeze_dim)
    rotated_q = (q * cos_b) + (rotate_half(q) * sin_b)
    rotated_k = (k * cos_b) + (rotate_half(k) * sin_b)
    return rotated_q, rotated_k
130
+
131
+
132
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Expand key/value heads for grouped-query attention.

    Equivalent to torch.repeat_interleave(hidden_states, n_rep, dim=1):
    (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim).
    """
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        # Nothing to repeat — return the input untouched (no copy).
        return hidden_states
    expanded = hidden_states.unsqueeze(2).expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
142
+
143
+
144
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    """Reference (non-fused) scaled dot-product attention.

    Expects query/key/value in [batch, heads, seq, head_dim] layout; key and
    value may carry fewer heads than query (grouped-query attention) and are
    expanded via repeat_kv. Returns (attn_output transposed to
    [batch, seq, heads, head_dim], attn_weights after dropout).
    """
    expanded_key = repeat_kv(key, module.num_key_value_groups)
    expanded_value = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, expanded_key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Crop the (possibly longer) mask to the actual key length and add it
        # as an additive bias (presumably large-negative at masked positions).
        scores = scores + attention_mask[:, :, :, : expanded_key.shape[-2]]

    # Softmax in float32 for numerical stability, then cast back to the query dtype.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    context = torch.matmul(probs, expanded_value)
    context = context.transpose(1, 2).contiguous()

    return context, probs
168
+
169
+
170
class Qwen3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need', extended with a
    parallel set of "visual expert" (``*_ve``) projections: tokens flagged by
    ``visual_token_mask`` are projected with the ``_ve`` weights instead of the
    text weights, while the attention computation itself is shared."""

    def __init__(self, config: Qwen3VEConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when the config has no explicit head_dim.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # GQA: how many query heads share one key/value head.
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        # Text-token projections.
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # unlike olmo, only on the head dim!
        self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # thus post q_norm does not need reshape

        # Visual-expert twins of the projections/norms above; same shapes,
        # applied only at positions where visual_token_mask is True.
        self.q_proj_ve = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj_ve = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj_ve = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj_ve = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
        self.q_norm_ve = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # unlike olmo, only on the head dim!
        self.k_norm_ve = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # thus post q_norm does not need reshape

        # Sliding-window attention is only active for layers >= max_window_layers
        # (and only when the config enables it); otherwise disabled for this layer.
        self.sliding_window = config.sliding_window
        if not (
            self.config.use_sliding_window
            and getattr(self.config, "sliding_window", None) is not None
            and self.layer_idx >= self.config.max_window_layers
        ):
            self.sliding_window = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        visual_token_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Compute attention over ``hidden_states`` ([B, L, hidden]).

        ``position_embeddings`` is the (cos, sin) rotary pair; ``visual_token_mask``
        ([B, L], truthy at image tokens) routes those positions through the
        ``_ve`` projections. Returns (attn_output, attn_weights).
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)  # [B, L, -1, D]

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape))
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape))
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if visual_token_mask is not None:
            # NOTE visual token mask can be None when evaluating with kv_cache
            visual_token_mask = visual_token_mask.bool()  # B, L
            if visual_token_mask.any():
                # Overwrite q/k/v in place at visual positions with the visual-expert
                # projections. The _ve projection runs over the full sequence and is
                # then masked-indexed; only masked rows are kept.
                query_states[visual_token_mask] = self.q_norm_ve(self.q_proj_ve(hidden_states).view(hidden_shape))[visual_token_mask]
                key_states[visual_token_mask] = self.k_norm_ve(self.k_proj_ve(hidden_states).view(hidden_shape))[visual_token_mask]
                value_states[visual_token_mask] = self.v_proj_ve(hidden_states).view(hidden_shape)[visual_token_mask]

        # [B, L, H, D] -> [B, H, L, D] for the attention kernels.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Pick the attention kernel: eager fallback, or the implementation
        # registered for config._attn_implementation (e.g. sdpa).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                # SDPA cannot return attention weights — fall back to eager.
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,  # diff with Llama
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        # Keep the pre-projection context so visual positions can be re-projected
        # with o_proj_ve below (o_proj output at those positions is discarded).
        attn_output_prev = attn_output.clone()
        attn_output = self.o_proj(attn_output_prev)

        if visual_token_mask is not None:
            if visual_token_mask.any():
                attn_output[visual_token_mask] = self.o_proj_ve(attn_output_prev[visual_token_mask])

        return attn_output, attn_weights
281
+
282
# Late imports for the FlashAttention-2 path below. flash_attn is optional:
# the guarded import leaves its names undefined when the library is absent,
# in which case Qwen3FlashAttention2 must not be instantiated.
import torch.nn.functional as F
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)

import inspect
if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    # flash_attn >= 2.3 exposes `window_size` for sliding-window attention;
    # probe the signature instead of pinning a version.
    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
298
+
299
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
300
+ def _get_unpad_data(attention_mask):
301
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
302
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
303
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
304
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
305
+ return (
306
+ indices,
307
+ cu_seqlens,
308
+ max_seqlen_in_batch,
309
+ )
310
+
311
+
312
class Qwen3FlashAttention2(Qwen3Attention):
    """
    Flash-attention variant of `Qwen3Attention`. Weights are inherited
    untouched; only the forward pass differs, calling the flash_attn public
    API and handling padded ("pad") or packed ("pack") batches. For sliding
    window attention, SWA is applied only below config.max_window_layers.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        padding_type: str = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ):
        """Flash-attention forward.

        `padding_type` selects the kernel call style: "pad" treats
        `attention_mask` as a [B, L] padding mask; "pack" treats it as
        cumulative sequence lengths of a packed batch (batch size must be 1).
        `visual_token_mask` routes masked positions through the `_ve` weights,
        exactly as in the eager parent class.

        NOTE(review): `attn_weights` is only assigned in the
        `if not output_attentions` branch below, so calling this with
        `output_attentions=True` raises NameError — flash_attn does not
        return attention weights.
        """
        bsz, q_len, _ = hidden_states.size()
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape))
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape))
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if visual_token_mask is not None:
            # NOTE visual token mask can be None when evaluating with kv_cache
            visual_token_mask = visual_token_mask.bool()  # B, L
            if visual_token_mask.any():
                # Visual-expert projections overwrite q/k/v in place at visual positions.
                query_states[visual_token_mask] = self.q_norm_ve(self.q_proj_ve(hidden_states).view(hidden_shape))[visual_token_mask]
                key_states[visual_token_mask] = self.k_norm_ve(self.k_proj_ve(hidden_states).view(hidden_shape))[visual_token_mask]
                value_states[visual_token_mask] = self.v_proj_ve(hidden_states).view(hidden_shape)[visual_token_mask]

        # [B, L, H, D] -> [B, H, L, D]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Rotary embeddings are precomputed by the caller and passed in.
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
            and self.config.use_sliding_window
        )

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
                " make sure to upgrade flash-attn library."
            )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a value `sliding_windows` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                # Keep only the last (sliding_window - 1) cached positions.
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    # Slice the mask to match and append a column of ones for the new token.
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the [B, L, H, D] layout expected by flash_attn.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            use_sliding_windows=use_sliding_windows,
            padding_type=padding_type,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()

        # Keep pre-projection context so visual positions can use o_proj_ve.
        attn_output_prev = attn_output.clone()
        attn_output = self.o_proj(attn_output_prev)

        if visual_token_mask is not None:
            if visual_token_mask.any():
                attn_output[visual_token_mask] = self.o_proj_ve(attn_output_prev[visual_token_mask])

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights

    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
        padding_type="pad"
    ):
        """
        Dispatch to the padded or packed flash-attention implementation.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                For "pad": a `(batch_size, seq_len)` padding mask (0 = padding,
                1 = real token). For "pack": cumulative sequence lengths of the
                packed batch.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if padding_type == "pad":
            attn_output = self._flash_attention_forward_pad(
                query_states,
                key_states,
                value_states,
                attention_mask,
                query_length,
                dropout=dropout,
                softmax_scale=softmax_scale,
                use_sliding_windows=use_sliding_windows,
            )
        elif padding_type == "pack":
            attn_output = self._flash_attention_forward_pack(
                query_states,
                key_states,
                value_states,
                attention_mask,
                query_length,
                dropout=dropout,
                softmax_scale=softmax_scale,
                use_sliding_windows=use_sliding_windows,
            )
        else:
            raise ValueError(f"padding_type should be either `pad` or `pack`, got {padding_type}")
        return attn_output


    def _flash_attention_forward_pad(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Decide whether to use SWA or not by layer index.
        if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
            use_sliding_windows = False

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            # Scatter the unpadded outputs back to the padded [B, L, ...] layout.
            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    def _flash_attention_forward_pack(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Variant for packed batches: batch size must be 1 and `attention_mask`
        carries the cumulative sequence lengths (cu_seqlens) of the packed
        sequences, passed straight to flash_attn_varlen_func.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                Cumulative sequence lengths of the packed batch (leading batch
                axis of size 1 is squeezed off).
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        assert query_states.size(0) == key_states.size(0) == value_states.size(0) == 1
        query_states = query_states.squeeze(0)
        key_states = key_states.squeeze(0)
        value_states = value_states.squeeze(0)
        cu_seqlens = attention_mask.squeeze(0)

        # NOTE(review): this Python-level loop over cu_seqlens is O(n) host/device
        # syncs; `(cu_seqlens[1:] - cu_seqlens[:-1]).max().item()` would be equivalent.
        with torch.no_grad():
            max_seqlen = max([
                cu_seqlens[idx+1] - cu_seqlens[idx]
                for idx in range(cu_seqlens.size(0) - 1)
            ]).item()

        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Decide whether to use SWA or not by layer index.
        if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
            use_sliding_windows = False

        if not use_sliding_windows:
            attn_output = flash_attn_varlen_func(
                q=query_states,
                k=key_states,
                v=value_states,
                cu_seqlens_q=cu_seqlens,
                cu_seqlens_k=cu_seqlens,
                max_seqlen_q=max_seqlen,
                max_seqlen_k=max_seqlen,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )
        else:
            attn_output = flash_attn_varlen_func(
                q=query_states,
                k=key_states,
                v=value_states,
                cu_seqlens_q=cu_seqlens,
                cu_seqlens_k=cu_seqlens,
                max_seqlen_q=max_seqlen,
                max_seqlen_k=max_seqlen,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
                window_size=(self.config.sliding_window, self.config.sliding_window),
            )

        # NOTE(review): the three statements below rebind local variables that are
        # never read again — dead code with no effect on the returned value.
        query_states = query_states.unsqueeze(0)
        key_states = key_states.unsqueeze(0)
        value_states = value_states.unsqueeze(0)
        return attn_output


    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        """Strip padding tokens from q/k/v and return the varlen metadata
        (indices, cu_seqlens, max lengths) needed by flash_attn_varlen_func."""
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            # Prefill: queries share the key unpadding metadata.
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-token decode: one query per batch row.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
788
+
789
# Dispatch table keyed by config._attn_implementation. "sdpa" maps to the
# base class, whose forward routes to the SDPA kernel through
# ALL_ATTENTION_FUNCTIONS; only "flash_attention_2" uses the subclass.
QWEN3_ATTENTION_CLASSES = {
    "eager": Qwen3Attention,
    "flash_attention_2": Qwen3FlashAttention2,
    "sdpa": Qwen3Attention,
}
794
+
795
class Qwen3DecoderLayer(nn.Module):
    """Single Qwen3 decoder layer with an extra visual-expert MLP branch.

    Besides the usual pre-norm attention + MLP sub-blocks, the layer owns a
    second MLP (``mlp_ve``). When ``visual_token_mask`` is provided, positions
    flagged by the mask are routed through ``mlp_ve`` and the remaining
    (text) positions through the regular ``mlp``.
    """

    def __init__(self, config: Qwen3VEConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        # Attention implementation chosen by the config (eager / flash-attn 2 / sdpa).
        self.self_attn = QWEN3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
        self.mlp = Qwen3MLP(config)
        # Visual-expert MLP: same architecture as `mlp`, separate weights.
        self.mlp_ve = Qwen3MLP(config)
        self.input_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        if (
            config.sliding_window and config._attn_implementation != "flash_attention_2"
        ):  # diff with Llama is this warning
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        padding_type: Optional[str] = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        layer_idx: Optional[int] = -1,
        return_feature_scale: Optional[bool] = False,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """Run one decoder layer.

        Returns a tuple: ``(hidden_states,)`` plus, optionally, the attention
        weights (when ``output_attentions``) and a 2-tuple of feature-scale
        scalars (when ``return_feature_scale``).
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            padding_type=padding_type,
            visual_token_mask=visual_token_mask,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)

        if visual_token_mask is None:
            # No modality routing: all tokens go through the text MLP.
            hidden_states = self.mlp(hidden_states)
        else:
            visual_token_mask = visual_token_mask.bool()  # B, L
            # Clone so both expert MLPs read the same pre-MLP activations while
            # the outputs are written into `hidden_states` in place.
            hidden_states_prev = hidden_states.clone()
            if visual_token_mask.any():
                hidden_states[visual_token_mask] = self.mlp_ve(hidden_states_prev[visual_token_mask])
            if (~visual_token_mask).any():
                hidden_states[~visual_token_mask] = self.mlp(hidden_states_prev[~visual_token_mask])
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        if return_feature_scale:
            assert visual_token_mask is not None, "visual_token_mask must be provided when return_feature_scale is True"
            # Std over tokens of the per-token mean |activation|, reported
            # separately for visual and text tokens (diagnostic signal).
            outputs += ((hidden_states[visual_token_mask].abs().mean(-1).std(), hidden_states[~visual_token_mask].abs().mean(-1).std()),)

        return outputs
872
+
873
+
874
class Qwen3RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) producing per-position cos/sin tables.

    The inverse-frequency table is built by the rope-init function selected
    via ``config.rope_scaling`` ("default" when no scaling is configured).
    """

    def __init__(self, config: Qwen3VEConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        # Non-persistent buffer: recomputed from config instead of being stored in checkpoints.
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Kept so dynamic-RoPE updates can restore the original table.
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        """Return ``(cos, sin)`` tables in ``x.dtype``; last dim is twice the
        inv_freq length (frequencies are concatenated with themselves)."""
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
906
+
907
+
908
# Shared class-level docstring injected by `@add_start_docstrings` on the model classes.
QWEN3_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`Qwen3VEConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
923
+
924
+
925
@add_start_docstrings(
    "The bare Qwen3 Model outputting raw hidden-states without any specific head on top.",
    QWEN3_START_DOCSTRING,
)
class Qwen3PreTrainedModel(PreTrainedModel):
    """Base class wiring config type, weight init, and backend capability flags."""

    config_class = Qwen3VEConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Qwen3DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights from N(0, initializer_range);
        zero Linear biases and the Embedding padding row."""
        init_std = self.config.initializer_range
        is_linear = isinstance(module, nn.Linear)
        is_embedding = isinstance(module, nn.Embedding)
        if not (is_linear or is_embedding):
            return
        module.weight.data.normal_(mean=0.0, std=init_std)
        if is_linear and module.bias is not None:
            module.bias.data.zero_()
        if is_embedding and module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
953
+
954
+
955
# Shared forward-method docstring injected by `@add_start_docstrings_to_model_forward`.
QWEN3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
    """
1020
+
1021
+
1022
@add_start_docstrings(
    "The bare Qwen3 Model outputting raw hidden-states without any specific head on top.",
    QWEN3_START_DOCSTRING,
)
class Qwen3Model(Qwen3PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen3DecoderLayer`]

    Args:
        config: Qwen3VEConfig
    """

    def __init__(self, config: Qwen3VEConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Qwen3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Rotary cos/sin tables are computed once per forward and shared by all layers.
        self.rotary_emb = Qwen3RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        padding_type: Optional[str] = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        generation_modality: Optional[int] = 0,
        return_feature_scale: Optional[bool] = False,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        """Standard HF decoder forward, extended with modality routing.

        Extra args relative to upstream Qwen3:
          - padding_type: forwarded to each layer's attention; semantics defined
            in the attention classes (not visible here) — TODO confirm.
          - visual_token_mask: (B, L) mask routing tokens to the visual-expert MLP.
          - generation_modality: 0 apparently means "generating text"; during
            cached decoding it disables the visual mask (see hack below).
          - return_feature_scale: adds a "feature_scale" entry to the output.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            # Positions continue from however many tokens the cache already holds.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        if generation_modality == 0 and use_cache and past_key_values.get_seq_length() > 0:
            # Indicating we are generating text and is not pre-filling. This is an ugly hack to make the model work
            visual_token_mask = None

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Keyword-only args are bound via partial because the
                # checkpointing func forwards positional args only.
                layer_outputs = self._gradient_checkpointing_func(
                    partial(decoder_layer.__call__,
                            padding_type=padding_type,
                            visual_token_mask=visual_token_mask,
                            return_feature_scale=return_feature_scale,
                            **flash_attn_kwargs),
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                    padding_type=padding_type,
                    visual_token_mask=visual_token_mask,
                    return_feature_scale=return_feature_scale,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        ret = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        if return_feature_scale:
            # NOTE(review): only the LAST decoder layer's feature-scale tuple is
            # returned (layer_outputs refers to the final loop iteration) —
            # confirm per-layer scales are not expected here.
            ret["feature_scale"] = layer_outputs[-1]
        return ret

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        """Select/construct the attention mask appropriate for the active backend.

        Returns None when the backend can handle causality itself (FA2 with no
        padding, or SDPA's `is_causal` fast path); otherwise a 4D float mask.
        """
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and past_key_values is not None:
                # FA2 assumes left padding during batched generation.
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right'"
                        " this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
                        " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if (
            self.config._attn_implementation == "sdpa"
            and not (using_static_cache or using_sliding_window_cache)
            and not output_attentions
        ):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # SlidingWindowCache or StaticCache
        if using_sliding_window_cache or using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        config: Qwen3VEConfig,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`Qwen3VEConfig`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            # Start fully masked, then zero out the allowed (causal) positions.
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            if config.sliding_window is not None:
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify is current checkpoint was trained with sliding window or not
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=device) <= (
                        cache_position.reshape(-1, 1) - config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                # Combine the causal mask with the 2D padding mask: positions where
                # both agree the token is visible stay 0; everything else -> min_dtype.
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        return causal_mask
1334
+
1335
+
1336
# Typed-kwargs bundle accepted by `Qwen3VEForCausalLM.forward`: merges the
# flash-attention kwargs with the loss-related kwargs.
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
1337
+
1338
+
1339
+ class Qwen3VEForCausalLM(Qwen3PreTrainedModel, GenerationMixin):
1340
+ _tied_weights_keys = ["lm_head.weight"]
1341
+ _tp_plan = {"lm_head": "colwise_rep"}
1342
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
1343
+
1344
    def __init__(self, config):
        """Build the base `Qwen3Model` decoder plus a bias-free LM head.

        `_tied_weights_keys` on the class lists `lm_head.weight`, so the head
        may be tied to the input embeddings by `post_init` depending on config.
        """
        super().__init__(config)
        self.model = Qwen3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()
1352
+
1353
+ def get_input_embeddings(self):
1354
+ return self.model.embed_tokens
1355
+
1356
+ def set_input_embeddings(self, value):
1357
+ self.model.embed_tokens = value
1358
+
1359
+ def get_output_embeddings(self):
1360
+ return self.lm_head
1361
+
1362
+ def set_output_embeddings(self, new_embeddings):
1363
+ self.lm_head = new_embeddings
1364
+
1365
+ def set_decoder(self, decoder):
1366
+ self.model = decoder
1367
+
1368
+ def get_decoder(self):
1369
+ return self.model
1370
+
1371
    @can_return_tuple
    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        padding_type: Optional[str] = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        generation_modality: Optional[int] = 0,
        skip_lm_head: Optional[bool] = False,
        return_feature_scale: Optional[bool] = False,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        logits_to_keep (`int` or `torch.Tensor`, *optional*):
            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
            This is useful when using packed tensor format (single dimension for batch and sequence length).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen3ForCausalLM

        >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            padding_type=padding_type,
            visual_token_mask=visual_token_mask,
            generation_modality=generation_modality,
            return_feature_scale=return_feature_scale,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep

        # `skip_lm_head=True` leaves logits as None (caller only wants hidden states).
        logits = None
        if not skip_lm_head:
            logits = self.lm_head(hidden_states[:, slice_indices, :])

        # NOTE(review): env-gated debugging instrumentation left in — dumps
        # lm_head weights/activations to a hard-coded ./OUTPUT/debug_nan path and
        # requires ITER_STEP to be set; also calls `logits.detach()` which raises
        # if `skip_lm_head=True`. Consider removing before release.
        import os
        if os.environ.get("LOGGING_GRAD_NORM_ALL", "0") == "1":
            import torch.distributed as dist
            print(f"{self.config.vocab_size=} {self.loss_function.__class__.__name__=}")
            print(f"{kwargs=}")
            iter_step = int(os.environ["ITER_STEP"])
            if iter_step == 1:
                save_dict = {
                    "weight": self.lm_head.weight.detach().cpu(),
                    "hidden_states": hidden_states.detach().cpu(),
                    "logits": logits.detach().cpu(),
                    # "labels": labels.detach().cpu(),
                }
                torch.save(save_dict, f"./OUTPUT/debug_nan/debug_lm_head_{dist.get_rank():03d}_{iter_step}.pt")

        loss = None
        if labels is not None:
            # NOTE(review): if `skip_lm_head=True` and `labels` are both given,
            # `logits` is None here and loss computation will fail — confirm
            # callers never combine these two.
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        device = input_ids.device if input_ids is not None else inputs_embeds.device
        output = CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

        if return_feature_scale:
            # Propagate the base model's (last-layer) feature-scale diagnostic.
            output["feature_scale"] = outputs["feature_scale"]
        if not skip_lm_head:
            # Keep logits on the same device as the inputs (they may have moved
            # under model parallelism).
            output['logits'] = output['logits'].to(device)
        return output
1490
+
1491
+ def prepare_inputs_for_generation(
1492
+ self,
1493
+ input_ids: torch.LongTensor,
1494
+ past_key_values: Optional[Cache] = None,
1495
+ attention_mask: Optional[torch.LongTensor] = None,
1496
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1497
+ cache_position: Optional[torch.LongTensor] = None,
1498
+ **kwargs,
1499
+ ):
1500
+ """
1501
+ Prepare the model inputs for generation. In includes operations like computing the 4D attention mask or
1502
+ slicing inputs given the existing cache.
1503
+
1504
+ See the forward pass in the model documentation for expected arguments (different models might have different
1505
+ requirements for e.g. `past_key_values`). This function should work as is for most LLMs.
1506
+ """
1507
+
1508
+ # 1. Handle BC:
1509
+ model_inputs = {}
1510
+ # - some models don't have `Cache` support (which implies they don't expect `cache_position` in `forward`)
1511
+ if self._supports_cache_class:
1512
+ model_inputs["cache_position"] = cache_position
1513
+ # - `cache_position` was not a mandatory input in `prepare_inputs_for_generation` for those models, and this
1514
+ # function may be called outside of `generate`. Handle most use cases by creating `cache_position` on the fly
1515
+ # (this alternative is not as robust as calling `generate` and letting it create `cache_position`)
1516
+ elif cache_position is None:
1517
+ past_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1518
+ cache_position = torch.arange(past_length, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
1519
+
1520
+ # 2. Generic cache-dependent input preparation
1521
+ if past_key_values is not None:
1522
+ model_inputs["past_key_values"] = past_key_values
1523
+ inputs_embeds, input_ids = self._cache_dependant_input_preparation(
1524
+ input_ids, inputs_embeds, cache_position
1525
+ )
1526
+
1527
+ # 3. Prepare base model inputs
1528
+ input_ids_key = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1529
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step for every prompt.
1530
+ if not self.config.is_encoder_decoder:
1531
+ if inputs_embeds is not None and len(cache_position) == inputs_embeds.shape[1]:
1532
+ model_inputs[input_ids_key] = None
1533
+ model_inputs["inputs_embeds"] = inputs_embeds
1534
+ else:
1535
+ # `clone` calls in this function ensure a consistent stride. See #32227
1536
+ model_inputs[input_ids_key] = input_ids.clone(memory_format=torch.contiguous_format)
1537
+ model_inputs["inputs_embeds"] = None
1538
+ else:
1539
+ model_inputs[input_ids_key] = input_ids.clone(memory_format=torch.contiguous_format)
1540
+
1541
+ # 4. Create missing `position_ids` on the fly
1542
+ encoder_attention_mask = attention_mask if self.config.is_encoder_decoder else None
1543
+ attention_mask = (
1544
+ kwargs.pop("decoder_attention_mask", None) if self.config.is_encoder_decoder else attention_mask
1545
+ )
1546
+ attention_mask_key = "decoder_attention_mask" if self.config.is_encoder_decoder else "attention_mask"
1547
+ position_ids_key = "decoder_position_ids" if self.config.is_encoder_decoder else "position_ids"
1548
+ if (
1549
+ attention_mask is not None
1550
+ and kwargs.get(position_ids_key) is None
1551
+ and position_ids_key in set(inspect.signature(self.forward).parameters.keys())
1552
+ ):
1553
+ position_ids = attention_mask.long().cumsum(-1) - 1
1554
+ position_ids.masked_fill_(attention_mask == 0, 1)
1555
+ kwargs[position_ids_key] = position_ids # placed in kwargs for further processing (see below)
1556
+
1557
+ # 5. Slice model inputs if it's an input that should have the same length as `input_ids`
1558
+ for model_input_name in ["position_ids", "token_type_ids", "decoder_position_ids"]:
1559
+ model_input = kwargs.get(model_input_name)
1560
+ if model_input is not None:
1561
+ if past_key_values is not None:
1562
+ current_input_length = (
1563
+ model_inputs["inputs_embeds"].shape[1]
1564
+ if model_inputs.get("inputs_embeds") is not None
1565
+ else model_inputs[input_ids_key].shape[1]
1566
+ )
1567
+ model_input = model_input[:, -current_input_length:]
1568
+ model_input = model_input.clone(memory_format=torch.contiguous_format)
1569
+ model_inputs[model_input_name] = model_input
1570
+
1571
+ # 6. Create 4D attention mask is we are using a `StaticCache` (important for performant compiled forward pass)
1572
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
1573
+ if model_inputs["inputs_embeds"] is not None:
1574
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1575
+ device = model_inputs["inputs_embeds"].device
1576
+ else:
1577
+ batch_size, sequence_length = model_inputs[input_ids_key].shape
1578
+ device = model_inputs[input_ids_key].device
1579
+
1580
+ # Create the causal mask with fixed shape in advance, to reduce recompilations. If the function to create
1581
+ # the 4D causal mask exists, it should be present in the base model (XXXModel class).
1582
+ base_model = getattr(self, self.base_model_prefix, None)
1583
+ if base_model is None:
1584
+ causal_mask_creation_function = getattr(
1585
+ self, "_prepare_4d_causal_attention_mask_with_cache_position", None
1586
+ )
1587
+ else:
1588
+ causal_mask_creation_function = getattr(
1589
+ base_model, "_prepare_4d_causal_attention_mask_with_cache_position", None
1590
+ )
1591
+ if causal_mask_creation_function is None:
1592
+ logger.warning_once(
1593
+ f"{self.__class__.__name__} has no `_prepare_4d_causal_attention_mask_with_cache_position` method "
1594
+ "defined in its base modeling class. Compiled forward passes will be sub-optimal. If you're "
1595
+ "writing code, see Llama for an example implementation. If you're a user, please report this "
1596
+ "issue on GitHub."
1597
+ )
1598
+ else:
1599
+ attention_mask = causal_mask_creation_function(
1600
+ attention_mask,
1601
+ sequence_length=sequence_length,
1602
+ target_length=past_key_values.get_max_cache_shape(),
1603
+ dtype=self.dtype,
1604
+ device=device,
1605
+ cache_position=cache_position,
1606
+ batch_size=batch_size,
1607
+ config=self.config,
1608
+ past_key_values=past_key_values,
1609
+ )
1610
+ if attention_mask is not None:
1611
+ model_inputs[attention_mask_key] = attention_mask
1612
+
1613
+ if encoder_attention_mask is not None:
1614
+ model_inputs["attention_mask"] = encoder_attention_mask
1615
+
1616
+ # 7. Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
1617
+ for key, value in kwargs.items():
1618
+ if key not in model_inputs:
1619
+ model_inputs[key] = value
1620
+
1621
+ # 8. Remove unexpected `generate` inputs (TODO @joao: fix trainer and examples)
1622
+ model_inputs.pop("labels", None)
1623
+ return model_inputs
1624
+
1625
+ __all__ = [
1626
+ "Qwen3VEForCausalLM",
1627
+ "Qwen3Model",
1628
+ "Qwen3PreTrainedModel",
1629
+ ]
modular_intern_vit.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+
10
+ from einops import rearrange
11
+ from torch import nn
12
+ from transformers.activations import ACT2FN
13
+ from transformers.utils import logging
14
+
15
+ from .configuration_navil_vit import NaViLVisionConfig
16
+
17
+ try:
18
+ # from .flash_attention import FlashAttention
19
+ from flash_attn import flash_attn_varlen_func
20
+ from flash_attn.layers.rotary import apply_rotary_emb
21
+ has_flash_attn = True
22
+ except:
23
+ print('FlashAttention is not installed.')
24
+ has_flash_attn = False
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class InternRMSNorm(nn.Module):
30
+ def __init__(self, hidden_size, eps=1e-6):
31
+ super().__init__()
32
+ self.weight = nn.Parameter(torch.ones(hidden_size))
33
+ self.variance_epsilon = eps
34
+
35
+ def forward(self, hidden_states):
36
+ input_dtype = hidden_states.dtype
37
+ hidden_states = hidden_states.to(torch.float32)
38
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
39
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
40
+ return self.weight * hidden_states.to(input_dtype)
41
+
42
+
43
+ try:
44
+ from apex.normalization import FusedRMSNorm
45
+
46
+ InternRMSNorm = FusedRMSNorm # noqa
47
+
48
+ logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
49
+ except ImportError:
50
+ # using the normal InternRMSNorm
51
+ pass
52
+ except Exception:
53
+ logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
54
+ pass
55
+
56
+
57
+ NORM2FN = {
58
+ 'rms_norm': InternRMSNorm,
59
+ 'layer_norm': nn.LayerNorm,
60
+ }
61
+
62
+
63
+ class InternVisionRotaryEmbedding(nn.Module):
64
+ def __init__(self, dim: int, theta: float = 10000.0) -> None:
65
+ super().__init__()
66
+ inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
67
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
68
+
69
+ def forward(self, seqlen: int) -> torch.Tensor:
70
+ seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
71
+ freqs = torch.outer(seq, self.inv_freq)
72
+ return freqs
73
+
74
+
75
+ class InternAttention(nn.Module):
76
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
77
+
78
+ def __init__(self, config: NaViLVisionConfig):
79
+ super().__init__()
80
+ self.config = config
81
+ self.embed_dim = config.hidden_size
82
+ self.num_heads = config.num_attention_heads
83
+ self.use_flash_attn = config.use_flash_attn and has_flash_attn
84
+ if config.use_flash_attn and not has_flash_attn:
85
+ print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
86
+ self.head_dim = self.embed_dim // self.num_heads
87
+ if self.head_dim * self.num_heads != self.embed_dim:
88
+ raise ValueError(
89
+ f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
90
+ f' {self.num_heads}).'
91
+ )
92
+
93
+ self.scale = self.head_dim ** -0.5
94
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
95
+ self.attn_drop = nn.Dropout(config.attention_dropout)
96
+ self.proj_drop = nn.Dropout(config.dropout)
97
+
98
+ self.qk_normalization = config.qk_normalization
99
+
100
+ if self.qk_normalization:
101
+ self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
102
+ self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
103
+
104
+ if self.use_flash_attn:
105
+ self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
106
+ self.proj = nn.Linear(self.embed_dim, self.embed_dim)
107
+
108
+ def _naive_attn(self, x):
109
+ B, N, C = x.shape
110
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
111
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
112
+
113
+ if self.qk_normalization:
114
+ B_, H_, N_, D_ = q.shape
115
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
116
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
117
+
118
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
119
+ attn = attn.softmax(dim=-1)
120
+ attn = self.attn_drop(attn)
121
+
122
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
123
+ x = self.proj(x)
124
+ x = self.proj_drop(x)
125
+ return x
126
+
127
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
128
+ qkv = self.qkv(x)
129
+ qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
130
+
131
+ if self.qk_normalization:
132
+ q, k, v = qkv.unbind(2)
133
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
134
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
135
+ qkv = torch.stack([q, k, v], dim=2)
136
+
137
+ context, _ = self.inner_attn(
138
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
139
+ )
140
+ outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
141
+ outs = self.proj_drop(outs)
142
+ return outs
143
+
144
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
145
+ x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
146
+ return x
147
+
148
+
149
+ def rotate_half(x):
150
+ """Rotates half the hidden dims of the input."""
151
+ x1 = x[..., : x.shape[-1] // 2]
152
+ x2 = x[..., x.shape[-1] // 2 :]
153
+ return torch.cat((-x2, x1), dim=-1)
154
+
155
+ def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
156
+ orig_dtype = tensor.dtype
157
+ tensor = tensor.float()
158
+ cos = freqs.cos()
159
+ sin = freqs.sin()
160
+ cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
161
+ sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
162
+ output = (tensor * cos) + (rotate_half(tensor) * sin)
163
+ output = output.to(orig_dtype)
164
+ return output
165
+
166
+
167
+ class InternVisionSdpaAttention(nn.Module):
168
+ def __init__(self, config: NaViLVisionConfig) -> None:
169
+ super().__init__()
170
+
171
+ self.config = config
172
+
173
+ dim = config.hidden_size
174
+ num_heads = config.num_attention_heads
175
+ self.num_heads = num_heads
176
+ self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
177
+ self.proj = nn.Linear(dim, dim)
178
+
179
+ self.qk_normalization = config.qk_normalization
180
+
181
+ if self.qk_normalization:
182
+ self.q_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)
183
+ self.k_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)
184
+
185
+ self.proj_drop = nn.Dropout(config.dropout)
186
+
187
+ def forward(
188
+ self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor = None
189
+ ) -> torch.Tensor:
190
+ seq_length = hidden_states.shape[0]
191
+ q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
192
+
193
+ if self.qk_normalization:
194
+ q = self.q_norm(q.flatten(1).view(q.shape))
195
+ k = self.k_norm(k.flatten(1).view(k.shape))
196
+
197
+ q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
198
+ k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)
199
+
200
+ attention_mask = torch.zeros([1, seq_length, seq_length], device=q.device, dtype=torch.bool)
201
+ for i in range(1, len(cu_seqlens)):
202
+ attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = True
203
+ q = q.transpose(0, 1)
204
+ k = k.transpose(0, 1)
205
+ v = v.transpose(0, 1)
206
+ attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0)
207
+ attn_output = attn_output.transpose(0, 1)
208
+ attn_output = attn_output.reshape(seq_length, -1)
209
+ attn_output = self.proj(attn_output)
210
+ attn_output = self.proj_drop(attn_output)
211
+ return attn_output
212
+
213
+
214
+ def apply_rotary_pos_emb_flashatt(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
215
+ tensor_ = tensor.float()
216
+ cos = freqs.cos().float()
217
+ sin = freqs.sin().float()
218
+ output = apply_rotary_emb(tensor_, cos, sin).type_as(tensor)
219
+ return output
220
+
221
+
222
+ class InternVisionFlashAttention2(nn.Module):
223
+ def __init__(self, config: NaViLVisionConfig) -> None:
224
+ super().__init__()
225
+ self.config = config
226
+
227
+ dim = config.hidden_size
228
+ num_heads = config.num_attention_heads
229
+
230
+ self.num_heads = num_heads
231
+ self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
232
+ self.proj = nn.Linear(dim, dim)
233
+
234
+ self.qk_normalization = config.qk_normalization
235
+
236
+ if self.qk_normalization:
237
+ self.q_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)
238
+ self.k_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)
239
+
240
+ self.proj_drop = nn.Dropout(config.dropout)
241
+
242
+ def forward(
243
+ self,
244
+ hidden_states: torch.Tensor,
245
+ cu_seqlens: torch.Tensor,
246
+ rotary_pos_emb: torch.Tensor = None,
247
+ ) -> torch.Tensor:
248
+ seq_length = hidden_states.shape[0]
249
+ q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
250
+
251
+ if self.qk_normalization:
252
+ q = self.q_norm(q.flatten(1).view(q.shape))
253
+ k = self.k_norm(k.flatten(1).view(k.shape))
254
+
255
+ q = apply_rotary_pos_emb_flashatt(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
256
+ k = apply_rotary_pos_emb_flashatt(k.unsqueeze(0), rotary_pos_emb).squeeze(0)
257
+
258
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
259
+ attn_output = flash_attn_varlen_func(q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen).reshape(
260
+ seq_length, -1
261
+ )
262
+ attn_output = self.proj(attn_output)
263
+ attn_output = self.proj_drop(attn_output)
264
+ return attn_output
265
+
266
+
267
+ class InternMLP(nn.Module):
268
+ def __init__(self, config: NaViLVisionConfig):
269
+ super().__init__()
270
+ self.config = config
271
+ self.act = ACT2FN[config.hidden_act]
272
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
273
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
274
+
275
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
276
+ hidden_states = self.fc1(hidden_states)
277
+ hidden_states = self.act(hidden_states)
278
+ hidden_states = self.fc2(hidden_states)
279
+ return hidden_states
special_tokens_map.json ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>",
16
+ {
17
+ "content": "</box>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ {
24
+ "content": "<box>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ {
31
+ "content": "<IMG_CONTEXT>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ {
38
+ "content": "</img>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ {
45
+ "content": "<img>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ },
51
+ {
52
+ "content": "</quad>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false
57
+ },
58
+ {
59
+ "content": "<quad>",
60
+ "lstrip": false,
61
+ "normalized": false,
62
+ "rstrip": false,
63
+ "single_word": false
64
+ },
65
+ {
66
+ "content": "</ref>",
67
+ "lstrip": false,
68
+ "normalized": false,
69
+ "rstrip": false,
70
+ "single_word": false
71
+ },
72
+ {
73
+ "content": "<ref>",
74
+ "lstrip": false,
75
+ "normalized": false,
76
+ "rstrip": false,
77
+ "single_word": false
78
+ },
79
+ {
80
+ "content": "<img_uncond>",
81
+ "lstrip": false,
82
+ "normalized": false,
83
+ "rstrip": false,
84
+ "single_word": false
85
+ },
86
+ {
87
+ "content": "<IMG_LINE_BREAK>",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": false,
91
+ "single_word": false
92
+ },
93
+ {
94
+ "content": "<IMG_FRAME_BREAK>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false
99
+ }
100
+ ],
101
+ "eos_token": {
102
+ "content": "<|im_end|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false
107
+ },
108
+ "pad_token": {
109
+ "content": "<|endoftext|>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false
114
+ }
115
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": false,
5
+ "added_tokens_decoder": {
6
+ "151643": {
7
+ "content": "<|endoftext|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "151644": {
15
+ "content": "<|im_start|>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "151645": {
23
+ "content": "<|im_end|>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "151646": {
31
+ "content": "<|object_ref_start|>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "151647": {
39
+ "content": "<|object_ref_end|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false,
44
+ "special": true
45
+ },
46
+ "151648": {
47
+ "content": "<|box_start|>",
48
+ "lstrip": false,
49
+ "normalized": false,
50
+ "rstrip": false,
51
+ "single_word": false,
52
+ "special": true
53
+ },
54
+ "151649": {
55
+ "content": "<|box_end|>",
56
+ "lstrip": false,
57
+ "normalized": false,
58
+ "rstrip": false,
59
+ "single_word": false,
60
+ "special": true
61
+ },
62
+ "151650": {
63
+ "content": "<|quad_start|>",
64
+ "lstrip": false,
65
+ "normalized": false,
66
+ "rstrip": false,
67
+ "single_word": false,
68
+ "special": true
69
+ },
70
+ "151651": {
71
+ "content": "<|quad_end|>",
72
+ "lstrip": false,
73
+ "normalized": false,
74
+ "rstrip": false,
75
+ "single_word": false,
76
+ "special": true
77
+ },
78
+ "151652": {
79
+ "content": "<|vision_start|>",
80
+ "lstrip": false,
81
+ "normalized": false,
82
+ "rstrip": false,
83
+ "single_word": false,
84
+ "special": true
85
+ },
86
+ "151653": {
87
+ "content": "<|vision_end|>",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": false,
91
+ "single_word": false,
92
+ "special": true
93
+ },
94
+ "151654": {
95
+ "content": "<|vision_pad|>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false,
100
+ "special": true
101
+ },
102
+ "151655": {
103
+ "content": "<|image_pad|>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": false,
107
+ "single_word": false,
108
+ "special": true
109
+ },
110
+ "151656": {
111
+ "content": "<|video_pad|>",
112
+ "lstrip": false,
113
+ "normalized": false,
114
+ "rstrip": false,
115
+ "single_word": false,
116
+ "special": true
117
+ },
118
+ "151657": {
119
+ "content": "<tool_call>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false,
124
+ "special": false
125
+ },
126
+ "151658": {
127
+ "content": "</tool_call>",
128
+ "lstrip": false,
129
+ "normalized": false,
130
+ "rstrip": false,
131
+ "single_word": false,
132
+ "special": false
133
+ },
134
+ "151659": {
135
+ "content": "<|fim_prefix|>",
136
+ "lstrip": false,
137
+ "normalized": false,
138
+ "rstrip": false,
139
+ "single_word": false,
140
+ "special": false
141
+ },
142
+ "151660": {
143
+ "content": "<|fim_middle|>",
144
+ "lstrip": false,
145
+ "normalized": false,
146
+ "rstrip": false,
147
+ "single_word": false,
148
+ "special": false
149
+ },
150
+ "151661": {
151
+ "content": "<|fim_suffix|>",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false,
156
+ "special": false
157
+ },
158
+ "151662": {
159
+ "content": "<|fim_pad|>",
160
+ "lstrip": false,
161
+ "normalized": false,
162
+ "rstrip": false,
163
+ "single_word": false,
164
+ "special": false
165
+ },
166
+ "151663": {
167
+ "content": "<|repo_name|>",
168
+ "lstrip": false,
169
+ "normalized": false,
170
+ "rstrip": false,
171
+ "single_word": false,
172
+ "special": false
173
+ },
174
+ "151664": {
175
+ "content": "<|file_sep|>",
176
+ "lstrip": false,
177
+ "normalized": false,
178
+ "rstrip": false,
179
+ "single_word": false,
180
+ "special": false
181
+ },
182
+ "151665": {
183
+ "content": "<tool_response>",
184
+ "lstrip": false,
185
+ "normalized": false,
186
+ "rstrip": false,
187
+ "single_word": false,
188
+ "special": false
189
+ },
190
+ "151666": {
191
+ "content": "</tool_response>",
192
+ "lstrip": false,
193
+ "normalized": false,
194
+ "rstrip": false,
195
+ "single_word": false,
196
+ "special": false
197
+ },
198
+ "151667": {
199
+ "content": "<think>",
200
+ "lstrip": false,
201
+ "normalized": false,
202
+ "rstrip": false,
203
+ "single_word": false,
204
+ "special": false
205
+ },
206
+ "151668": {
207
+ "content": "</think>",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false,
212
+ "special": false
213
+ },
214
+ "151669": {
215
+ "content": "</box>",
216
+ "lstrip": false,
217
+ "normalized": false,
218
+ "rstrip": false,
219
+ "single_word": false,
220
+ "special": true
221
+ },
222
+ "151670": {
223
+ "content": "<box>",
224
+ "lstrip": false,
225
+ "normalized": false,
226
+ "rstrip": false,
227
+ "single_word": false,
228
+ "special": true
229
+ },
230
+ "151671": {
231
+ "content": "<IMG_CONTEXT>",
232
+ "lstrip": false,
233
+ "normalized": false,
234
+ "rstrip": false,
235
+ "single_word": false,
236
+ "special": true
237
+ },
238
+ "151672": {
239
+ "content": "</img>",
240
+ "lstrip": false,
241
+ "normalized": false,
242
+ "rstrip": false,
243
+ "single_word": false,
244
+ "special": true
245
+ },
246
+ "151673": {
247
+ "content": "<img>",
248
+ "lstrip": false,
249
+ "normalized": false,
250
+ "rstrip": false,
251
+ "single_word": false,
252
+ "special": true
253
+ },
254
+ "151674": {
255
+ "content": "</quad>",
256
+ "lstrip": false,
257
+ "normalized": false,
258
+ "rstrip": false,
259
+ "single_word": false,
260
+ "special": true
261
+ },
262
+ "151675": {
263
+ "content": "<quad>",
264
+ "lstrip": false,
265
+ "normalized": false,
266
+ "rstrip": false,
267
+ "single_word": false,
268
+ "special": true
269
+ },
270
+ "151676": {
271
+ "content": "</ref>",
272
+ "lstrip": false,
273
+ "normalized": false,
274
+ "rstrip": false,
275
+ "single_word": false,
276
+ "special": true
277
+ },
278
+ "151677": {
279
+ "content": "<ref>",
280
+ "lstrip": false,
281
+ "normalized": false,
282
+ "rstrip": false,
283
+ "single_word": false,
284
+ "special": true
285
+ },
286
+ "151678": {
287
+ "content": "<img_uncond>",
288
+ "lstrip": false,
289
+ "normalized": false,
290
+ "rstrip": false,
291
+ "single_word": false,
292
+ "special": true
293
+ },
294
+ "151679": {
295
+ "content": "<IMG_LINE_BREAK>",
296
+ "lstrip": false,
297
+ "normalized": false,
298
+ "rstrip": false,
299
+ "single_word": false,
300
+ "special": true
301
+ },
302
+ "151680": {
303
+ "content": "<IMG_FRAME_BREAK>",
304
+ "lstrip": false,
305
+ "normalized": false,
306
+ "rstrip": false,
307
+ "single_word": false,
308
+ "special": true
309
+ }
310
+ },
311
+ "additional_special_tokens": [
312
+ "<|im_start|>",
313
+ "<|im_end|>",
314
+ "<|object_ref_start|>",
315
+ "<|object_ref_end|>",
316
+ "<|box_start|>",
317
+ "<|box_end|>",
318
+ "<|quad_start|>",
319
+ "<|quad_end|>",
320
+ "<|vision_start|>",
321
+ "<|vision_end|>",
322
+ "<|vision_pad|>",
323
+ "<|image_pad|>",
324
+ "<|video_pad|>",
325
+ "</box>",
326
+ "<box>",
327
+ "<IMG_CONTEXT>",
328
+ "</img>",
329
+ "<img>",
330
+ "</quad>",
331
+ "<quad>",
332
+ "</ref>",
333
+ "<ref>",
334
+ "<img_uncond>",
335
+ "<IMG_LINE_BREAK>",
336
+ "<IMG_FRAME_BREAK>"
337
+ ],
338
+ "bos_token": null,
339
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is defined and message.reasoning_content is not none %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in message.content %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- set reasoning_content = 
message.content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}",
340
+ "clean_up_tokenization_spaces": false,
341
+ "eos_token": "<|im_end|>",
342
+ "errors": "replace",
343
+ "extra_special_tokens": {},
344
+ "model_max_length": 16384,
345
+ "pad_token": "<|endoftext|>",
346
+ "split_special_tokens": false,
347
+ "tokenizer_class": "Qwen2Tokenizer",
348
+ "unk_token": null
349
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff