import torch
import torch.nn as nn
from typing import Optional
from transformers import AutoProcessor
from transformers.image_utils import load_image
from transformers.models.siglip.modeling_siglip import (
    SiglipModel,
    SiglipVisionModel,
    SiglipTextModel,
    SiglipPreTrainedModel,
    SiglipVisionTransformer,
)
from transformers.models.siglip.configuration_siglip import (
    SiglipVisionConfig,
    SiglipTextConfig,
)
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.utils import can_return_tuple, add_start_docstrings_to_model_forward, replace_return_docstrings
from transformers.models.siglip.modeling_siglip import SIGLIP_VISION_INPUTS_DOCSTRING, SIGLIP_TEXT_INPUTS_DOCSTRING
|
|
def apply_masks(x, masks):
    """
    Gather the patch embeddings selected by each mask and stack the resulting views.

    :param x: tensor of shape [B (batch_size), N (num_patches), D (feature_dim)]
    :param masks: list of tensors of shape [B, K] containing indices of the K patches in [0, N) to keep
    :return: tensor of shape [len(masks) * B, K, D]
    """
    all_x = []
    for m in masks:
        # Broadcast the [B, K] index tensor across the feature dimension so that
        # torch.gather selects whole patch embeddings.
        mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
        all_x.append(torch.gather(x, dim=1, index=mask_keep))
    return torch.cat(all_x, dim=0)
|
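# A minimal sketch of the shapes involved (values are illustrative):
#
#   x = torch.randn(2, 16, 768)           # [B, N, D]
#   keep = torch.randint(0, 16, (2, 4))   # [B, K] indices of patches to keep
#   out = apply_masks(x, [keep])          # [len(masks) * B, K, D] -> (2, 4, 768)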
|
class MaskSiglipVisionTransformer(SiglipVisionTransformer):
    @can_return_tuple
    @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        mask: Optional[list] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        # Keep only the masked patch embeddings before the encoder runs. With M masks
        # the effective batch grows to M * B (see `apply_masks`).
        if mask is not None:
            hidden_states = apply_masks(hidden_states, mask)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooler_output = self.head(last_hidden_state) if self.use_head else None
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
|
class MaskSiglipVisionModel(SiglipVisionModel):
    def __init__(self, config):
        # Call the grandparent __init__ directly so the stock SiglipVisionTransformer
        # is never built; install the mask-aware variant instead.
        SiglipPreTrainedModel.__init__(self, config)
        self.vision_model = MaskSiglipVisionTransformer(config)
        self.post_init()

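# A minimal usage sketch (checkpoint name and mask indices are illustrative).
# The mask is passed to the inner transformer, which accepts the extra argument:
#
#   vision = MaskSiglipVisionModel.from_pretrained("google/siglip2-base-patch16-256")
#   pixel_values = torch.randn(1, 3, 256, 256)   # already-preprocessed images
#   keep = torch.arange(64).unsqueeze(0)         # keep 64 of the 256 patches
#   out = vision.vision_model(pixel_values, mask=[keep])
#   out.last_hidden_state.shape                  # -> (1, 64, hidden_size)
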
class MaskSiglipModel(SiglipModel):
    def __init__(self, config):
        # Skip SiglipModel.__init__ so the vision tower can be swapped for the
        # mask-aware variant; everything else mirrors the upstream constructor.
        SiglipPreTrainedModel.__init__(self, config)

        if not isinstance(config.text_config, SiglipTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type SiglipTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, SiglipVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        text_model = SiglipTextModel._from_config(text_config)
        vision_model = MaskSiglipVisionModel._from_config(vision_config)

        self.text_model = text_model.text_model
        self.vision_model = vision_model.vision_model

        self.logit_scale = nn.Parameter(torch.randn(1))
        self.logit_bias = nn.Parameter(torch.randn(1))

        self.post_init()

    @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`SiglipTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")

        >>> # important: make sure to set padding="max_length" as that's how the model was trained
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
        >>> with torch.no_grad():
        ...     text_features = model.get_text_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        pooled_output = text_outputs.pooler_output

        # Unlike the upstream SiglipModel, also expose the final hidden states when requested.
        if output_hidden_states:
            return text_outputs.hidden_states[-1], pooled_output
        return pooled_output

    @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        mask: Optional[list] = None,
    ) -> torch.FloatTensor:
        r"""
        mask (`list` of `torch.Tensor`, *optional*):
            Per-image indices of the patch embeddings to keep (see `apply_masks`).

        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`SiglipVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     image_features = model.get_image_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            mask=mask,
        )

        pooled_output = vision_outputs.pooler_output

        # Unlike the upstream SiglipModel, also expose the final hidden states when requested.
        if output_hidden_states:
            return vision_outputs.hidden_states[-1], pooled_output
        return pooled_output
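
# A sketch of masked feature extraction end to end (mask indices are illustrative;
# note that with output_hidden_states=True both feature extractors return a
# (last_hidden_state, pooled_output) tuple instead of a single tensor):
#
#   model = MaskSiglipModel.from_pretrained("google/siglip2-base-patch16-256").eval()
#   processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-256")
#   inputs = processor(images=image, return_tensors="pt")
#   keep = torch.arange(128).unsqueeze(0)   # keep 128 of the 256 patches
#   pooled = model.get_image_features(**inputs, mask=[keep])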
|
|
def compare_model_parameters(model1, model2, rtol=1e-5, atol=1e-5):
    """Compare the named parameters of two models and return the names that differ."""
    state_dict1 = dict(model1.named_parameters())
    state_dict2 = dict(model2.named_parameters())

    mismatched_keys = []
    total_keys = len(state_dict1)
    matching_keys = 0

    for key in state_dict1:
        tensor1 = state_dict1[key].detach().cpu()
        tensor2 = state_dict2[key].detach().cpu()

        if not torch.allclose(tensor1, tensor2, rtol=rtol, atol=atol):
            diff_norm = torch.norm(tensor1 - tensor2).item()
            print(f"❌ Mismatch in parameter '{key}' (||diff|| = {diff_norm:.6f})")
            mismatched_keys.append(key)
        else:
            matching_keys += 1

    print(f"{matching_keys}/{total_keys} parameters match.")
    return mismatched_keys
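
# Intended use (a sketch): check that two models hold identical weights, e.g. that
# adapter-wrapping or a save/reload round trip left the parameters untouched:
#
#   mismatched = compare_model_parameters(model_a, model_b)
#   assert not mismatched, f"{len(mismatched)} parameters differ"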
|
|
if __name__ == '__main__':
    import numpy as np
    from peft import LoraConfig, get_peft_model

    ckpt = "google/siglip2-base-patch16-256"

    model = MaskSiglipModel.from_pretrained(ckpt, device_map="auto").eval()

    # Attach LoRA adapters to the attention and MLP projections.
    lora_config = LoraConfig(
        r=32,
        lora_alpha=64,
        target_modules=["q_proj", "v_proj", "k_proj", "fc1", "fc2"],
        lora_dropout=0.05,
        bias="none",
        task_type="FEATURE_EXTRACTION",
    )
    model_1 = get_peft_model(model, lora_config)

    # A second, unmodified copy of the checkpoint for reference comparisons.
    model2 = MaskSiglipModel.from_pretrained(ckpt, device_map="auto").eval()
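    # One possible sanity check (a sketch; merge_and_unload() strips the adapters,
    # so only do this once the PEFT model is no longer needed): freshly initialized
    # LoRA adapters have lora_B == 0, so merging should reproduce the base weights:
    #
    #   merged = model_1.merge_and_unload()
    #   assert not compare_model_parameters(merged, model2)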
|
|
| |
| processor = AutoProcessor.from_pretrained(ckpt) |
|
|
| |
| |
| image = load_image("https://huggingface.co/datasets/merve/coco/resolve/main/val2017/000000000285.jpg") |
| |
| image_np = np.array(image) |
|
|
| |
| image_batch_np = np.stack([image_np.copy() for _ in range(32)], axis=0) |
| inputs = processor(images=torch.tensor(image_batch_np), return_tensors="pt").to(model.device) |
| |
| with torch.no_grad(): |
| image_embeddings_1 = model_1.get_image_features(**inputs) |
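
    # A masked variant (a sketch; the kept-patch indices are arbitrary): only the
    # first 128 of the 256 patch embeddings are passed through the encoder.
    keep = torch.arange(128).unsqueeze(0).repeat(32, 1).to(model.device)
    with torch.no_grad():
        masked_embeddings = model_1.get_image_features(**inputs, mask=[keep])
    print(image_embeddings_1.shape, masked_embeddings.shape)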