import copy
from typing import Callable, List, Optional, Tuple, Union, Mapping

import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from einops import rearrange, repeat
from transformers import AutoModel, AutoTokenizer
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.generation import GenerationMixin
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from transformers.utils import logging

from .configuration_gigarembed import GigarConfig, GigarEmbedConfig, LatentAttentionConfig

# Use the transformers logger: `logger.warning_once` (used below) is not available
# on a plain stdlib logger.
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "GigarEmbedConfig"


class GigarMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GigarRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GigarRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
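
# Illustrative sketch (not part of the model): how `apply_rotary_pos_emb` is meant
# to be called. The shapes and the toy `inv_freq` table below are assumptions for
# demonstration only; in the model, `cos`/`sin` come from `GigarRotaryEmbedding`.
def _rope_usage_sketch():
    bsz, heads, seq_len, head_dim = 2, 4, 8, 16
    q = torch.randn(bsz, heads, seq_len, head_dim)
    k = torch.randn(bsz, heads, seq_len, head_dim)
    # Toy rotary table: inverse frequencies over half the head dim, duplicated to full width.
    inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)  # [seq_len, head_dim // 2]
    emb = torch.cat((freqs, freqs), dim=-1)                       # [seq_len, head_dim]
    cos, sin = emb.cos()[None], emb.sin()[None]                   # [1, seq_len, head_dim], broadcast over batch
    # unsqueeze_dim=1 inserts the heads axis so cos/sin broadcast against q/k.
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
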
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from
    (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
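
# Illustrative sketch (not part of the model): calling `eager_attention_forward`
# directly. The module stub and all shapes are assumptions for demonstration.
# Note that the Gigar config below pins query heads == KV heads (groups of 1),
# but the helper itself supports GQA, as shown here.
def _eager_attention_sketch():
    class _Stub(nn.Module):
        num_key_value_groups = 2  # e.g. 8 query heads sharing 4 KV heads

    bsz, q_heads, kv_heads, seq_len, head_dim = 2, 8, 4, 10, 16
    query = torch.randn(bsz, q_heads, seq_len, head_dim)
    key = torch.randn(bsz, kv_heads, seq_len, head_dim)
    value = torch.randn(bsz, kv_heads, seq_len, head_dim)
    # repeat_kv broadcasts the 4 KV heads up to the 8 query heads.
    out, weights = eager_attention_forward(
        _Stub(), query, key, value, attention_mask=None, scaling=head_dim**-0.5
    )
    assert out.shape == (bsz, seq_len, q_heads, head_dim)  # already transposed back to [b, s, h, d]
    assert weights.shape == (bsz, q_heads, seq_len, seq_len)
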
class GigarLatentAttention(nn.Module):
    """
    Multi-headed Latent Attention (MLA)

    Check out the original paper: https://arxiv.org/pdf/2405.04434, and the reference implementation:
    https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py
    """

    def __init__(self, config: GigarConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        assert config.num_attention_heads == config.num_key_value_heads, (
            "GQA for MLA is not supported (does it even make sense?)"
        )
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.apply_qk_norm = config.apply_qk_norm
        self.attention_dropout = config.attention_dropout

        assert config.mla_config is not None
        self.qk_nope_head_dim = config.mla_config["qk_nope_head_dim"]
        self.qk_rope_head_dim = config.mla_config["qk_rope_head_dim"]
        self.v_head_dim = config.mla_config["v_head_dim"]  # V has no rope part
        self.kv_lora_rank = config.mla_config["kv_lora_rank"]
        self.q_lora_rank = config.mla_config["q_lora_rank"]
        self.qk_head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim
        self.scaling = self.qk_head_dim**-0.5

        if self.q_lora_rank == 0:
            self.q_proj = nn.Linear(
                self.hidden_size,
                self.num_heads * self.qk_head_dim,
                bias=config.attention_bias,
            )
        else:
            self.dq_proj = nn.Linear(
                self.hidden_size,
                self.q_lora_rank,
                bias=config.attention_bias,
            )
            self.q_norm = GigarRMSNorm(self.q_lora_rank)
            self.uq_proj = nn.Linear(
                self.q_lora_rank,
                self.num_heads * self.qk_head_dim,
                bias=config.attention_bias,
            )

        self.kv_norm = GigarRMSNorm(self.kv_lora_rank)
        self.dkv_proj = nn.Linear(
            self.hidden_size,
            self.kv_lora_rank,
            bias=config.attention_bias,
        )
        self.uk_proj = nn.Linear(
            self.kv_lora_rank,
            self.num_heads * self.qk_nope_head_dim,
            bias=config.attention_bias,
        )
        self.uv_proj = nn.Linear(
            self.kv_lora_rank,
            self.num_heads * self.v_head_dim,
            bias=config.attention_bias,
        )
        self.kr_proj = nn.Linear(
            self.hidden_size,
            self.num_heads * self.qk_rope_head_dim,
            bias=config.attention_bias,
        )
        self.o_proj = nn.Linear(
            self.num_heads * self.v_head_dim,
            self.hidden_size,
            bias=config.attention_bias,
        )

        if self.apply_qk_norm:
            self.qk_q_norm = nn.LayerNorm(self.num_heads * self.qk_head_dim, bias=False)
            self.qk_k_norm = nn.LayerNorm(self.num_heads * self.qk_head_dim, bias=False)

        # NOTE: currently unused here; rotary embeddings are computed once in `GigarModel.rotary_emb`.
        config_for_rope = copy.copy(self.config)
        config_for_rope.head_dim = self.qk_rope_head_dim

        self.is_causal = False

    def _compute_qkv(
        self,
        hidden_states: torch.Tensor,
    ):
        """Compute query, key, and value tensors from hidden states."""
        bsz, seq_len, _ = hidden_states.size()

        if self.q_lora_rank == 0:
            query = self.q_proj(hidden_states)
        else:
            query = self.uq_proj(self.q_norm(self.dq_proj(hidden_states)))

        latent = self.dkv_proj(hidden_states)
        latent = self.kv_norm(latent)
        k_rope = self.kr_proj(hidden_states)
        k_nope = self.uk_proj(latent)
        value = self.uv_proj(latent)

        if self.apply_qk_norm:
            query = self.qk_q_norm(query).to(query.dtype)
            key = self.qk_k_norm(torch.cat([k_nope, k_rope], dim=-1)).to(k_nope.dtype)
            k_nope, k_rope = torch.split(key, [k_nope.shape[-1], k_rope.shape[-1]], dim=-1)

        # Reshape tensors
        query = query.view(bsz, seq_len, self.num_heads, self.qk_head_dim).transpose(1, 2)
        k_nope = k_nope.view(bsz, seq_len, self.num_heads, self.qk_nope_head_dim).transpose(1, 2)
        k_rope = k_rope.view(bsz, seq_len, self.num_heads, self.qk_rope_head_dim).transpose(1, 2)
        value = value.view(bsz, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2)

        q_nope, q_rope = torch.split(query, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
        return q_nope, q_rope, k_nope, k_rope, value

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """
        hidden_states: [bsz, seq_len, hidden_size]
        attention_mask: additive 4D mask [bsz, 1, seq_len, kv_len] (or a 2D padding mask for
            flash attention), or None
        """
        batch_size, seq_len, _ = hidden_states.size()

        q_nope, q_rope, k_nope, k_rope, value_states = self._compute_qkv(hidden_states)

        # cos, sin = self.rotary_emb(q_rope, seq_len=seq_len)
        cos, sin = position_embeddings
        q_rope, k_rope = apply_rotary_pos_emb(q_rope, k_rope, cos, sin)

        query_states = torch.cat([q_nope, q_rope], dim=-1)
        key_states = torch.cat([k_nope, k_rope], dim=-1)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(batch_size, seq_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
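
# Illustrative sketch (not part of the model): the shape bookkeeping behind the MLA
# projections above. All sizes here are assumptions for demonstration; in the model
# they come from `config.mla_config`.
def _mla_shapes_sketch():
    hidden_size, num_heads = 256, 4
    qk_nope_head_dim, qk_rope_head_dim, v_head_dim, kv_lora_rank = 32, 16, 32, 64
    x = torch.randn(2, 8, hidden_size)  # [bsz, seq_len, hidden]

    # K/V are reconstructed from a single low-rank latent ...
    dkv = nn.Linear(hidden_size, kv_lora_rank, bias=False)
    uk = nn.Linear(kv_lora_rank, num_heads * qk_nope_head_dim, bias=False)
    uv = nn.Linear(kv_lora_rank, num_heads * v_head_dim, bias=False)
    # ... while the rotary part of K comes straight from the hidden states.
    kr = nn.Linear(hidden_size, num_heads * qk_rope_head_dim, bias=False)

    latent = dkv(x)  # only this [bsz, seq_len, kv_lora_rank] tensor is "compressed"
    k_nope, value, k_rope = uk(latent), uv(latent), kr(x)
    assert k_nope.shape[-1] + k_rope.shape[-1] == num_heads * (qk_nope_head_dim + qk_rope_head_dim)
    assert value.shape[-1] == num_heads * v_head_dim
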
class GigarDecoderLayer(nn.Module):
    def __init__(self, config: GigarConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GigarLatentAttention(config, layer_idx)
        self.mlp = GigarMLP(config)
        self.input_layernorm = GigarRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GigarRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class GigarRotaryEmbedding(nn.Module):
    def __init__(self, config: GigarConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
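
# Illustrative sketch (not part of the model): the batched outer product at the core
# of `GigarRotaryEmbedding.forward` — positions times inverse frequencies, duplicated
# across the two rotated halves. Sizes are assumptions for demonstration.
def _rotary_table_sketch():
    head_dim, bsz, seq_len = 16, 2, 6
    inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    position_ids = torch.arange(seq_len)[None, :].expand(bsz, -1)

    inv_freq_expanded = inv_freq[None, :, None].expand(bsz, -1, 1)        # [bsz, head_dim/2, 1]
    position_ids_expanded = position_ids[:, None, :].float()              # [bsz, 1, seq_len]
    freqs = (inv_freq_expanded @ position_ids_expanded).transpose(1, 2)   # [bsz, seq_len, head_dim/2]
    emb = torch.cat((freqs, freqs), dim=-1)                               # [bsz, seq_len, head_dim]
    assert emb.cos().shape == (bsz, seq_len, head_dim)
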
GIGAR_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`GigarConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Gigar Model outputting raw hidden-states without any specific head on top.",
    GIGAR_START_DOCSTRING,
)
class GigarPreTrainedModel(PreTrainedModel):
    config_class = GigarConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GigarDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
GIGAR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:

            - a [`~cache_utils.Cache`] instance, see our
              [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""


@add_start_docstrings(
    "The bare Gigar Model outputting raw hidden-states without any specific head on top.",
    GIGAR_START_DOCSTRING,
)
class GigarModel(GigarPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GigarDecoderLayer`]

    Args:
        config: GigarConfig
    """
    def __init__(self, config: GigarConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GigarDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GigarRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GigarRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(GIGAR_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        attention_mask = self._update_encoder_mask(attention_mask, inputs_embeds)

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,  # causal_mask
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,  # causal_mask
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output if return_dict else output.to_tuple()
    def _update_encoder_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
    ):
        # For flash_attention_2, return the original 2D mask
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0).any():
                return attention_mask
            return None

        dtype, device = input_tensor.dtype, input_tensor.device
        batch_size, sequence_length = input_tensor.shape[:2]

        # 1. Build an unrestricted base mask (every token attends to every other token)
        encoder_mask = torch.full(
            (batch_size, 1, sequence_length, sequence_length), fill_value=1.0, dtype=dtype, device=device
        )

        # 2. Apply the padding mask, if any
        if attention_mask is not None:
            # Build a 4D padding mask [batch, 1, 1, seq_len]
            padding_mask = attention_mask[:, None, None, :].to(dtype=dtype)
            # Combine: zero out the positions where padding_mask == 0
            encoder_mask = encoder_mask * padding_mask

        # Convert to the additive format expected by softmax (0 -> -inf)
        min_dtype = torch.finfo(dtype).min
        encoder_mask = encoder_mask.masked_fill(encoder_mask == 0.0, min_dtype)

        return encoder_mask

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
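
# Illustrative sketch (not part of the model): what `_update_encoder_mask` produces
# for a padded batch. Shapes and values are assumptions for demonstration; the model
# additionally matches dtype/device to the inputs.
def _encoder_mask_sketch():
    # 2D padding mask: second sequence has one padded position.
    attention_mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.float32)
    bsz, seq_len = attention_mask.shape
    full = torch.ones(bsz, 1, seq_len, seq_len)       # bidirectional: everyone attends to everyone
    mask = full * attention_mask[:, None, None, :]    # zero out padded key positions
    min_value = torch.finfo(torch.float32).min
    mask = mask.masked_fill(mask == 0.0, min_value)   # additive form expected by softmax
    assert mask.shape == (bsz, 1, seq_len, seq_len)
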
class FeedForward(nn.Module):
    def __init__(self, dim, mult=4):
        super().__init__()
        self.hidden_size = dim
        self.intermediate_size = dim * mult
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = nn.SiLU()

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class Attention(nn.Module):
    def __init__(self, query_dimension, context_dimension=None, num_heads=8, head_dim=64):
        super().__init__()
        inner_dimension = head_dim * num_heads
        context_dimension = context_dimension if context_dimension is not None else query_dimension
        self.scaling_factor = head_dim ** -0.5
        self.num_heads = num_heads
        self.to_q = nn.Linear(query_dimension, inner_dimension, bias=False)
        self.to_kv = nn.Linear(context_dimension, inner_dimension * 2, bias=False)
        self.to_out = nn.Linear(inner_dimension, query_dimension, bias=False)

    def forward(self, input_tensor, context=None, attention_mask=None):
        # NB: `attention_mask` is accepted for interface compatibility but is currently unused here.
        batch_size, seq_len, _ = input_tensor.shape
        num_heads = self.num_heads

        # Project input to query
        query = self.to_q(input_tensor)

        # Use input as context if not provided
        context = input_tensor if context is None else context
        key, value = self.to_kv(context).chunk(2, dim=-1)

        # Rearrange for multi-head attention
        query = rearrange(query, 'b n (h d) -> (b h) n d', h=num_heads)
        key = rearrange(key, 'b n (h d) -> (b h) n d', h=num_heads)
        value = rearrange(value, 'b n (h d) -> (b h) n d', h=num_heads)

        # Compute scaled dot-product attention
        # (`torch.backends.cuda.sdp_kernel` is deprecated in recent PyTorch in favor of
        # `torch.nn.attention.sdpa_kernel`, but is kept here for compatibility)
        with torch.backends.cuda.sdp_kernel(
            enable_flash=True, enable_math=True, enable_mem_efficient=True
        ):
            attention_output = F.scaled_dot_product_attention(query, key, value)

        # Rearrange back to original shape
        attention_output = rearrange(attention_output, '(b h) n d -> b n (h d)', h=num_heads)

        return self.to_out(attention_output)
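
# Illustrative sketch (not part of the model): cross-attention with the `Attention`
# block above. All dimensions are assumptions for demonstration.
def _cross_attention_sketch():
    attn = Attention(query_dimension=128, context_dimension=256, num_heads=4, head_dim=32)
    queries = torch.randn(2, 10, 128)  # e.g. token states
    context = torch.randn(2, 6, 256)   # e.g. a separate context sequence
    out = attn(queries, context=context)
    assert out.shape == (2, 10, 128)   # output lives in the query space
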
class LatentAttentionModel(PreTrainedModel):
    config_class = LatentAttentionConfig

    def __init__(self, configuration: LatentAttentionConfig):
        super().__init__(configuration)

        # Extract configuration parameters
        num_latents = configuration.num_latents_value
        latent_dimension = configuration.latent_dim
        cross_attention_heads = configuration.num_cross_heads
        cross_head_dimension = configuration.cross_dim_head
        hidden_dimension = configuration.hidden_dim

        # Initialize cross-attention components
        self.cross_attend_blocks = nn.ModuleList([
            Attention(
                query_dimension=latent_dimension,
                context_dimension=hidden_dimension,
                num_heads=cross_attention_heads,
                head_dim=cross_head_dimension,
            ),
            FeedForward(latent_dimension),
        ])

        # Register learnable latents as model parameter
        self.latents = nn.Parameter(torch.randn(num_latents, latent_dimension))

    def forward(self, hidden_states, attention_mask: Optional[torch.Tensor] = None):
        cross_attention, feed_forward = self.cross_attend_blocks
        batch_size, device = hidden_states.size(0), hidden_states.device

        # Expand latents to match batch size
        expanded_latents = self.latents.repeat(batch_size, 1, 1)

        # Apply cross-attention with residual connection
        # (the token states act as queries; the learned latents serve as keys/values)
        attended_output = cross_attention(
            hidden_states, context=expanded_latents, attention_mask=attention_mask
        ) + hidden_states

        # Apply feed-forward with residual connection
        processed_output = feed_forward(attended_output) + attended_output

        return processed_output


class GigarEmbedModel(PreTrainedModel):
    config_class = GigarEmbedConfig
    _supports_flash_attn_2 = True
    _no_split_modules = ["GigarDecoderLayer", "LatentAttentionModel"]

    def __init__(self, configuration: GigarEmbedConfig):
        super().__init__(configuration)

        # Initialize latent attention model
        self.latent_attention_model = AutoModel.from_config(configuration.latent_attention_config)

        self.tokenizer, self.text_encoder = None, None
        if configuration.text_config is not None:
            # Initialize text model if provided in config
            self.model = AutoModel.from_config(configuration.text_config)
            # Initialize tokenizer if text config is available
            self.tokenizer = AutoTokenizer.from_pretrained(configuration.text_config.name_or_path)

        # Set configuration parameters
        self.padding_side = configuration.padding_side
        self.add_eos = configuration.add_eos
        self.mask_type = configuration.mask_type

        # Add padding token if configured
        if configuration.add_pad_token and self.tokenizer is not None:
            self.add_pad_token()

    def add_pad_token(self):
        self.tokenizer.pad_token_id = 0
        self.tokenizer.padding_side = self.padding_side

    def gradient_checkpointing_enable(self, *args, **kwargs):
        self.model.gradient_checkpointing_enable(*args, **kwargs)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        return_embeddings: bool = False,
        **kwargs,
    ):
        kwargs.pop('token_type_ids', None)
        with torch.autocast('cuda', dtype=torch.bfloat16):
            outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
            last_hidden = self.latent_attention_model(outputs.last_hidden_state, attention_mask)
        if return_embeddings:
            return self.mean_pool(last_hidden, attention_mask)
        return BaseModelOutputWithPast(last_hidden_state=last_hidden)

    def mean_pool(self, last_hidden: torch.Tensor, attention_mask: torch.Tensor):
        last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0)
        embeddings = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
        return F.normalize(embeddings, p=2, dim=-1)
## AutoModel Register
AutoModel.register(GigarConfig, GigarModel)
AutoModel.register(GigarEmbedConfig, GigarEmbedModel)
AutoModel.register(LatentAttentionConfig, LatentAttentionModel)

## Register for auto class
GigarModel.register_for_auto_class("AutoModel")
GigarEmbedModel.register_for_auto_class("AutoModel")
LatentAttentionModel.register_for_auto_class("AutoModel")
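
# Illustrative usage sketch: encoding sentences with GigarEmbedModel. The checkpoint
# path is hypothetical, `trust_remote_code=True` is required because these classes
# live in the model repository rather than in `transformers` itself, and a CUDA
# device is assumed by `forward`'s autocast. Kept as a comment so it does not run
# at import time:
#
#     model = AutoModel.from_pretrained("<path-or-hub-id>", trust_remote_code=True).cuda().eval()
#     batch = model.tokenizer(["first sentence", "second sentence"], padding=True, return_tensors="pt")
#     embeddings = model(
#         batch["input_ids"].cuda(), batch["attention_mask"].cuda(), return_embeddings=True
#     )
#     # embeddings: [batch, hidden], L2-normalized (see `mean_pool`)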