- """PyTorch PanguAlpha GPT2 Model"""
- # from .configuration_gptpangu import GPTPanguConfig
-
- from typing import Tuple
- import math
-
- import torch
- import torch.utils.checkpoint
- from torch import nn
-
- from transformers.activations import ACT2FN
- from transformers.modeling_utils import PreTrainedModel
- from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
-
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
-
- class GPTPanguAttention(nn.Module):
- def __init__(self, config):
- super().__init__()
-
- max_positions = config.max_position_embeddings
- self.register_buffer(
- "bias",
- torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
- 1, 1, max_positions, max_positions
- ),
- )
- self.register_buffer("masked_bias", torch.tensor(-1e4))
-
- self.embed_dim = config.hidden_size
- self.num_heads = config.num_heads
- self.head_dim = self.embed_dim // self.num_heads
- if self.head_dim * self.num_heads != self.embed_dim:
- raise ValueError(
- f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
- )
-
- self.scale_attn_weights = config.scale_attn_weights
-
- self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
- self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
- self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
- self.c_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
-
- self.attn_dropout = nn.Dropout(config.attn_pdrop)
- self.resid_dropout = nn.Dropout(config.resid_pdrop)
-
- def _attn(self, query, key, value, attention_mask=None, head_mask=None):
- attn_weights = torch.matmul(query, key.transpose(-1, -2))
-
- if self.scale_attn_weights:
- attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
-
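- # Illustrative example of the causal-mask slice below: with a cache of 4 past tokens
- # and 1 new token, query_length = 1 and key_length = 5, so the slice keeps
- # bias[..., 4:5, :5], the single row that lets the new token attend to positions 0..4.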
- query_length, key_length = query.size(-2), key.size(-2)
- causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
- attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
-
- if attention_mask is not None:
- # Apply the attention mask
- attn_weights = attn_weights + attention_mask
-
- attn_weights = nn.functional.softmax(attn_weights, dim=-1)
-
- # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
- attn_weights = attn_weights.type(value.dtype)
- attn_weights = self.attn_dropout(attn_weights)
-
- # Mask heads if we want to
- if head_mask is not None:
- attn_weights = attn_weights * head_mask
-
- attn_output = torch.matmul(attn_weights, value)
-
- return attn_output, attn_weights
-
- def _split_heads(self, tensor, num_heads, attn_head_size):
- """
- Splits hidden_size dim into attn_head_size and num_heads
- """
- new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
- tensor = tensor.view(*new_shape)
- return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
-
- def _merge_heads(self, tensor, num_heads, attn_head_size):
- """
- Merges attn_head_size dim and num_attn_heads dim into hidden_size
- """
- tensor = tensor.permute(0, 2, 1, 3).contiguous()
- new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
- return tensor.view(new_shape)
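-
- # Shape example for _split_heads/_merge_heads (illustrative sizes only):
- # with hidden_size=2560 and num_heads=32, head_dim = 2560 // 32 = 80, so
- # (batch, seq, 2560) <-> (batch, 32, seq, 80).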
-
- def forward(
- self,
- hidden_states,
- layer_past=None,
- attention_mask=None,
- head_mask=None,
- custom_query=None,
- use_cache=False,
- output_attentions=False,
- ):
- # PanGu top query: the last block attends with dedicated position-query embeddings
- query = self.q_proj(custom_query if custom_query is not None else hidden_states)
- key = self.k_proj(hidden_states)
- value = self.v_proj(hidden_states)
-
- query = self._split_heads(query, self.num_heads, self.head_dim)
- key = self._split_heads(key, self.num_heads, self.head_dim)
- value = self._split_heads(value, self.num_heads, self.head_dim)
-
- if layer_past is not None:
- past_key, past_value = layer_past
- key = torch.cat((past_key, key), dim=-2)
- value = torch.cat((past_value, value), dim=-2)
-
- if use_cache is True:
- present = (key, value)
- else:
- present = None
-
- attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
-
- attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
- attn_output = self.c_proj(attn_output)
- attn_output = self.resid_dropout(attn_output)
-
- outputs = (attn_output, present)
- if output_attentions:
- outputs += (attn_weights,)
-
- return outputs # a, present, (attentions)
-
-
- class GPTPanguMLP(nn.Module):
- def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size
- super().__init__()
- embed_dim = config.hidden_size
- self.c_fc = nn.Linear(embed_dim, intermediate_size)
- self.c_proj = nn.Linear(intermediate_size, embed_dim)
- self.act = ACT2FN[config.activation_function]
- self.dropout = nn.Dropout(config.resid_pdrop)
-
- def forward(self, hidden_states):
- hidden_states = self.c_fc(hidden_states)
- hidden_states = self.act(hidden_states)
- hidden_states = self.c_proj(hidden_states)
- hidden_states = self.dropout(hidden_states)
- return hidden_states
-
-
- class GPTPanguBlock(nn.Module):
- def __init__(self, config):
- super().__init__()
- hidden_size = config.hidden_size
- inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size
-
- self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
- self.attn = GPTPanguAttention(config)
- self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
- self.mlp = GPTPanguMLP(inner_dim, config)
-
- def forward(
- self,
- hidden_states,
- layer_past=None,
- attention_mask=None,
- head_mask=None,
- custom_query=None,
- use_cache=False,
- output_attentions=False,
- ):
- residual = hidden_states
- hidden_states = self.ln_1(hidden_states)
- attn_outputs = self.attn(
- hidden_states,
- layer_past=layer_past,
- attention_mask=attention_mask,
- head_mask=head_mask,
- custom_query=custom_query,
- use_cache=use_cache,
- output_attentions=output_attentions,
- )
- attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
- outputs = attn_outputs[1:]
- # residual connection
- hidden_states = attn_output + residual
-
- residual = hidden_states
- hidden_states = self.ln_2(hidden_states)
- feed_forward_hidden_states = self.mlp(hidden_states)
- # residual connection
- hidden_states = residual + feed_forward_hidden_states
-
- if use_cache:
- outputs = (hidden_states,) + outputs
- else:
- outputs = (hidden_states,) + outputs[1:]
- return outputs # hidden_states, present, (attentions)
-
-
- class GPTPanguPreTrainedModel(PreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- # config_class = GPTPanguConfig
- base_model_prefix = "transformer"
- supports_gradient_checkpointing = True
- _no_split_modules = ["GPTPanguBlock"]
-
- def __init__(self, *inputs, **kwargs):
- super().__init__(*inputs, **kwargs)
-
- def _init_weights(self, module):
- """Initialize the weights."""
- if isinstance(module, (nn.Linear,)):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
- # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
- # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
- # > -- GPT-2 :: https://openai.com/blog/better-language-models/
- #
- # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
- for name, p in module.named_parameters():
- if "c_proj" in name and "weight" in name:
- # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
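- # e.g. with initializer_range=0.02 and num_layers=32 (illustrative values),
- # std = 0.02 / sqrt(2 * 32) = 0.0025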
- p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_layers)))
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, GPTPanguModel):
- module.gradient_checkpointing = value
-
-
- class GPTPanguModel(GPTPanguPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
-
- self.embed_dim = config.hidden_size
- self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
- self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
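- # PanGu-specific "top query" embeddings: position-indexed query vectors that the
- # final transformer block uses as its attention query (see custom_query in forward)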
- self.wqe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
-
- self.drop = nn.Dropout(config.embd_pdrop)
- self.h = nn.ModuleList([GPTPanguBlock(config) for _ in range(config.num_layers)])
- self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
-
- self.gradient_checkpointing = False
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.wte
-
- def set_input_embeddings(self, new_embeddings):
- self.wte = new_embeddings
-
- def forward(
- self,
- input_ids=None,
- past_key_values=None,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- inputs_embeds=None,
- use_cache=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- ):
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- input_ids = input_ids.view(-1, input_shape[-1])
- batch_size = input_ids.shape[0]
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- batch_size = inputs_embeds.shape[0]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- if token_type_ids is not None:
- token_type_ids = token_type_ids.view(-1, input_shape[-1])
- if position_ids is not None:
- position_ids = position_ids.view(-1, input_shape[-1])
-
- if past_key_values is None:
- past_length = 0
- past_key_values = tuple([None] * len(self.h))
- else:
- past_length = past_key_values[0][0].size(-2)
- if position_ids is None:
- position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
- position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
- # GPT2Attention mask.
- if attention_mask is not None:
- if batch_size <= 0:
- raise ValueError("batch_size has to be defined and > 0")
- attention_mask = attention_mask.view(batch_size, -1)
- # We create a 3D attention mask from a 2D tensor mask.
- # Sizes are [batch_size, 1, 1, to_seq_length]
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
- # this attention mask is more simple than the triangular masking of causal attention
- # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
- attention_mask = attention_mask[:, None, None, :]
-
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and -10000.0 for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
- attention_mask = (1.0 - attention_mask) * -10000.0
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x num_heads x N x N
- # head_mask has shape n_layer x batch x num_heads x N x N
- head_mask = self.get_head_mask(head_mask, self.config.num_layers)
-
- if inputs_embeds is None:
- inputs_embeds = self.wte(input_ids)
- position_embeds = self.wpe(position_ids)
- hidden_states = inputs_embeds + position_embeds
-
- if token_type_ids is not None:
- token_type_embeds = self.wte(token_type_ids)
- hidden_states = hidden_states + token_type_embeds
-
- hidden_states = self.drop(hidden_states)
- output_shape = input_shape + (hidden_states.size(-1),)
-
- # top attention custom query
- last_layer_id = len(self.h) - 1
- query_embeds = self.wqe(position_ids)
-
- presents = () if use_cache else None
- all_self_attentions = () if output_attentions else None
- all_hidden_states = () if output_hidden_states else None
- for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
- # Final LayerNorm before last query layer
- if i == last_layer_id:
- hidden_states = self.ln_f(hidden_states)
-
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if self.gradient_checkpointing and self.training:
-
- if use_cache:
- logger.warning(
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
- )
- use_cache = False
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- # None for past_key_value
- return module(*inputs, use_cache, output_attentions)
-
- return custom_forward
-
- # checkpoint() does not forward keyword arguments to the wrapped function,
- # so the block inputs are passed positionally (layer_past is always None here)
- outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(block),
- hidden_states,
- None,
- attention_mask,
- head_mask[i],
- # custom query for the top (last) block
- query_embeds if i == last_layer_id else None,
- )
- else:
- outputs = block(
- hidden_states,
- layer_past=layer_past,
- attention_mask=attention_mask,
- head_mask=head_mask[i],
- # custom query
- custom_query=query_embeds if i == last_layer_id else None,
- use_cache=use_cache,
- output_attentions=output_attentions,
- )
-
- hidden_states = outputs[0]
- if use_cache is True:
- presents = presents + (outputs[1],)
-
- if output_attentions:
- all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
- hidden_states = hidden_states.view(*output_shape)
- # Add last hidden state
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if not return_dict:
- return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
-
- return BaseModelOutputWithPast(
- last_hidden_state=hidden_states,
- past_key_values=presents,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- )
-
-
- class GPTPanguForCausalLM(GPTPanguPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
-
- self.transformer = GPTPanguModel(config)
-
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.lm_head
-
- def set_output_embeddings(self, new_embeddings):
- self.lm_head = new_embeddings
-
- def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
- token_type_ids = kwargs.get("token_type_ids", None)
- # only last token for inputs_ids if past is defined in kwargs
- if past:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- if token_type_ids is not None:
- token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
-
- attention_mask = kwargs.get("attention_mask", None)
- position_ids = kwargs.get("position_ids", None)
-
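- # Worked example (illustrative): for a left-padded mask [[0, 0, 1, 1, 1]],
- # cumsum(-1) - 1 gives [-1, -1, 0, 1, 2] and masked_fill_ maps the padded slots
- # to 1, yielding position_ids [[1, 1, 0, 1, 2]]; padded positions are masked out
- # of attention anyway, so they only need a valid index.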
- if attention_mask is not None and position_ids is None:
- # create position_ids on the fly for batch generation
- position_ids = attention_mask.long().cumsum(-1) - 1
- position_ids.masked_fill_(attention_mask == 0, 1)
- if past:
- position_ids = position_ids[:, -1].unsqueeze(-1)
- else:
- position_ids = None
- return {
- "input_ids": input_ids,
- "past_key_values": past,
- "use_cache": kwargs.get("use_cache"),
- "position_ids": position_ids,
- "attention_mask": attention_mask,
- "token_type_ids": token_type_ids,
- }
-
- def forward(
- self,
- input_ids=None,
- past_key_values=None,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- inputs_embeds=None,
- labels=None,
- use_cache=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- ):
- r"""
- labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
- ``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
- ``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- hidden_states = transformer_outputs[0]
-
- lm_logits = self.lm_head(hidden_states)
-
- loss = None
- if labels is not None:
- # Shift so that tokens < n predict n
- shift_logits = lm_logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- # Flatten the tokens
- loss_fct = nn.CrossEntropyLoss()
- loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
- if not return_dict:
- output = (lm_logits,) + transformer_outputs[1:]
- return ((loss,) + output) if loss is not None else output
-
- return CausalLMOutputWithPast(
- loss=loss,
- logits=lm_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
-
- @staticmethod
- def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
- """
- This function is used to re-order the :obj:`past_key_values` cache if
- :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
- called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
- """
- return tuple(
- tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
- for layer_past in past
- )
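-
-
- # Minimal usage sketch (assumes the matching GPTPanguConfig and tokenizer are available,
- # e.g. registered via auto_map / trust_remote_code; the checkpoint name is hypothetical):
- #
- # from transformers import AutoTokenizer, AutoModelForCausalLM
- #
- # tokenizer = AutoTokenizer.from_pretrained("your-org/pangu-alpha", trust_remote_code=True)
- # model = AutoModelForCausalLM.from_pretrained("your-org/pangu-alpha", trust_remote_code=True)
- # inputs = tokenizer("...", return_tensors="pt")
- # generated = model.generate(**inputs, max_new_tokens=32, use_cache=True)
- # print(tokenizer.decode(generated[0], skip_special_tokens=True))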