import math
from typing import Optional

import torch
import torch.nn.functional as F
import bmtrain as bmt


class Embedding(bmt.DistributedModule):
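    """Embedding layer whose weight is stored as a ``bmt.DistributedParameter``,
    letting BMTrain shard it across ranks.

    The same weight matrix serves two purposes: token lookup in :meth:`forward`,
    and, with ``projection=True``, a tied output projection onto the vocabulary.
    """
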
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[torch.Tensor] = None,
                 dtype=None):
        super().__init__()

        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
            elif padding_idx < 0:
                assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if _weight is None:
            self.weight = bmt.DistributedParameter(
                torch.empty(num_embeddings, embedding_dim, dtype=dtype, device="cuda"),
                init_method=torch.nn.init.normal_)
        else:
            self.weight = bmt.DistributedParameter(_weight)

        self.sparse = sparse

    @classmethod
    def from_pretrained(cls, embeddings, freeze=True, padding_idx=None,
                        max_norm=None, norm_type=2., scale_grad_by_freq=False,
                        sparse=False):
- r"""Creates Embedding instance from given 2-dimensional FloatTensor.
-
- Args:
- embeddings (Tensor): FloatTensor containing weights for the Embedding.
- First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``.
- freeze (boolean, optional): If ``True``, the tensor does not get updated in the learning process.
- Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True``
- padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
- therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
- i.e. it remains as a fixed "pad".
- max_norm (float, optional): See module initialization documentation.
- norm_type (float, optional): See module initialization documentation. Default ``2``.
- scale_grad_by_freq (boolean, optional): See module initialization documentation. Default ``False``.
- sparse (bool, optional): See module initialization documentation.
-
- Examples::
-
- >>> # FloatTensor containing pretrained weights
- >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
- >>> embedding = nn.Embedding.from_pretrained(weight)
- >>> # Get embeddings for index 1
- >>> input = torch.LongTensor([1])
- >>> embedding(input)
- tensor([[ 4.0000, 5.1000, 6.3000]])
- """
        assert embeddings.dim() == 2, \
            'Embeddings parameter is expected to be 2-dimensional'
        rows, cols = embeddings.shape
        embedding = cls(
            num_embeddings=rows,
            embedding_dim=cols,
            _weight=embeddings,
            padding_idx=padding_idx,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            sparse=sparse)
        embedding.weight.requires_grad = not freeze
        return embedding

    def forward(self, input: torch.Tensor, projection: bool = False) -> torch.Tensor:
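        """Embed token ids, or project hidden states back onto the vocabulary.

        With ``projection=False``, ``input`` holds token ids and the result is
        their embedding vectors. With ``projection=True``, ``input`` holds
        hidden states of size ``embedding_dim`` and the result is logits over
        the vocabulary, computed against the tied embedding weight and scaled
        by ``1 / sqrt(embedding_dim)``.
        """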
        if not projection:
            # Standard lookup: map token ids to their embedding vectors.
            return F.embedding(
                input, self.weight, self.padding_idx, self.max_norm,
                self.norm_type, self.scale_grad_by_freq, self.sparse)
        else:
            # Tied output projection: reuse the embedding matrix as the output
            # layer, with a 1/sqrt(embedding_dim) scale on the logits.
            return F.linear(input, self.weight) / math.sqrt(self.embedding_dim)

    def extra_repr(self) -> str:
        s = '{num_embeddings}, {embedding_dim}'
        if self.padding_idx is not None:
            s += ', padding_idx={padding_idx}'
        if self.max_norm is not None:
            s += ', max_norm={max_norm}'
        if self.norm_type != 2:
            s += ', norm_type={norm_type}'
        if self.scale_grad_by_freq is not False:
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        if self.sparse is not False:
            s += ', sparse=True'
        return s.format(**self.__dict__)
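
# A minimal usage sketch, assuming a CUDA device and a distributed launch
# (e.g. via torchrun); the vocabulary size, hidden size, and tensor shapes
# below are illustrative only and not part of the module above.
if __name__ == "__main__":
    bmt.init_distributed(seed=0)

    emb = Embedding(num_embeddings=32, embedding_dim=16, dtype=torch.half)

    ids = torch.randint(0, 32, (2, 8), device="cuda")  # (batch, seq_len) token ids
    hidden = emb(ids)                                  # (2, 8, 16) embeddings
    logits = emb(hidden, projection=True)              # (2, 8, 32) tied-weight logits
    bmt.print_rank(hidden.shape, logits.shape)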