import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.common import initializer as init


class ScaledDotProductAttention(nn.Cell):
    '''
    Multi-head scaled dot-product attention.
    '''
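    # Per head: Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V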

    def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
        '''
        :param d_model: Output dimensionality of the model
        :param d_k: Dimensionality of queries and keys
        :param d_v: Dimensionality of values
        :param h: Number of heads
        :param dropout: Dropout probability applied to the attention weights
        '''
        super(ScaledDotProductAttention, self).__init__()
        self.fc_q = nn.Dense(d_model, h * d_k)
        self.fc_k = nn.Dense(d_model, h * d_k)
        self.fc_v = nn.Dense(d_model, h * d_v)
        self.fc_o = nn.Dense(h * d_v, d_model)
        # MindSpore >= 2.0: `p` is the drop probability (earlier versions took `keep_prob` instead).
        self.dropout = nn.Dropout(p=dropout)

        self.d_model = d_model
        self.d_k = d_k
        self.d_v = d_v
        self.h = h

        self.init_weights()

    def init_weights(self):
        # Re-initialize parameters in place using MindSpore's initializer API
        # (the PyTorch-style self.modules()/nn.Linear loop does not exist here).
        for _, cell in self.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.set_data(init.initializer(
                    init.HeNormal(mode='fan_out'), cell.weight.shape, cell.weight.dtype))
                if cell.has_bias:
                    cell.bias.set_data(init.initializer(
                        init.Constant(0), cell.bias.shape, cell.bias.dtype))
            elif isinstance(cell, nn.BatchNorm2d):
                cell.gamma.set_data(init.initializer(
                    init.Constant(1), cell.gamma.shape, cell.gamma.dtype))
                cell.beta.set_data(init.initializer(
                    init.Constant(0), cell.beta.shape, cell.beta.dtype))
            elif isinstance(cell, nn.Dense):
                cell.weight.set_data(init.initializer(
                    init.Normal(sigma=0.001), cell.weight.shape, cell.weight.dtype))
                if cell.has_bias:
                    cell.bias.set_data(init.initializer(
                        init.Constant(0), cell.bias.shape, cell.bias.dtype))

    def construct(self, queries, keys, values, attention_mask=None, attention_weights=None):
        '''
        Computes multi-head scaled dot-product attention.
        :param queries: Queries (b_s, nq, d_model)
        :param keys: Keys (b_s, nk, d_model)
        :param values: Values (b_s, nk, d_model)
        :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.
        :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).
        :return: Output tensor (b_s, nq, d_model)
        '''
        b_s, nq = queries.shape[:2]
        nk = keys.shape[1]

        q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).transpose(0, 2, 1, 3)  # (b_s, h, nq, d_k)
        k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).transpose(0, 2, 3, 1)  # (b_s, h, d_k, nk)
        v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).transpose(0, 2, 1, 3)  # (b_s, h, nk, d_v)

        # Attention scores: Q K^T scaled by sqrt(d_k).
        att = ops.matmul(q, k) / np.sqrt(self.d_k)  # (b_s, h, nq, nk)
        if attention_weights is not None:
            att = att * attention_weights
        if attention_mask is not None:
            att = att.masked_fill(attention_mask, -np.inf)
        att = ops.softmax(att, -1)
        att = self.dropout(att)

        out = ops.matmul(att, v).transpose(0, 2, 1, 3).reshape(b_s, nq, self.h * self.d_v)  # (b_s, nq, h*d_v)
        out = self.fc_o(out)  # (b_s, nq, d_model)
        return out


if __name__ == '__main__':
    # Random input: batch of 50 sequences, 49 tokens each, with the last
    # dimension matching d_model (the original (50, 49, 42) would not fit
    # the Dense(512, ...) projections).
    shape = (50, 49, 512)
    stdnormal = ops.StandardNormal(seed=2)
    x = stdnormal(shape)
    sa = ScaledDotProductAttention(d_model=512, d_k=512, d_v=512, h=8)
    output = sa(x, x, x)
    print(output.shape)
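
    # A minimal sketch of masked attention (hypothetical usage, not part of the
    # original script): a boolean causal mask where True marks positions to be
    # hidden; it broadcasts over the batch and head dimensions of (b_s, h, nq, nk).
    causal_mask = Tensor(np.triu(np.ones((49, 49), dtype=bool), k=1))
    masked_output = sa(x, x, x, attention_mask=causal_mask)
    print(masked_output.shape)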