|
- import mindspore
- from mindspore.ops import constexpr
- import numpy as np
- from mindspore import ops, context
- from mindspore import Tensor
- import mindspore
- from mindspore import dtype as mstype
- import mindspore.numpy as msnp
- from mindspore.common.initializer import One, Normal
- from mindspore import nn
- from mindspore import dtype as mstype
-
- import mindspore
- import mindspore.numpy as msnp
- import numpy as np
- from mindspore import Tensor, context, ms_function
- from mindspore import nn
- from mindspore import ops
- from mindspore.ops import constexpr
- from mindspore.ops import functional as F
- from mindspore import dtype as mstype
- from mindspore.common.initializer import One, Normal
- from mindspore.nn.loss.loss import LossBase
-
-
@constexpr
def convert_to_tensor(obj):
    """
    Convert type of obj to Tensor.

    Runs at graph-compile time (@constexpr), so `obj` must be a Python
    constant available during compilation, not a runtime Tensor.
    """
    return Tensor(obj)
-
def euclidean_dist_MS(x, y):
    """Pairwise *squared* Euclidean distances between rows of x and y.

    Given x of shape (n, d) and y of shape (m, d), returns an (n, m) tensor
    where entry [i, j] = sum_k (x[i, k] - y[j, k]) ** 2.

    NOTE(review): despite the name, no sqrt is taken — the result holds
    squared distances; confirm that callers expect that.
    """
    power = ops.Pow()
    n_rows = x.shape[0]
    m_rows = y.shape[0]

    # Tile both inputs up to (n, m, d) so the difference is elementwise.
    x_tiled = msnp.expand_dims(x, 1).repeat(m_rows, axis=1)
    y_tiled = msnp.expand_dims(y, 0).repeat(n_rows, axis=0)
    return power(x_tiled - y_tiled, 2).sum(axis=2)
-
class Tripletloss(LossBase):
    """Triplet margin loss over L2-normalized embeddings.

    Expects `logit` rows to be grouped as consecutive
    (anchor, positive, negative) triplets, i.e. the batch size must be a
    multiple of 3. Returns the per-triplet hinge loss (shape (n_triplets, 1)),
    not a scalar reduction.
    """

    def __init__(self, margin=0.1):
        super(Tripletloss, self).__init__()
        self.margin = margin
        self.sqrt = ops.Sqrt()
        self.reduce_sum = ops.ReduceSum(keep_dims=True)
        self.square = ops.Square()
        self.div = ops.Div()
        self.reshape = ops.Reshape()
        self.split = ops.Split(1, 3)
        self.relu = nn.ReLU()
        self.expand_dims = ops.ExpandDims()

    def construct(self, logit, label=None):
        """Return max(0, d(a,p) - d(a,n) + margin) per triplet; `label` is unused."""
        feat_dim = logit.shape[1]

        # L2-normalize every embedding row.
        row_norm = self.sqrt(self.reduce_sum(self.square(logit), 1))
        normalized = self.div(logit, row_norm)

        # Regroup the flat batch into (n_triplets, 3, feat_dim) and split
        # out the anchor / positive / negative streams.
        triplets = self.reshape(normalized, (-1, 3, feat_dim))
        anchor, positive, negative = self.split(triplets)
        anchor = F.reshape(anchor, (-1, feat_dim))
        positive = self.reshape(positive, (-1, feat_dim))
        negative = self.reshape(negative, (-1, feat_dim))

        # Squared Euclidean distances anchor->positive and anchor->negative.
        dist_ap = self.reduce_sum(self.square(anchor - positive), 1)
        dist_an = self.reduce_sum(self.square(anchor - negative), 1)

        # Hinge: penalize only when the negative is not margin-far enough.
        return self.relu(dist_ap - dist_an + self.margin)
-
-
-
-
-
# Example usage (the class defined above is `Tripletloss`, not `TripleLoss`):
# inputs = mindspore.Tensor([[1.0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0],
#                            [0, 0, 2, 0, 0, 0], [0, 0, 0, 2.0, 0, 0],
#                            [0, 0, 0, 0, 3.0, 0], [0, 0, 0, 0, 0, 3.0],
#                            ], dtype=mstype.float32)
# targets = mindspore.Tensor([0, 0, 1, 1, 2, 2])
# loss = Tripletloss()
# out = loss(inputs, targets)
# print(out)
# print(out.shape)
-
-
class SoftMaxCE(mindspore.nn.Cell):
    """Numerically stable softmax cross-entropy, averaged over the batch."""

    def __init__(self):
        super(SoftMaxCE, self).__init__()
        self.max = ops.ReduceMax(keep_dims=True)
        self.sum = ops.ReduceSum(keep_dims=True)
        self.mean = ops.ReduceMean(keep_dims=False)
        self.exp = ops.Exp()
        self.div = ops.Div()
        self.onehot = ops.OneHot()
        self.mul = ops.Mul()
        self.log = ops.Log()
        self.onvalue = Tensor(1.0, mindspore.dtype.float32)
        self.offvalue = Tensor(0.0, mindspore.dtype.float32)
        self.eps = Tensor(1e-30, mindspore.dtype.float32)

    def construct(self, logits, total_label):
        """Return the mean cross-entropy of `logits` against integer labels."""
        # Subtract the per-row max before exponentiating for stability.
        row_max = self.max(logits, 1)
        shifted_exp = self.exp(logits - row_max)
        probs = self.div(shifted_exp, self.sum(shifted_exp, 1))

        # One-hot encode the targets over the class dimension of the logits.
        onehot_label = self.onehot(total_label, F.shape(logits)[-1],
                                   self.onvalue, self.offvalue)

        # eps guards log(0) for classes with vanishing probability.
        log_probs = self.log(probs + self.eps)
        per_sample = self.sum(self.mul(log_probs, onehot_label), -1)
        per_sample = self.mul(ops.scalar_to_array(-1.0), per_sample)
        return self.mean(per_sample, 0)
-
-
class Loss(nn.Cell):
    """Combined criterion: equal-weight sum of triplet loss and cross-entropy.

    Fixes:
      * The original constructed `TripleLoss()`, an undefined name — the class
        defined in this file is `Tripletloss` — so building this cell raised
        NameError. Now uses `Tripletloss()`.
      * Removed the leftover debug `print` from the forward pass; it did not
        affect the returned value.
    """

    def __init__(self):
        super().__init__()
        self.margin = Tripletloss()      # was: TripleLoss() -> NameError
        self.softmaxce = SoftMaxCE()

    def construct(self, logits, total_label):
        """Return 0.5 * triplet_loss + 0.5 * cross_entropy for the batch."""
        triplet = self.margin(logits, total_label)
        ce = self.softmaxce(logits, total_label)

        # Cast both parts to float32 before mixing them.
        triplet_f32 = msnp.array(triplet, msnp.float32)
        ce_f32 = msnp.array(ce, msnp.float32)
        return 0.5 * triplet_f32 + 0.5 * ce_f32
-
if __name__ == '__main__':
    # Six embeddings forming two (anchor, positive, negative) triplets,
    # with matching integer class labels.
    demo_logits = mindspore.Tensor([[1.0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0],
                                    [0, 0, 2, 0, 0, 0], [0, 0, 0, 2.0, 0, 0],
                                    [0, 0, 0, 0, 3.0, 0], [0, 0, 0, 0, 0, 3.0],
                                    ], dtype=mstype.float32)
    demo_labels = mindspore.Tensor([0, 0, 1, 1, 2, 2])
    print(1)
    criterion = Loss()
    total_loss = criterion(demo_logits, demo_labels)
    print(total_loss, type(total_loss))
|