#823 [WIP] modify namedtuple

Open
Erpim wants to merge 1 commit from erpim_1225 into master
  1. mindtorch/torch/common/_inner.py (+8, -0)
  2. mindtorch/torch/functional.py (+13, -41)
  3. mindtorch/torch/nn/modules/adaptive.py (+1, -4)
  4. mindtorch/torch/tensor.py (+18, -30)
  5. testing/ut/pytorch/functional/test_reduction.py (+16, -8)

mindtorch/torch/common/_inner.py (+8, -0)

@@ -57,6 +57,14 @@ def _out_inplace_assign(out, output, op_name):
         return cast_to_adapter_tensor(output)
     return _out_assign_with_output(out, output, op_name)
 
+def _nametuple_out_inplace_assign(out, output, op_name):
+    r'''
+    Use to assign `out` with `output` when `output` is one or more MindSpore Tensors.
+    '''
+    if out is None:
+        return output
+    return _out_assign_with_output(out, output, op_name)
+
 
 def _inplace_assign_pynative(input, inplace, output, op_name):
     if inplace is True:
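
For orientation, the contract of the new helper is small: return the namedtuple untouched when `out` is None, otherwise hand off to `_out_assign_with_output`. A minimal standalone sketch of that behaviour (the `_out_assign_with_output` stub below is an assumption for illustration; the real one lives elsewhere in `_inner.py` and works on adapter tensors):

```python
from collections import namedtuple

def _out_assign_with_output(out, output, op_name):
    # Stand-in for the real helper: copy each result into the caller's `out` buffers.
    for dst, src in zip(out, output):
        dst[:] = src
    return out

def _nametuple_out_inplace_assign(out, output, op_name):
    if out is None:
        return output                      # namedtuple passed straight through
    return _out_assign_with_output(out, output, op_name)

MaxResult = namedtuple('max', ['values', 'indices'])
rlt = _nametuple_out_inplace_assign(None, MaxResult([3.0], [1]), 'max')
print(rlt.values, rlt.indices)             # fields stay accessible by name
```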


mindtorch/torch/functional.py (+13, -41)

@@ -19,7 +19,7 @@ from mindtorch.utils import unsupported_attr, pynative_mode_condition, is_under_
     set_multiple_name_tuple, INT32_MIN, INT64_MIN, INT32_MAX, INT64_MAX, FP64_MAX, FP64_MIN, FP32_MAX, FP32_MIN
 from mindtorch.torch.tensor import Tensor as adapter_tensor
 from mindtorch.torch.common._inner import _out_inplace_assign, _out_limit_pynative, \
-    _out_inplace_assign_with_adapter_tensor, _functional_inplace_assign
+    _out_inplace_assign_with_adapter_tensor, _functional_inplace_assign, _nametuple_out_inplace_assign
 from mindtorch.torch.common.dtype import _TypeDict, all_int_type, all_float_type, all_complex_type, finfo, \
     all_float_and_complex_type
 from mindtorch.torch.linalg import matrix_power as linalg_matrix_power
@@ -584,21 +584,9 @@ def max(input, dim=None, keepdim=False, other=None, *, out=None):
     value, indice = ms.ops.max(input_ms, dim, keepdim)
     value = value.astype(type)
     indice = indice.astype(ms.int64)
-    if pynative_mode_condition():
-        point = set_name_tuple('max')
-        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
-        if out is not None:
-            if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
-                raise TypeError("In max(), `out` should be tuple of Tensors.")
-            out[0].assign_value(value)
-            out[1].assign_value(indice)
-            return out
-        return rlt
-
-    if out is not None:
-        raise ValueError('In MindSpore static graph mode, `out` in `max` should be None, '
-                         'please set out=None and use return value instead of `out`.')
-    return cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice)
+    point = set_name_tuple('max')
+    rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
+    return _nametuple_out_inplace_assign(out, rlt, "max")
 
 
 # To achieve the polymorphism torch.min(Tensor input, Tensor other, *, Tensor out)
@@ -620,21 +608,10 @@ def min(input, dim=None, keepdim=False, other=None, *, out=None):
 
     result, indices = ms.ops.min(input_ms, dim, keepdim)
     result = result.astype(type)
-    if pynative_mode_condition():
-        point = set_name_tuple('min')
-        rlt = point(cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices))
-        if out is not None:
-            if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
-                raise TypeError("In min(), `out` should be tuple of Tensors.")
-            out[0].assign_value(result)
-            out[1].assign_value(indices)
-            return out
-        return rlt
+    point = set_name_tuple('min')
+    rlt = point(cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices))
+    return _nametuple_out_inplace_assign(out, rlt, "min")
 
-    if out is not None:
-        raise ValueError('In MindSpore static graph mode, `out` in `min` should be None, '
-                         'please set out=None and use return value instead of `out`.')
-    return cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices)
 
 def fmax(input, other, *, out=None):
     output = input.fmax(other)
@@ -2116,11 +2093,9 @@ def topk(input, k, dim=None, largest=True, sorted=True, *, out=None):
         value, indice = (ms.ops.zeros((0,), dtype=input.dtype), ms.ops.zeros((0,), dtype=ms.int32))
     else:
         value, indice = ms.ops.topk(input_x, k, dim, largest, sorted)
-    if pynative_mode_condition():
-        point = set_name_tuple('topk')
-        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
-        return rlt
-    return _out_inplace_assign(out, (value, indice), "topk")
+    point = set_name_tuple('topk')
+    rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
+    return _nametuple_out_inplace_assign(out, rlt, "topk")
 
 def addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
     _input, _batch1, _batch2 = cast_to_ms_tensor((input, batch1, batch2))
@@ -2843,12 +2818,9 @@ def histogramdd(input, bins=10, *, range=None, weight=None, density=False, out=None):
         bin_edges = [bin_edge.to(ms.float64) for bin_edge in bin_edges]
     else:
        hist, bin_edges = ms.numpy.histogramdd(input_ms, bins=bins, range=range, weights=weight, density=density)
-    output = (hist, bin_edges)
-    if pynative_mode_condition():
-        svd_namedtuple = set_multiple_name_tuple('histogramdd', 'hist, bin_edges')
-        output = svd_namedtuple(cast_to_adapter_tensor(hist), cast_to_adapter_tensor(bin_edges))
-        return output
-    return _out_inplace_assign(out, output, "histogramdd")
+    svd_namedtuple = set_multiple_name_tuple('histogramdd', 'hist, bin_edges')
+    output = svd_namedtuple(cast_to_adapter_tensor(hist), cast_to_adapter_tensor(bin_edges))
+    return _nametuple_out_inplace_assign(out, output, "histogramdd")
 
 def diag_embed(input, offset=0, dim1=-2, dim2=-1, *, out=None):
     output = input.diag_embed(offset=offset, dim1=dim1, dim2=dim2)
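
With the branches removed, `max`, `min`, `topk` and `histogramdd` now always build the namedtuple and route `out` through `_nametuple_out_inplace_assign`. A hedged usage sketch of the intended caller-facing behaviour (the `ms_torch` import alias and the PyTorch-style `values`/`indices` field names produced by `set_name_tuple` are assumptions here):

```python
import numpy as np
import mindtorch.torch as ms_torch  # assumed import alias, matching the tests

x = ms_torch.tensor(np.array([[1., 5.], [4., 2.]], dtype=np.float32))

result = ms_torch.max(x, dim=1, keepdim=True)
print(result.values, result.indices)   # field access, assuming PyTorch-style names
values, indices = result               # plain tuple unpacking still works

top = ms_torch.topk(x, k=1, dim=1)
print(top.values, top.indices)
```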


mindtorch/torch/nn/modules/adaptive.py (+1, -4)

@@ -3,7 +3,6 @@
 from collections import namedtuple
 
 from mindspore.ops.primitive import _primexpr
-from mindtorch.utils import pynative_mode_condition
 from .container import Sequential, ModuleList
 from .linear import Linear
 from .module import Module
@@ -132,9 +131,7 @@ class AdaptiveLogSoftmaxWithLoss(Module):
         if not is_batched:
             output = output.squeeze(0)
 
-        if pynative_mode_condition():
-            return _ASMoutput()(output, loss)
-        return output, loss
+        return _ASMoutput()(output, loss)
 
     def _get_full_log_prob(self, input, head_output):
         out = input.new_empty((head_output.shape[0], self.n_classes))
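
With the mode check gone, `AdaptiveLogSoftmaxWithLoss` returns the `_ASMoutput` namedtuple unconditionally, so downstream code can rely on field access in both modes. A rough usage sketch (the layer sizes, the import paths and the `output`/`loss` field names follow the PyTorch convention and are assumptions here):

```python
import mindtorch.torch as ms_torch    # assumed import alias
from mindtorch.torch import nn        # assumed module path

# Hypothetical sizes, purely for illustration.
asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=16, n_classes=100, cutoffs=[10, 50])
features = ms_torch.randn(4, 16)
targets = ms_torch.tensor([3, 20, 60, 99])

res = asm(features, targets)
print(res.output.shape)   # per-sample target log-probabilities
print(res.loss)           # mean negative log-likelihood
```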


mindtorch/torch/tensor.py (+18, -30)

@@ -1010,11 +1010,9 @@ class Tensor(StubTensor, metaclass=_TensorMeta):
         value, indice = ms.ops.min(input_ms, dim, keepdim)
         value = value.astype(type)
         indice = indice.astype(ms.int64)
-        if pynative_mode_condition():
-            point = set_name_tuple('min')
-            rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
-            return rlt
-        return cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice)
+        point = set_name_tuple('min')
+        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
+        return rlt
 
     # To achieve the polymorphism Tensor.max(Tensor input, Tensor other, *, Tensor out)
     # other=None is used to represent the keywords param input
@@ -1035,11 +1033,9 @@ class Tensor(StubTensor, metaclass=_TensorMeta):
         value, indice = ms.ops.max(input_ms, dim, keepdim)
         value = value.astype(type)
         indice = indice.astype(ms.int64)
-        if pynative_mode_condition():
-            point = set_name_tuple('max')
-            rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
-            return rlt
-        return cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice)
+        point = set_name_tuple('max')
+        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
+        return rlt
 
 
     def numel(self):
@@ -2167,11 +2163,9 @@ class Tensor(StubTensor, metaclass=_TensorMeta):
             value, indice = (ms.ops.zeros((0,), dtype=input_x.dtype), ms.ops.zeros((0,), dtype=ms.int32))
         else:
             value, indice = ms.ops.topk(input_x, k, dim, largest, sorted)
-        if pynative_mode_condition():
-            point = set_name_tuple('topk')
-            rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
-            return rlt
-        return cast_to_adapter_tensor((value, indice))
+        point = set_name_tuple('topk')
+        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
+        return rlt
 
     def maximum(self, other):
         x = cast_to_ms_tensor(self)
@@ -3545,11 +3539,9 @@ class Tensor(StubTensor, metaclass=_TensorMeta):
         else:
             # TODO: On GPU, ms.ops.median the return indices may be wrong.
            value, indices = ms.ops.median(input_ms, dim, keepdim)
-        if pynative_mode_condition():
-            point = set_name_tuple('median')
-            rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indices))
-            return rlt
-        return cast_to_adapter_tensor(value), cast_to_adapter_tensor(indices)
+        point = set_name_tuple('median')
+        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indices))
+        return rlt
 
     def frac(self):
         input_ms = cast_to_ms_tensor(self)
@@ -3988,11 +3980,9 @@ class Tensor(StubTensor, metaclass=_TensorMeta):
         if type_trans:
             values = values.astype(input_type)
         indices = indices.astype(ms.int64)
-        if pynative_mode_condition():
-            point = set_name_tuple('kthvalue')
-            rlt = point(cast_to_adapter_tensor(values), cast_to_adapter_tensor(indices))
-            return rlt
-        return cast_to_adapter_tensor(values), cast_to_adapter_tensor(indices)
+        point = set_name_tuple('kthvalue')
+        rlt = point(cast_to_adapter_tensor(values), cast_to_adapter_tensor(indices))
+        return rlt
 
     def _get_scatter_ndim_input(self, input, index, src, dim):
         index_stk = ()
@@ -4240,11 +4230,9 @@ class Tensor(StubTensor, metaclass=_TensorMeta):
             nanmedian_ = _get_cache_prim(ms.ops.Median)(global_median=False, axis=dim, keep_dims=keepdim,
                                                         ignore_nan=True)
             value, indices = nanmedian_(input_ms)
-        if pynative_mode_condition():
-            point = set_name_tuple('nanmedian')
-            rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indices))
-            return rlt
-        return cast_to_adapter_tensor(value), cast_to_adapter_tensor(indices)
+        point = set_name_tuple('nanmedian')
+        rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indices))
+        return rlt
 
     def backward(self, gradient=None, retain_graph=None, create_graph=False, inputs=None):
         unsupported_attr(gradient)
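
The Tensor methods (`min`, `max`, `topk`, `median`, `kthvalue`, `nanmedian`) follow the same pattern and now return the namedtuple in every mode. A small sketch of the call pattern this enables (import alias and `values`/`indices` field names again assumed to mirror PyTorch):

```python
import numpy as np
import mindtorch.torch as ms_torch  # assumed import alias

t = ms_torch.tensor(np.array([[7., 1., 4.], [2., 9., 5.]], dtype=np.float32))

mx = t.max(dim=0)
md = t.median(dim=1)
kv = t.kthvalue(k=2, dim=1)

print(mx.values, mx.indices)
print(md.values, md.indices)
print(kv.values, kv.indices)
```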


testing/ut/pytorch/functional/test_reduction.py (+16, -8)

@@ -8,7 +8,7 @@ import numpy as np
 from mindspore import context
 from mindtorch.torch.nn import Module
 from ...utils import SKIP_ENV_GRAPH_MODE, set_mode_by_env_config, param_compare, SKIP_ENV_ASCEND, \
-    SKIP_ENV_GPU
+    SKIP_ENV_GPU, TestNet, graph_lax_level
 set_mode_by_env_config()
 
 def test_max():
@@ -44,11 +44,12 @@ def test_max():
             x = ms_torch.max(input, dim=self.dim, keepdim=self.keepdim, out=self.out)
             return x
 
-    ms_max1 = MaxMs(dim=0)
-    ms_max2 = MaxMs(dim=1, keepdim=True)
+    with graph_lax_level():
+        ms_max1 = MaxMs(dim=0)
+        ms_max2 = MaxMs(dim=1, keepdim=True)
 
-    ms_out1 = ms_max1(ms_tensor)
-    ms_out2 = ms_max2(ms_tensor)
+        ms_out1 = ms_max1(ms_tensor)
+        ms_out2 = ms_max2(ms_tensor)
 
     param_compare(ms_out1, pt_out1)
     param_compare(ms_out2, pt_out2)
@@ -65,7 +66,6 @@ def test_max1():
     param_compare(torch_out, ms_out)
 
 
-@SKIP_ENV_GRAPH_MODE(reason="graph cannot support collections.namedtuple.")
 def test_max2():
     np_array = np.array([[1, 2],[3, 4]]).astype(np.int32)

@@ -73,10 +73,18 @@ def test_max2():
     torch_out = torch.max(torch_tensor, dim=1, keepdim=True)
 
     ms_tensor = ms_torch.tensor(np_array)
-    ms_out = ms_torch.max(ms_tensor, dim=1, keepdim=True)
-
-    param_compare(torch_out, ms_out)
+    def fn(ms_tensor):
+        # return ms_torch.max(ms_tensor, dim=1, keepdim=True)
+        output = ms_torch.max(ms_tensor, dim=1, keepdim=True)
+        return output[0], output[1]
+        # return ms_torch.max(ms_tensor, dim=1, keepdim=True)
+
+    with graph_lax_level():
+        net = TestNet(fn)
+        ms_out = net(ms_tensor)
+
+    param_compare(torch_out, ms_out)
 
 
 @SKIP_ENV_GRAPH_MODE(reason="graph cannot support collections.namedtuple.")

