# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""PANGU training wrapper."""

import numpy as np

import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.nn.optim.optimizer import Optimizer
from mindspore.nn.optim.adam import _adam_opt, _check_param_value
from mindspore.communication.management import get_group_size
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.ops.operations.comm_ops import _VirtualDataset

from utils_fix import ClipByGlobalNorm

GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
clip_grad = C.MultitypeFuncGraph("clip_grad")


@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (Tensor): A single gradient tensor.

    Outputs:
        Tensor, the clipped gradient.
    """
    if clip_type not in [0, 1]:
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        new_grad = C.clip_by_value(
            grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
            F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        new_grad = nn.ClipByNorm()(
            grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad
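

# Usage sketch (illustrative only): clip_grad is applied elementwise over the
# gradient tuple through a HyperMap, as in the fallback clipping branch of
# PANGUTrainOneStepWithLossScaleCell.construct below, e.g.
#   grads = C.HyperMap()(
#       F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)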


grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()


@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Undo the loss scaling: divide a single gradient by the scale factor."""
    return grad * reciprocal(scale)


class VirtualDatasetOneInputCell(nn.Cell):
    """Wrap the backbone with _VirtualDataset to mark the dataset boundary for auto-parallel."""

    def __init__(self, backbone):
        super(VirtualDatasetOneInputCell, self).__init__(auto_prefix=False)
        self._backbone = backbone
        self._virtual_dataset = _VirtualDataset()

    def construct(self, *data):
        data_ = self._virtual_dataset(*data)
        return self._backbone(*data_)


class AdamWeightDecay(Optimizer):
    """
    AdamWeightDecay optimizer that also returns the learning rate it applied,
    so the training cell can surface it alongside the loss.
    """

    def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0):
        super(AdamWeightDecay, self).__init__(learning_rate, params, weight_decay)
        _check_param_value(beta1, beta2, eps, self.cls_name)
        self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
        self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
        self.eps = Tensor(np.array([eps]).astype(np.float32))
        self.moments1 = self.parameters.clone(prefix="adam_m", init='zeros')
        self.moments2 = self.parameters.clone(prefix="adam_v", init='zeros')
        self.hyper_map = C.HyperMap()

    def construct(self, gradients):
        lr = self.get_lr()
        if self.is_group:
            if self.is_group_lr:
                optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps),
                                              lr, self.weight_decay, self.parameters, self.moments1, self.moments2,
                                              gradients, self.decay_flags, self.optim_filter)
            else:
                optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr),
                                              self.weight_decay, self.parameters, self.moments1, self.moments2,
                                              gradients, self.decay_flags, self.optim_filter)
        else:
            optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr, self.weight_decay),
                                          self.parameters, self.moments1, self.moments2,
                                          gradients, self.decay_flags, self.optim_filter)
        if self.use_parallel:
            self.broadcast_params(optim_result)
        return optim_result, lr
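
# A hedged usage sketch (model and parameter names below are hypothetical, not
# defined in this module):
#   optimizer = AdamWeightDecay(network.trainable_params(),
#                               learning_rate=1e-4, weight_decay=1e-1)
#   updated, lr = optimizer(grads)  # unlike the stock optimizer, lr is returned too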


class PANGUTrainOneStepWithLossScaleCell(nn.Cell):
    """
    Encapsulation class of PANGU network training.

    Appends an optimizer to the training network; after that, the construct
    function can be called to create the backward graph.

    Args:
        network (Cell): The training network. Note that the loss function should
            already be included.
        optimizer (Optimizer): Optimizer for updating the weights.
        scale_update_cell (Cell): Cell to do the loss scale. Default: None.
    """
    def __init__(self,
                 network,
                 optimizer,
                 scale_update_cell=None,
                 enable_global_norm=False,
                 config=None):
        super(PANGUTrainOneStepWithLossScaleCell,
              self).__init__(auto_prefix=False)
        self.network = network
        self.config = config
        self.network.add_flags(defer_inline=True)
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        # Global-norm clipping is always enabled in this wrapper, regardless of
        # the value passed in for enable_global_norm.
        self.enable_global_norm = True
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [
                ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL
        ]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                       False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.ControlDepend(depend_mode=1)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(
                scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                name="loss_scale")
        self.clip = ClipByGlobalNorm(self.weights)
        assert self.enable_global_norm
        self.default_lr = Tensor([0.0], dtype=mstype.float32)
        print("Using global norm", flush=True)
        self.discard_norm = Tensor(0.4, mstype.float32)
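
    # One training step: run the forward pass, take gradients against a scaled
    # sensitivity, all-reduce and unscale them, clip by global norm, check the
    # NPU overflow status across devices, and only let the optimizer update the
    # weights when no overflow occurred.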
    @C.add_flags(has_effect=True)
    def construct(self, input_ids, input_position=None, attention_mask=None, layer_past=None, sens=None):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids, input_position, attention_mask)
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # Allocating and clearing the float status must happen right before the
        # grad operation so the overflow flags reflect only this step.
        init = self.alloc_status()
        self.clear_before_grad(init)
        grads = self.grad(self.network,
                          weights)(input_ids,
                                   input_position, attention_mask,
                                   self.cast(scaling_sens, mstype.float32))
        # Apply the grad reducer (all-reduce across data-parallel devices).
        grads = self.grad_reducer(grads)
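        # Undo the loss scaling: each gradient is divided by (scaling_sens * degree).
        # Because the reducer sums rather than averages (mean=False above), dividing
        # by self.degree also turns the summed gradients into a mean over devices.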
        grads = self.hyper_map(
            F.partial(grad_scale, scaling_sens * self.degree), grads)

        global_norm = None
        if self.enable_global_norm:
            grads, global_norms = self.clip(grads)
            global_norm = P.Reshape()(global_norms, ())
        else:
            grads = self.hyper_map(
                F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE),
                grads)

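        # Overflow check: NPUGetFloatStatus writes any inf/nan flags raised during
        # the backward pass into `init`; a positive sum means overflow. In the
        # distributed case the flag is all-reduced so every device skips the
        # update together.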
        self.get_status(init)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # Sum the overflow flag over devices.
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)

        overflow = cond
        if sens is None:
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        if overflow:
            # Skip the parameter update and report the placeholder learning rate.
            succ = False
            lr = self.default_lr
        else:
            succ, lr = self.optimizer(grads)
        ret = (loss, cond, scaling_sens, global_norm, lr)
        return F.depend(ret, succ)
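

# A hedged end-to-end sketch (names such as pangu_with_loss, lr_schedule and
# loss_scale_manager are placeholders, not defined in this file):
#   net = VirtualDatasetOneInputCell(pangu_with_loss)
#   optimizer = AdamWeightDecay(net.trainable_params(), learning_rate=lr_schedule)
#   update_cell = loss_scale_manager.get_update_cell()
#   train_net = PANGUTrainOneStepWithLossScaleCell(net, optimizer,
#                                                  scale_update_cell=update_cell)
#   loss, overflow, scale, global_norm, lr = train_net(input_ids, input_position,
#                                                      attention_mask)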