- # Copyright 2021 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """train_criteo."""
- import argparse
- import json
- import os
-
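- # Install audio/text-processing dependencies at job start-up; assumed absent
- # from the base image (typical for a ModelArts-style job).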
- os.system('pip install librosa==0.8.1')
- os.system('pip install soundfile')
- os.system('pip install easydict')
- os.system('pip install sox')
- os.system('pip install unidecode')
- os.system('pip install inflect')
- os.system('pip install python-Levenshtein')
- os.system('pip install six')
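- # Show only ERROR-level MindSpore C++ logs (GLOG_v: 0=DEBUG, 1=INFO, 2=WARNING, 3=ERROR).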
- os.environ['GLOG_v'] = '3'
-
- from mindspore import context, Tensor, ParameterTuple
- from mindspore.communication.management import init, get_rank, get_group_size
- from mindspore.communication import management as MultiDevice
- from mindspore.context import ParallelMode
- from mindspore.nn.optim import AdamWeightDecay
- from mindspore.train import Model
- from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
- from mindspore.train.serialization import load_checkpoint, load_param_into_net
- from mindspore.train.loss_scale_manager import FixedLossScaleManager
- from mindspore.profiler import Profiler
- import moxing as mox
-
- from src.callback import Monitor, StopAtStep
- from src.config import train_config, symbols, encoder_kw, decoder_kw
- from src.dataset import create_dataset
- from src.model import Jasper, NetWithLossClass, init_weights
- from src.lr_generator import get_lr
-
- parser = argparse.ArgumentParser(description='Jasper training')
- parser.add_argument('--pre_trained_model_path',
- type=str,
- default='',
- help='Pretrained checkpoint path')
- parser.add_argument(
- '--is_distributed',
- action="store_true",
- default=False,
- help='Distributed training')
- parser.add_argument('--bidirectional',
- action="store_false",
- default=True,
- help='Pass this flag to disable the bidirectional RNN (enabled by default)')
- parser.add_argument('--device_target',
- type=str,
- default="Ascend",
- help='Device target, supports Ascend and GPU. Default: Ascend')
-
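- # ModelArts passes the OBS dataset and output locations as --data_url/--train_url;
- # workroot is the local working directory of the training job.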
- workroot = '/home/work/user-job-dir'
- parser.add_argument('--data_url',
- help='path to training/inference dataset folder',
- default=workroot + '/data/')
- parser.add_argument('--train_url', help='model folder to save/load', default=workroot + '/model/')
-
- args = parser.parse_args()
- print(args)
-
-
- def start_mox_arts(args):
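- """Download the dataset from OBS to the local data directory with MoXing."""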
- data_dir = workroot + '/data' # local directory for the dataset
- log_dir = workroot + '/log' # local directory for logs
- os.makedirs(data_dir, exist_ok=True)
- os.makedirs(log_dir, exist_ok=True)
- try:
- obs_data_url = args.data_url
- mox.file.copy_parallel(obs_data_url, data_dir)
- print("Successfully Download {} to {}".format(obs_data_url, data_dir))
- except Exception as e:
- print('moxing download {} to {} failed: '.format(obs_data_url, data_dir) + str(e))
-
-
- def end_mox_arts(obs_train_url, model_dir, log_dir=None):
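- """Upload the model directory (and optional log directory) from local disk to OBS."""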
- try:
- if log_dir is not None:
- mox.file.copy_parallel(log_dir, obs_train_url)
- mox.file.copy_parallel(model_dir, obs_train_url)
- print("Successfully Upload {} ,{} to {}".format(model_dir, log_dir, obs_train_url))
- except Exception as e:
- print('moxing upload {},{} to {} failed: '.format(model_dir, log_dir, obs_train_url) +
- str(e))
-
-
- if __name__ == '__main__':
- rank_id = 0
- group_size = 1
- config = train_config
- data_sink = False
- context.set_context(mode=context.GRAPH_MODE,
- device_target=args.device_target,
- save_graphs=False)
-
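- # Collect performance data for the whole run; results are parsed by profiler.analyse() after training.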
- profiler_output_path = './profiler_data'
- profiler = Profiler(output_path=profiler_output_path)
-
- if args.device_target == "GPU":
- context.set_context(enable_graph_kernel=False)
- if args.is_distributed:
- if args.device_target == "GPU":
- init()
- rank_id = get_rank()
- group_size = get_group_size()
- context.reset_auto_parallel_context()
- context.set_auto_parallel_context(device_num=get_group_size(),
- parallel_mode=ParallelMode.DATA_PARALLEL,
- gradients_mean=True)
- elif args.device_target == "Ascend":
- MultiDevice.init('hccl')
- context.reset_auto_parallel_context()
- context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
- device_num=MultiDevice.get_group_size(),
- gradients_mean=True)
- rank_id = MultiDevice.get_rank()
- group_size = MultiDevice.get_group_size()
- print(f"rank-{rank_id};Starting traning on multiple devices. |~ _ ~| |~ _ ~|")
-
- start_mox_arts(args)
-
- with open(config.DataConfig.labels_path) as label_file:
- labels = json.load(label_file)
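- # NOTE: the label set handed to create_dataset is `symbols` from src.config,
- # not the JSON loaded above.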
-
- ds_train = create_dataset(data_dir=config.DataConfig.Data_dir,
- manifest_filepath=config.DataConfig.train_manifest,
- labels=symbols,
- batch_size=config.DataConfig.batch_size,
- train_mode=True,
- rank=rank_id,
- group_size=group_size)
- steps_size = ds_train.get_dataset_size()
- print('steps_size:', steps_size)
-
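- # Build the per-step learning-rate schedule for the whole run and wrap it as a Tensor for the optimizer.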
- lr = get_lr(lr_init=config.OptimConfig.learning_rate,
- total_epochs=config.TrainingConfig.epochs,
- steps_per_epoch=steps_size)
- lr = Tensor(lr)
- jasper_net = Jasper(encoder_kw=encoder_kw, decoder_kw=decoder_kw)
- loss_net = NetWithLossClass(jasper_net)
- init_weights(loss_net)
- weights = ParameterTuple(jasper_net.trainable_params())
- optimizer = AdamWeightDecay(weights,
- learning_rate=lr,
- eps=config.OptimConfig.epsilon,
- weight_decay=1e-3)
- # Model (built below) wraps loss_net and the optimizer in its own train-step cell,
- # so an explicit TrainOneStepCell is not needed here.
- loss_net.set_train(True)
- if args.pre_trained_model_path != '':
- param_dict = load_checkpoint(args.pre_trained_model_path)
- load_param_into_net(loss_net, param_dict)
- print('Successfully loaded the pre-trained model')
-
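- # With drop_overflow_update=True, steps whose gradients overflow are skipped instead of applied.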
- loss_scale = FixedLossScaleManager(128.0, drop_overflow_update=True)
- model = Model(loss_net, optimizer=optimizer, loss_scale_manager=loss_scale)
- callback_list = [Monitor(lr)]
-
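- # Checkpointing: in distributed mode each rank gets its own ckpt_<rank> directory, but only rank 0 saves.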
- if args.is_distributed:
- print('Distributed training.')
- config.CheckpointConfig.ckpt_path = os.path.join(config.CheckpointConfig.ckpt_path,
- 'ckpt_' + str(get_rank()) + '/')
- if rank_id == 0:
- config_ck = CheckpointConfig(
- save_checkpoint_steps=steps_size,
- keep_checkpoint_max=config.CheckpointConfig.keep_checkpoint_max)
- ckpt_cb = ModelCheckpoint(prefix=config.CheckpointConfig.ckpt_file_name_prefix,
- directory=config.CheckpointConfig.ckpt_path,
- config=config_ck)
- callback_list.append(ckpt_cb)
- callback_list.append(StopAtStep(0, config.TrainingConfig.epochs * steps_size))
- else:
- print('Standalone training.')
- config_ck = CheckpointConfig(
- save_checkpoint_steps=steps_size,
- keep_checkpoint_max=config.CheckpointConfig.keep_checkpoint_max)
- ckpt_cb = ModelCheckpoint(prefix=config.CheckpointConfig.ckpt_file_name_prefix,
- directory=config.CheckpointConfig.ckpt_path,
- config=config_ck)
-
- callback_list.append(ckpt_cb)
- callback_list.append(StopAtStep(0, config.TrainingConfig.epochs * steps_size))
- print(len(callback_list), callback_list)
-
- model.train(config.TrainingConfig.epochs,
- ds_train,
- callbacks=callback_list,
- dataset_sink_mode=data_sink)
-
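- # Parse the profiling data collected above and write the analysis to profiler_output_path.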
- profiler.analyse()
- if rank_id == 0:
- end_mox_arts(args.train_url, config.CheckpointConfig.ckpt_path, profiler_output_path)