|
- import os
- import argparse
- import moxing as mox
- from dataset import create_dataset
- from config import convmixer_cfg as cfg
- from net import convmixer_net
- import mindspore.nn as nn
- from mindspore import context
- from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
- from mindspore.train import Model
- from mindspore.train.callback import Callback
- from mindspore.nn.metrics import Accuracy
- from mindspore.common import set_seed
- from hccl.split.api import set_split_strategy_by_size
-
# Configure the HCCL gradient-fusion split strategy before any device/graph
# initialization takes place.
set_split_strategy_by_size([10, 10, 10, 10, 15, 15, 15, 15])

# Runtime environment tweaks for the Ascend/GE backend.
_ENV_SETTINGS = {
    'TF_CPP_MIN_LOG_LEVEL': '3',    # silence TensorFlow C++ logging
    'WHICH_OP': 'GEOP',
    'NEW_GE_FE_ID': '1',
    'GE_AICPU_FLAG': '1',
    'HCCL_CONNECT_TIMEOUT': '600',  # seconds to wait for HCCL peers
    'GE_USE_STATIC_MEMORY': '1',
}
for _key, _value in _ENV_SETTINGS.items():
    os.environ[_key] = _value
# Drop any inherited HTTP proxy so in-cluster transfers go direct.
os.environ.pop('http_proxy', None)

# Default workspace root directory.
environment = 'train'
workroot = '/home/ma-user/modelarts'  # root used by debug/training jobs
print('current work mode:' + environment + ', workroot:' + workroot)
-
parser = argparse.ArgumentParser(description='MindSpore convmixer-net')

# data_url / train_url are the fixed parameter names ModelArts supplies for
# the dataset location and the model output location respectively.
parser.add_argument('--data_url',
                    default=workroot + '/user-job-dir/inputs/',
                    help='path to training/inference dataset folder')
parser.add_argument('--train_url',
                    default=workroot + '/outputs/model/',
                    help='model folder to save/load')
parser.add_argument('--device_target',
                    type=str,
                    choices=['Ascend', 'CPU'],
                    default="Ascend",
                    help='device where the code will be implemented (default: CPU),若要在启智平台上使用NPU,需要在启智平台训练界面上加上运行参数device_target=Ascend')
# ModelArts already consumes data_url and train_url by default.
parser.add_argument('--epoch_size',
                    type=int,
                    default=cfg.epoch_size,
                    help='Training epochs.')
-
class Eval_Net(Callback):
    """Training callback that evaluates the network on a held-out dataset.

    Bug fix: the hook was originally named ``step__end`` (double underscore),
    a name the MindSpore callback machinery never calls, so evaluation
    silently never ran. Renamed to the documented ``step_end`` hook.
    """

    def __init__(self, eval_dataset):
        super().__init__()
        # Dataset evaluated at the end of every training step.
        self.eval_dataset = eval_dataset

    def step_end(self, run_context):
        """Evaluate after each training step and print the result.

        NOTE(review): this calls ``eval`` on ``train_network`` taken from the
        callback params, not on a ``Model`` — confirm this returns a metric
        and not just the cell in eval mode.
        """
        cb_params = run_context.original_args()
        acc = cb_params.train_network.eval(self.eval_dataset)
        print("acc : {}".format(acc))


# Fix the global RNG seed for reproducibility.
set_seed(1)
-
- if __name__ == "__main__":
- mox.run(input_fn=input_fn,
- model_fn=model_fn,
- optimizer_fn=optimizer_fn,
- checkpoint_path=FLAGS.init_checkpoint,
- run_mode=mox.ModeKeys.TRAIN,
- batch_size=FLAGS.train_batch_size,
- log_dir=FLAGS.nas_train_url if FLAGS.nas_train_url else FLAGS.train_url,
- max_number_of_steps=max_number_of_steps,
- log_every_n_steps=FLAGS.log_every_n_steps,
- save_model_steps=FLAGS.save_interval_steps,
- save_summary_steps=FLAGS.save_summaries_steps,
- auto_batch=False)
- args = parser.parse_args()
- print('args:')
- print(args)
-
- data_dir = workroot + '/inputs' #数据集存放路径
- #初始化数据存放目录
- if not os.path.exists(data_dir):
- os.mkdir(data_dir)
- #初始化模型存放目录
- obs_train_url = args.train_url
- train_dir = data_dir + '/train_0' #模型存放路径
- if not os.path.exists(train_dir):
- os.mkdir(train_dir)
- ######################## 将数据集从obs拷贝到训练镜像中 (固定写法)########################
- # 在训练环境中定义data_url和train_url,并把数据从obs拷贝到相应的固定路径,以下写法是将数据拷贝到/home/work/user-job-dir/data/目录下,可修改为其他目录
- #创建数据存放的位置
- if environment == 'train':
- obs_data_url = args.data_url
- #将数据拷贝到训练环境
- try:
- mox.file.copy_parallel(obs_data_url, data_dir)
- print("Successfully Download {} to {}".format(obs_data_url,
- data_dir))
- except Exception as e:
- print('moxing download {} to {} failed: '.format(
- obs_data_url, data_dir) + str(e))
- ######################## 将数据集从obs拷贝到训练镜像中 ########################
-
- #注意:这里很重要,指定了训练所用的设备CPU还是Ascend NPU
- context.set_context(mode=context.GRAPH_MODE,
- device_target=args.device_target)
- #创建数据集
- print("data_dir: ",data_dir)
- ds_train = create_dataset(data_dir+"/train_0",cfg.batch_size)
- val_dir = data_dir + '/val_1'
- ds_val = create_dataset(val_dir)
-
- if ds_train.get_dataset_size() == 0:
- raise ValueError(
- "Please check dataset size > 0 and batch_size <= dataset size")
- #创建网络
- print("dataset size: ",ds_train.get_dataset_size())
- network = convmixer_net()
- net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
- net_opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
- time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
- net_eval = Eval_Net(eval_dataset=ds_val)
-
- model = Model(network,net_loss,net_opt,
- metrics={"accuracy": Accuracy()},
- amp_level="O2")
-
- config_ck = CheckpointConfig(
- save_checkpoint_steps=cfg.save_checkpoint_steps,
- keep_checkpoint_max=cfg.keep_checkpoint_max)
- #定义模型输出路径
- ckpoint_cb = ModelCheckpoint(prefix="checkpoint_convmixernet",
- directory=train_dir,
- config=config_ck)
- #开始训练
- print("============== Starting Training ==============")
- if (args.epoch_size):
- epoch_size = args.epoch_size
- print('epoch_size is: ', epoch_size)
-
- model.train(epoch=epoch_size,
- train_dataset=ds_train,
- callbacks=[time_cb, ckpoint_cb,
- LossMonitor(),net_eval])
-
- ######################## 将输出的模型拷贝到obs(固定写法) ########################
- # 把训练后的模型数据从本地的运行环境拷贝回obs,在启智平台相对应的训练任务中会提供下载
- if environment == 'train':
- try:
- mox.file.copy_parallel(train_dir, obs_train_url)
- print("Successfully Upload {} to {}".format(train_dir,
- obs_train_url))
- except Exception as e:
- print('moxing upload {} to {} failed: '.format(train_dir,
- obs_train_url) + str(e))
- ######################## 将输出的模型拷贝到obs ########################
|