|
- import os
- import argparse
- import moxing as mox
- from dataset import create_dataset
- from config import convmixer_cfg as cfg
- from net import convmixer_net
- import mindspore.nn as nn
- from mindspore import context
- from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
- from mindspore.train import Model
- from mindspore.train.callback import Callback
- from mindspore.nn.metrics import Accuracy
- from mindspore.common import set_seed
- from mindspore.context import ParallelMode
- from mindspore.communication.management import init, get_rank, get_group_size
-
# Default workspace root: ModelArts debug jobs and training jobs mount the
# user directory at different paths, so pick the root from the run mode.
environment = 'train'
workroot = ('/home/ma-user/user-job-dir'   # path used by debug jobs
            if environment == 'debug'
            else '/home/work/user-job-dir')  # path used by training jobs
print('current work mode:' + environment + ', workroot:' + workroot)

parser = argparse.ArgumentParser(description='MindSpore convmixer-net')

# data_url / train_url are the fixed argument names ModelArts supplies for
# the dataset location and the model output location.
parser.add_argument('--data_url',
                    help='path to training/inference dataset folder',
                    default=workroot + '/data/')
parser.add_argument('--train_url',
                    help='model folder to save/load',
                    default=workroot + '/model/')
parser.add_argument(
    '--device_target',
    type=str,
    default="Ascend",
    choices=['Ascend'],
    help='device where the code will be implemented (default: CPU),若要在启智平台上使用NPU,需要在启智平台训练界面上加上运行参数device_target=Ascend')
# ModelArts already consumes data_url/train_url; epoch_size is our own knob.
parser.add_argument('--epoch_size',
                    type=int,
                    default=cfg.epoch_size,
                    help='Training epochs.')
-
class Eval_Net(Callback):
    """Callback that evaluates the in-training network after every step.

    Bug fix: the hook was originally named ``step__end`` (double underscore),
    a name MindSpore's callback dispatcher never calls, so the evaluation
    silently never ran. The correct hook name is ``step_end``.
    """

    def __init__(self, eval_dataset):
        super().__init__()
        self.eval_dataset = eval_dataset

    def step_end(self, run_context):
        # Evaluate the network currently being trained on the held-out set.
        acc = run_context.original_args().train_network.eval(self.eval_dataset)
        print("acc : {}".format(acc))
-
class EvalCallBack(Callback):
    """Run a validation pass every ``epochs_to_eval`` epochs during training.

    Prints the accuracy returned by ``model.eval`` on ``eval_dataset``
    whenever the current epoch number is a multiple of ``epochs_to_eval``.
    """

    def __init__(self, model, eval_dataset, epochs_to_eval):
        self.model = model
        self.eval_dataset = eval_dataset
        # Interval, in epochs, between two validation passes.
        self.epochs_to_eval = epochs_to_eval

    def epoch_end(self, run_context):
        """Invoked by MindSpore at the end of each training epoch."""
        epoch_now = run_context.original_args().cur_epoch_num
        if epoch_now % self.epochs_to_eval != 0:
            return
        # model was built with metrics={"accuracy"}; eval returns that dict.
        acc = self.model.eval(self.eval_dataset)
        print("------------验证集准确率为: {} ------------".format(acc["accuracy"]))
-
-
- set_seed(1)
-
-
-
if __name__ == "__main__":
    args = parser.parse_args()
    print('args:')
    print(args)

    data_dir = workroot + '/data'     # local dataset directory
    train_dir = workroot + '/model/'  # local model output directory (was assigned twice, inconsistently)
    obs_train_url = args.train_url

    # makedirs(exist_ok=True) is robust against missing parents and against
    # the directory already existing (plain mkdir raised in both cases).
    os.makedirs(data_dir, exist_ok=True)
    os.makedirs(train_dir, exist_ok=True)

    ######################## Copy the dataset from OBS into the training image ########################
    # Best-effort copy: failures are logged but do not abort, matching the
    # original behavior.
    if environment == 'train':
        obs_data_url = args.data_url
        try:
            mox.file.copy_parallel(obs_data_url, data_dir)
            print("Successfully Download {} to {}".format(obs_data_url,
                                                          data_dir))
        except Exception as e:
            print('moxing download {} to {} failed: '.format(
                obs_data_url, data_dir) + str(e))

    # Select the execution device. Honor --device_target instead of the
    # hard-coded "Ascend" (the flag was previously parsed but ignored);
    # its default and only choice is "Ascend", so behavior is unchanged.
    context.set_context(mode=context.GRAPH_MODE,
                        device_id=int(os.environ["DEVICE_ID"]),
                        device_target=args.device_target)
    context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL,
                                      gradients_mean=True,
                                      parameter_broadcast=True,
                                      auto_parallel_search_mode="recursive_programming")
    init()

    # Build the training and validation datasets.
    print("data_dir: ", data_dir)
    ds_train = create_dataset(data_dir + "/imagenet/train", cfg.batch_size)
    ds_val = create_dataset(data_dir + '/imagenet/val')

    if ds_train.get_dataset_size() == 0:
        raise ValueError(
            "Please check dataset size > 0 and batch_size <= dataset size")
    print("dataset size: ", ds_train.get_dataset_size())

    # Network, loss, optimizer and high-level Model (O3 mixed precision).
    network = convmixer_net()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
    model = Model(network, net_loss, net_opt, metrics={"accuracy"}, amp_level="O3")

    # Validate after every epoch.
    net_eval = EvalCallBack(model=model, eval_dataset=ds_val, epochs_to_eval=1)

    # Checkpoint saving: one subdirectory per rank so parallel workers do not
    # overwrite each other. (Renamed: the config object was misleadingly
    # called ckpoint_cb before.)
    ckpt_config = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                   keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpt_callback = ModelCheckpoint(prefix='convmixer',
                                    directory=train_dir + "ckpt_" + str(get_rank()) + "/",
                                    config=ckpt_config)

    print("============== Starting Training ==============")
    # Use the parsed --epoch_size (its default is cfg.epoch_size, so default
    # behavior is identical; previously the flag was parsed but never used).
    epoch_size = args.epoch_size
    print('epoch_size is: ', epoch_size)

    model.train(epoch=epoch_size,
                train_dataset=ds_train,
                dataset_sink_mode=True,
                callbacks=[time_cb, LossMonitor(), net_eval, ckpt_callback])

    ######################## Copy the trained model back to OBS ########################
    # The platform serves files uploaded to obs_train_url as the job output.
    if environment == 'train':
        try:
            mox.file.copy_parallel(train_dir, obs_train_url)
            print("Successfully Upload {} to {}".format(train_dir,
                                                        obs_train_url))
        except Exception as e:
            print('moxing upload {} to {} failed: '.format(train_dir,
                                                           obs_train_url) + str(e))
|