|
-
- import os
-
- import cv2
- import numpy as np
- import pandas as pd
- import os
- import argparse
- import moxing as mox
- import mindspore.nn as nn
- from mindspore import context
- from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
- from mindspore.train import Model
- from mindspore.nn.metrics import Accuracy
- from mindspore.common import set_seed
-
- from mindspore import Model, dataset, nn
- from mindspore.nn import learning_rate_schedule
- from mindspore.train.callback import LossMonitor
- from mindspore.common import set_seed
- from dataloader import get_dataloader
- from Loss import Tripletloss, SoftMaxCE
-
- from Resnet import resnet50
- from mindspore.communication import init, get_rank, get_group_size
-
- from mindspore import Model, dataset, nn
- from mindspore.communication import get_rank
- from mindspore.nn import learning_rate_schedule, TrainOneStepCell
- from mindspore.train.callback import LossMonitor, CheckpointConfig, ModelCheckpoint
- from mindspore.common import set_seed
-
- from Multigrain_net import Layer
- from dataloader import get_dataloader
-
- from Resnet import resnet50
-
# Embedding / class-count used by the Multigrain head built in __main__.
NUM_CLASSES = 2048

import sys

# Debug aid: dump the interpreter search path so failures importing the
# local modules (dataloader, Loss, Resnet, Multigrain_net) are diagnosable
# from the job log.
print("*" * 10)
print(sys.path)
print("*" * 10)

# environment = 'debug'
environment = 'train'
# Debug sessions and training jobs mount the workspace at different roots.
workroot = ('/home/ma-user/work'        # used by debug tasks
            if environment == 'debug'
            else '/home/work/user-job-dir')  # used by training tasks
print('current work mode:' + environment + ', workroot:' + workroot)
-
# Command-line interface for the Multigrain training job.
parser = argparse.ArgumentParser(description='Multigrain')

# data_url / train_url are the two fixed ModelArts job parameters: the OBS
# location of the dataset and the OBS folder that receives the trained model.
parser.add_argument('--data_url',
                    help='path to training/inference dataset folder',
                    default= workroot + '/data/')
parser.add_argument('--train_url',
                    help='model folder to save/load',
                    default= workroot + '/model/')

parser.add_argument(
    '--device_target',
    type=str,
    default="Ascend",
    choices=['Ascend', 'CPU'],
    help='device where the code will be implemented (default: CPU),若要在启智平台上使用NPU,需要在启智平台训练界面上加上运行参数device_target=Ascend')

# ModelArts already supplies data_url and train_url by default.
parser.add_argument('--epoch_size',
                    type=int,
                    default=5,
                    help='Training epochs.')
-
-
if __name__ == '__main__':

    args = parser.parse_args()
    print('args:')
    print(args)

    # Local staging directories inside the training container.
    data_dir = workroot + '/data'     # dataset download target
    train_dir = workroot + '/model/'  # checkpoint output directory
    # makedirs(exist_ok=True) is race-free and creates missing parents;
    # the original os.mkdir would fail if workroot itself did not exist.
    os.makedirs(data_dir, exist_ok=True)
    os.makedirs(train_dir, exist_ok=True)
    obs_train_url = args.train_url

    ######################## Copy the dataset from OBS into the training image (platform boilerplate) ########################
    # On a real training job data_url points at OBS; mirror it to the fixed
    # local path so the dataloader can read it. Best-effort: a failed copy
    # is logged rather than raised, matching the platform's recommended idiom.
    if environment == 'train':
        obs_data_url = args.data_url
        try:
            mox.file.copy_parallel(obs_data_url, data_dir)
            print("Successfully Download {} to {}".format(obs_data_url,
                                                          data_dir))
        except Exception as e:
            print('moxing download {} to {} failed: '.format(
                obs_data_url, data_dir) + str(e))

    ######################## Training setup ########################
    # DEVICE_ID is injected by the scheduler; default to card 0 when running
    # outside a managed job so int() never receives None and crashes.
    device_id = int(os.getenv("DEVICE_ID", "0"))
    context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target)
    context.set_context(device_id=device_id)
    init()

    # Checkpoint policy: snapshot every 32 steps, keep the 10 newest files.
    ck_config = CheckpointConfig(save_checkpoint_steps=32, keep_checkpoint_max=10)
    globalmodel_ck_cb = ModelCheckpoint(prefix="global_resnet50", directory=train_dir, config=ck_config)

    # Build the input pipeline from the staged ImageNet training split.
    dataset_generator = get_dataloader(os.path.join(data_dir, "ILSVRC2012_img_train"), 4, 4)
    ds_train = dataset.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=True)
    ds_train = ds_train.batch(16)

    # Network, learning-rate schedule, optimizer and train-step wrapper.
    loss_cb = LossMonitor()
    global_net = Layer(NUM_CLASSES)
    lr = learning_rate_schedule.CosineDecayLR(min_lr=0.001, max_lr=0.1, decay_steps=4)
    global_optimizer = nn.SGD(global_net.trainable_params(), learning_rate=lr, momentum=0.9)
    global_net_with_grad = TrainOneStepCell(global_net, global_optimizer)
    model = Model(global_net_with_grad)
    # NOTE(review): args.epoch_size is parsed above but the epoch count here
    # is still hard-coded to 1 — confirm whether it should be args.epoch_size.
    model.train(1, ds_train, callbacks=[loss_cb, globalmodel_ck_cb], dataset_sink_mode=True)

    # Debug leftovers: record the staged dataset layout in the job log.
    print(os.path.exists('/home/work/user-job-dir/data'))
    print(os.path.exists('/home/work/user-job-dir'))
    print(os.path.exists('/home/work/user-job-dir/data/ILSVRC2012_img_train/'))
    print(os.path.exists('/home/work/user-job-dir/ILSVRC2012_img_train/'))

    ######################## Copy the trained model back to OBS (platform boilerplate) ########################
    # Upload the checkpoints so the platform can offer them for download;
    # again best-effort with the failure logged instead of raised.
    try:
        mox.file.copy_parallel(train_dir, obs_train_url)
        print("Successfully Upload {} to {}".format(train_dir,
                                                    obs_train_url))
    except Exception as e:
        print('moxing upload {} to {} failed: '.format(train_dir,
                                                       obs_train_url) + str(e))
    ######################## End of OBS upload ########################
|