# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FOTS train."""
import os
import time
import argparse
import datetime
import numpy as np
import mindspore as ms
from mindspore.context import ParallelMode
from mindspore.nn import Adam
from mindspore import Tensor
from mindspore import context
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.callback import ModelCheckpoint, RunContext
from mindspore.train.callback import _InternalCallbackParam, CheckpointConfig

from src.fots import FOTS, FotsWithLossCell, TrainingWrapper
from src.logger import get_logger
from src.utils import AverageMeter, get_param_groups
from src.lr_scheduler import get_lr
from src.fots_dataset import create_fots_dataset
from src.initializer import default_recurisive_init, load_fots_params
from src.config import configFOTS


def convert_training_shape(args_training_shape):
    """Expand a single side length into a square [h, w] training shape."""
    training_shape = [int(args_training_shape), int(args_training_shape)]
    return training_shape
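
# Usage sketch (this helper is currently not called anywhere in the script):
#   convert_training_shape("640") -> [640, 640]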


def main(args):
    """Configure the device context, build FOTS and run the training loop."""
    if args.lr_scheduler == 'cosine_annealing' and args.max_epoch > args.T_max:
        args.T_max = args.max_epoch
    args.lr_epochs = list(map(int, args.lr_epochs.split(',')))

    args.data_root = os.path.join(args.data_url, 'ch4_training_images')
    args.annFile = os.path.join(
        args.data_url, 'ch4_training_localization_transcription_gt')
    outputs_dir = args.ckpt_path

    # either PYNATIVE_MODE or GRAPH_MODE can be used here
    context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target,
                        save_graphs=False)

    # init distributed
    if args.is_distributed:
        if args.device_target == "Ascend":
            rank_id = int(os.getenv('RANK_ID'))
            device_id = int(os.getenv("DEVICE_ID"))
            context.set_context(device_id=device_id, enable_auto_mixed_precision=True)
            device_num = args.device_num
            print("rank_id:{}, device_id:{}, device_num:{}".format(rank_id, device_id, device_num))
            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                              device_num=device_num)
            init()
        else:
            init("nccl")
        args.rank = get_rank()
        args.group_size = get_group_size()
    else:
        device_id = int(os.getenv('DEVICE_ID', args.device_id))
        context.set_context(device_id=device_id, enable_auto_mixed_precision=True)
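
    # Under DATA_PARALLEL each rank reads its own dataset shard (see the
    # rank/device_num arguments of create_fots_dataset below), and
    # gradients_mean=True averages gradients across devices on every step.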

    args.rank_save_ckpt_flag = 0
    if args.is_save_on_master:
        if args.rank == 0:
            args.rank_save_ckpt_flag = 1
    else:
        args.rank_save_ckpt_flag = 1
    print('-------------------------')
    print(args)
    print('-------------------------')

    # logger
    args.outputs_dir = os.path.join(outputs_dir, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    args.logger = get_logger(args.outputs_dir, args.rank)
    args.logger.save_args(args)

    loss_meter = AverageMeter('loss')

    # TODO: to be revised
    if args.is_modelArts:
        import moxing as mox
        obs_data_url = args.data_url
        args.data_url = '/home/work/user-job-dir/inputs/data/'
        obs_train_url = args.train_url
        args.train_url = '/home/work/user-job-dir/outputs/model/'
        # copy the dataset from OBS to the local container disk before listing it
        mox.file.copy_parallel(obs_data_url, args.data_url)
        print("dataset dir----------", os.listdir(args.data_url))
        args.data_root = os.path.join(args.data_url, 'ch4_training_images')
        args.annFile = os.path.join(args.data_url, 'ch4_training_localization_transcription_gt')
        args.pretrained = os.path.join(args.data_url, 'fots_epoch8.ckpt')

        args.outputs_dir = args.train_url

    network = FOTS()
    # default is kaiming-normal
    default_recurisive_init(network)
    load_fots_params(args, network)

    network = FotsWithLossCell(network)
    config = configFOTS()

    config.label_smooth = args.label_smooth
    config.label_smooth_factor = args.label_smooth_factor

    ds, data_size = create_fots_dataset(image_dir=args.data_root, anno_path=args.annFile, is_training=True,
                                        batch_size=args.per_batch_size, max_epoch=args.max_epoch,
                                        device_num=args.group_size, rank=args.rank, config=config)

    args.logger.info('Finish loading dataset')

    args.steps_per_epoch = int(data_size / args.per_batch_size / args.group_size)
    configFOTS.milestone = [i * args.steps_per_epoch for i in range(30, args.max_epoch, 20)]
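    # Example with hypothetical numbers: 1000 samples, per_batch_size=32 and one
    # device give steps_per_epoch = int(1000 / 32 / 1) = 31; the lr milestones
    # then fall at epochs 30, 50, 70, ... (every 20 epochs starting from 30).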

    if not args.ckpt_interval:
        args.ckpt_interval = args.steps_per_epoch

    lr = np.array(get_lr(args), dtype=np.float32)
    opt = Adam(params=get_param_groups(network), learning_rate=Tensor(lr), weight_decay=1e-5)

    network = TrainingWrapper(network, opt, args.loss_scale // 2)
    network.set_train()
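    # Note: in the common MindSpore model-zoo pattern, TrainingWrapper's third
    # argument is a static loss scale (sens) used as the initial gradient for
    # the backward pass.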

    if args.rank_save_ckpt_flag:
        # checkpoint save
        ckpt_max_num = args.max_epoch * args.steps_per_epoch // args.ckpt_interval
        ckpt_config = CheckpointConfig(save_checkpoint_steps=args.ckpt_interval, keep_checkpoint_max=1)
        save_ckpt_path = os.path.join(args.outputs_dir, 'ckpt_' + str(args.rank) + '/')
        ckpt_cb = ModelCheckpoint(config=ckpt_config, directory=save_ckpt_path, prefix='{}'.format(args.rank))
        cb_params = _InternalCallbackParam()
        cb_params.train_network = network
        cb_params.epoch_num = ckpt_max_num
        cb_params.cur_epoch_num = 1
        run_context = RunContext(cb_params)
        ckpt_cb.begin(run_context)
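
    # The loop below drives ModelCheckpoint by hand through RunContext instead
    # of Model.train(), so cur_step_num and cur_epoch_num are updated manually.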
    old_progress = -1
    t_end = time.time()
    # The dataset from create_fots_dataset is already repeated for
    # args.max_epoch epochs, so one pass over this iterator covers the whole
    # training run; iterating it beforehand would exhaust it before training.
    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)

    for i, data in enumerate(data_loader):
        images = data["image"]
        input_shape = images.shape[2:4]
        images = Tensor.from_numpy(images)

        classification = Tensor.from_numpy(data['classification'])
        regression = Tensor.from_numpy(data['regression'])
        thetas = Tensor.from_numpy(data['thetas'])
        training_mask = Tensor.from_numpy(data['training_mask'])

        loss = network(images, classification, regression, thetas, training_mask)
        loss_meter.update(loss.asnumpy())

        if args.rank_save_ckpt_flag:
            # ckpt progress
            cb_params.cur_step_num = i + 1  # current step number
            cb_params.batch_num = i + 2
            ckpt_cb.step_end(run_context)

        if i % args.log_interval == 0:
            time_used = time.time() - t_end
            epoch = int(i / args.steps_per_epoch)
            # throughput across all devices since the last log
            fps = args.per_batch_size * (i - old_progress) * args.group_size / time_used
            if args.rank == 0:
                args.logger.info('epoch[{}], iter[{}], {}, fps:{:.2f} imgs/sec, '
                                 'lr:{}'.format(epoch, i, loss_meter, fps, lr[i]))
            t_end = time.time()
            loss_meter.reset()
            old_progress = i

        if (i + 1) % args.steps_per_epoch == 0 and args.rank_save_ckpt_flag:
            cb_params.cur_epoch_num += 1

    if args.is_modelArts:
        try:
            mox.file.copy_parallel(args.train_url, obs_train_url)
            print("Successfully uploaded {} to {}".format(args.train_url, obs_train_url))
        except Exception as e:
            print('moxing upload {} to {} failed: '.format(args.train_url, obs_train_url) + str(e))


if __name__ == '__main__':
    ms.set_seed(1)

    parser = argparse.ArgumentParser('MindSpore FOTS training')
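
    # Example invocation (script name and paths are placeholders, not confirmed
    # by the repository):
    #   python train.py --data_url /path/to/ICDAR2015/task4_1/ --per_batch_size 32 --max_epoch 500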

    # device related
    parser.add_argument('--device_target', type=str, default='Ascend',
                        help='Device where the code will be implemented.')

    # dataset related
    parser.add_argument('--data_url', default='/home/lzh/2021-9-25/dataset/ICDAR2015/task4_1/', type=str,
                        help='Train dataset directory.')
    parser.add_argument('--per_batch_size', default=32, type=int, help='Batch size for training. Default: 32')

    # network related
    parser.add_argument('--net_work', default='fots', type=str,
                        help='The name of the network, options: fots')
    parser.add_argument('--pretrained', default='', type=str,
                        help='The pretrained file of fots. Default: "".')
    parser.add_argument('--resume', default='', type=str,
                        help='The ckpt file of fots used for fine-tuning. Default: ""')

    # optimizer and lr related
    parser.add_argument('--lr_scheduler', default='piecewise_constant_lr', type=str,
                        help='Learning rate scheduler, options: exponential, ReduceLROnPlateau, '
                             'cosine_annealing, piecewise_constant_lr. Default: piecewise_constant_lr')
    parser.add_argument('--lr', default=0.01, type=float, help='Learning rate. Default: 0.01')
    parser.add_argument('--lr_epochs', type=str, default='220,250',
                        help='Epochs at which the lr changes, split with ",". Default: 220,250')
    parser.add_argument('--lr_gamma', type=float, default=0.1,
                        help='Factor by which the exponential lr_scheduler decreases the lr. Default: 0.1')
    parser.add_argument('--max_epoch', type=int, default=500, help='Max epoch num to train the model. Default: 500')
    parser.add_argument('--warmup_epochs', default=20, type=float, help='Warmup epochs. Default: 20')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='Weight decay factor. Default: 0.0005')
    parser.add_argument('--momentum', type=float, default=0.9, help='Momentum. Default: 0.9')
    parser.add_argument('--T_max', type=int, default=135, help='T-max in cosine_annealing scheduler. Default: 135')
    parser.add_argument('--eta_min', type=float, default=0., help='Eta_min in cosine_annealing scheduler. Default: 0.')

    # loss related
    parser.add_argument('--loss_scale', type=int, default=1024, help='Static loss scale. Default: 1024')
    parser.add_argument('--label_smooth', type=int, default=0, help='Whether to use label smoothing in CE. Default: 0')
    parser.add_argument('--label_smooth_factor', type=float, default=0.1,
                        help='Smooth strength of original one-hot. Default: 0.1')

    # logging related
    parser.add_argument('--log_interval', type=int, default=1, help='Logging interval steps. Default: 1')
    parser.add_argument('--ckpt_path', type=str, default='./outputs/',
                        help='Checkpoint save location. Default: ./outputs/')
    parser.add_argument('--ckpt_interval', type=int, default=1, help='Save checkpoint interval. Default: 1')

    parser.add_argument('--is_save_on_master', type=int, default=1,
                        help='Save ckpt on master or all ranks, 1 for master, 0 for all ranks. Default: 1')

    # distributed related
    parser.add_argument('--is_distributed', type=int, default=0,
                        help='Distributed training or not, 1 for yes, 0 for no. Default: 0')
    parser.add_argument('--rank', type=int, default=0, help='Local rank for distributed training. Default: 0')
    parser.add_argument('--group_size', type=int, default=1, help='World size of devices. Default: 1')

    # roma obs
    parser.add_argument('--train_url', type=str, default="", help='Train url.')
    # profiler init
    parser.add_argument('--need_profiler', type=int, default=0,
                        help='Whether to use the profiler, 0 for no, 1 for yes. Default: 0')

    # reset default config
    parser.add_argument('--training_shape', type=str, default="", help='Fix training shape. Default: ""')  # TODO: unclear
    parser.add_argument('--resize_rate', type=int, default=10,
                        help='Resize rate for multi-scale training. Default: 10')  # TODO: unclear
    parser.add_argument('--is_modelArts', type=int, default=0,
                        help='Training on ModelArts or not, 1 for yes, 0 for no. Default: 0')
    parser.add_argument("--device_num", type=int, default=1, help="Number of devices. Default: 1.")
    parser.add_argument("--device_id", type=int, default=1, help="Device id. Default: 1.")
    args, _ = parser.parse_known_args()

    main(args)