# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- """FOTS train."""
- import os
- import time
- import argparse
- import datetime
- import numpy as np
- import mindspore as ms
- from mindspore.context import ParallelMode
- from mindspore.nn import Momentum
- from mindspore.nn import Adam
- from mindspore import Tensor
- from mindspore import context
- from mindspore.communication.management import init, get_rank, get_group_size
- from mindspore.train.callback import ModelCheckpoint, RunContext
- from mindspore.train.callback import _InternalCallbackParam, CheckpointConfig
-
- from src.fots import FOTS, FotsWithLossCell, TrainingWrapper
- from src.logger import get_logger
- from src.utils import AverageMeter, get_param_groups
- from src.lr_scheduler import get_lr
- from src.fots_dataset import create_fots_dataset
from src.initializer import default_recurisive_init, load_fots_params
from src.config import configFOTS

ms.set_seed(1)

def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser('mindspore fots training')

    # device related
    parser.add_argument('--device_target', type=str, default='Ascend',
                        help='Device where the code will be implemented. Default: Ascend')
    # dataset related
    parser.add_argument('--data_dir', default='../../dataset/ICDAR2015/task4_1/', type=str,
                        help='Train dataset directory.')
    parser.add_argument('--per_batch_size', default=32, type=int, help='Batch size for training. Default: 32')
    # network related
    parser.add_argument('--net_work', default='fots', type=str,
                        help='The name of network, options: fots')
    parser.add_argument('--pretrained', default='../../preTrain_ckpt/fots_epoch8.ckpt', type=str,
                        help='The pretrained file of fots. Default: ../../preTrain_ckpt/fots_epoch8.ckpt')
    parser.add_argument('--resume', default='', type=str,
                        help='The ckpt file of fots used to fine-tune. Default: ""')
    parser.add_argument('--resume_epoch', type=int, default=0, help='Resume epoch. Default: 0')
    # optimizer and lr related
    parser.add_argument('--lr_scheduler', default='piecewise_constant_lr', type=str,
                        help='Learning rate scheduler, options: piecewise_constant_lr, exponential, '
                             'ReduceLROnPlateau, cosine_annealing. Default: piecewise_constant_lr')
    parser.add_argument('--lr', default=0.05, type=float, help='Learning rate. Default: 0.05')
    parser.add_argument('--lr_epochs', type=str, default='220,250',
                        help='Epochs at which lr changes, comma-separated. Default: 220,250')
    parser.add_argument('--lr_gamma', type=float, default=0.1,
                        help='Decrease lr by a factor of lr_gamma in the exponential lr_scheduler. Default: 0.1')
    parser.add_argument('--max_epoch', type=int, default=500, help='Max epoch num to train the model. Default: 500')
    parser.add_argument('--warmup_epochs', default=20, type=float, help='Warmup epochs. Default: 20')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='Weight decay factor. Default: 0.0005')
    parser.add_argument('--momentum', type=float, default=0.9, help='Momentum. Default: 0.9')
    parser.add_argument('--T_max', type=int, default=135, help='T-max in cosine_annealing scheduler. Default: 135')
    parser.add_argument('--eta_min', type=float, default=0., help='Eta_min in cosine_annealing scheduler. Default: 0.')

    # loss related
    parser.add_argument('--loss_scale', type=int, default=1024, help='Static loss scale. Default: 1024')
    parser.add_argument('--label_smooth', type=int, default=0, help='Whether to use label smooth in CE. Default: 0')
    parser.add_argument('--label_smooth_factor', type=float, default=0.1,
                        help='Smooth strength of original one-hot. Default: 0.1')

    # logging related
    parser.add_argument('--log_interval', type=int, default=2, help='Logging interval steps. Default: 2')
    parser.add_argument('--ckpt_path', type=str, default='../../outputs/',
                        help='Checkpoint save location. Default: ../../outputs/')
    parser.add_argument('--ckpt_interval', type=int, default=1, help='Save checkpoint interval. Default: 1')
    parser.add_argument('--is_save_on_master', type=int, default=1,
                        help='Save ckpt on master or all ranks, 1 for master, 0 for all ranks. Default: 1')

    # distributed related
    parser.add_argument('--run_distribute', type=int, default=0,
                        help='Distribute train or not, 1 for yes, 0 for no. Default: 0')
    parser.add_argument('--rank', type=int, default=0, help='Local rank of distributed. Default: 0')
    parser.add_argument('--group_size', type=int, default=1, help='World size of device. Default: 1')
    parser.add_argument('--device_id', type=int, default=0, help='Device id of GPU or Ascend. Default: 0')

    # roma obs
    parser.add_argument('--train_url', type=str, default="", help='Train url.')
    # profiler init (parsed but not referenced elsewhere in this script)
    parser.add_argument('--need_profiler', type=int, default=0,
                        help='Whether to use the profiler, 0 for no, 1 for yes. Default: 0')

    args, _ = parser.parse_known_args()
    return args
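
# Example invocations (illustrative only; paths are placeholders, adjust to your setup):
#   single device: python train.py --data_dir ../../dataset/ICDAR2015/task4_1/ --per_batch_size 32
#   distributed:   python train.py --run_distribute 1 --device_target Ascend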


def convert_training_shape(args_training_shape):
    training_shape = [int(args_training_shape), int(args_training_shape)]
    return training_shape
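
# Note: convert_training_shape appears unused in this script; it expands a single
# side length into a square [height, width] pair, e.g. convert_training_shape(640) -> [640, 640].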


def prepare_network(args):
    """Prepare Network"""
    network = FOTS()
    # default init is kaiming-normal
    default_recurisive_init(network)
    load_fots_params(args, network)
    network = FotsWithLossCell(network)
    return network


def prepare_dataset(args, config):
    """Prepare dataset"""
    return create_fots_dataset(image_dir=args.data_root,
                               anno_path=args.annFile,
                               is_training=True,
                               batch_size=args.per_batch_size,
                               max_epoch=args.max_epoch,
                               device_num=args.group_size,
                               rank=args.rank,
                               config=config)
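
# create_fots_dataset is expected to return a (dataset, dataset_size) pair; run()
# below unpacks it that way when computing steps_per_epoch.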


def setup_run(args):
    """Resolve dataset paths, init the distributed context, logger and config."""
    if args.lr_scheduler == 'cosine_annealing' and args.max_epoch > args.T_max:
        args.T_max = args.max_epoch
    args.lr_epochs = list(map(int, args.lr_epochs.split(',')))
    args.data_root = os.path.join(args.data_dir, 'ch4_training_images')
    args.annFile = os.path.join(
        args.data_dir, 'ch4_training_localization_transcription_gt')
    outputs_dir = args.ckpt_path
    # init distributed
    if args.run_distribute:
        if args.device_target == "Ascend":
            init()
        else:
            init("nccl")
        args.rank = get_rank()
        args.group_size = get_group_size()
    args.rank_save_ckpt_flag = 0
    if args.is_save_on_master:
        if args.rank == 0:
            args.rank_save_ckpt_flag = 1
    else:
        args.rank_save_ckpt_flag = 1
    # logger
    args.outputs_dir = os.path.join(outputs_dir, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    args.logger = get_logger(args.outputs_dir, args.rank)
    args.logger.save_args(args)
    device_id = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target,
                        save_graphs=False, device_id=device_id)  # options: PYNATIVE_MODE or GRAPH_MODE
    context.reset_auto_parallel_context()

    if args.run_distribute:
        parallel_mode = ParallelMode.DATA_PARALLEL
        degree = get_group_size()
    else:
        parallel_mode = ParallelMode.STAND_ALONE
        degree = 1
    context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=degree)
    # resume_epoch defaults to 0, so this also covers training from scratch
    args.remain_epochs = args.max_epoch - args.resume_epoch
    config = configFOTS()
    config.label_smooth = args.label_smooth
    config.label_smooth_factor = args.label_smooth_factor
    return args, config
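
# Note: with --run_distribute 0, rank and group_size keep their CLI defaults (0 and 1),
# so the per-rank split passed to create_fots_dataset presumably degenerates to a single shard.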


def run(args):
    """Train FOTS."""
    args, config = setup_run(args)
    loss_meter = AverageMeter('loss')
    ds, data_size = prepare_dataset(args, config)
    args.logger.info('Finish loading dataset')
    args.steps_per_epoch = int(data_size / args.per_batch_size / args.group_size)
    # milestones for the piecewise-constant lr schedule: one every 20 epochs starting at epoch 30
    configFOTS.milestone = [i * args.steps_per_epoch for i in range(30, args.max_epoch, 20)]
    if not args.ckpt_interval:
        args.ckpt_interval = args.steps_per_epoch
    print("========================")
    print(args)
    print("========================")
    network = prepare_network(args)
    lr = get_lr(args)
    print("len(lr):", len(lr))
    # alternative optimizer, kept for reference:
    # opt = Momentum(params=get_param_groups(network), momentum=args.momentum, learning_rate=Tensor(lr),
    #                weight_decay=args.weight_decay, loss_scale=args.loss_scale)
    # note: weight_decay is hard-coded here and overrides --weight_decay
    opt = Adam(params=get_param_groups(network), learning_rate=Tensor(lr, ms.float32), weight_decay=1e-5)
    # the wrapper is given half the static loss scale
    network = TrainingWrapper(network, opt, args.loss_scale // 2)
    network.set_train()
    if args.rank_save_ckpt_flag:
        # checkpoint save
        ckpt_max_num = args.max_epoch * args.steps_per_epoch // args.ckpt_interval
        ckpt_config = CheckpointConfig(save_checkpoint_steps=args.ckpt_interval, keep_checkpoint_max=20)
        save_ckpt_path = os.path.join(args.outputs_dir, 'ckpt_' + str(args.rank) + '/')
        ckpt_cb = ModelCheckpoint(config=ckpt_config, directory=save_ckpt_path, prefix='{}'.format(args.rank))
        cb_params = _InternalCallbackParam()
        cb_params.train_network = network
        cb_params.epoch_num = ckpt_max_num
        cb_params.cur_epoch_num = 1
        run_context = RunContext(cb_params)
        ckpt_cb.begin(run_context)
    t_end = time.time()
    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=args.remain_epochs)
    i_start = args.resume_epoch * args.steps_per_epoch
    if args.rank_save_ckpt_flag:
        cb_params.cur_epoch_num = args.resume_epoch + 1
        # pylint: disable=protected-access
        # align the callback's step counter with the resume point
        ckpt_cb._last_triggered_step = i_start
    old_progress = i_start - 1
    print("data_size:", data_size)
    print("i_start:", i_start)
    for i, data in enumerate(data_loader, i_start):
        images = data["image"]
        input_shape = images.shape[2:4]
        images = Tensor.from_numpy(images)
        classification = Tensor.from_numpy(data['classification'])
        regression = Tensor.from_numpy(data['regression'])
        thetas = Tensor.from_numpy(data['thetas'])
        training_mask = Tensor.from_numpy(data['training_mask'])
        loss = network(images, classification, regression, thetas, training_mask)
        loss_meter.update(loss.asnumpy())
        if args.rank_save_ckpt_flag:
            # ckpt progress
            cb_params.cur_step_num = i + 1  # current step number
            cb_params.batch_num = i + 2
            ckpt_cb.step_end(run_context)
        if i % args.log_interval == 0:
            time_used = time.time() - t_end
            epoch = i // args.steps_per_epoch
            fps = args.per_batch_size * (i - old_progress) * args.group_size / time_used
            if args.rank == 0:
                args.logger.info('epoch[{}], iter[{}], {}, fps:{:.2f} imgs/sec, '
                                 'lr:{}'.format(epoch, i, loss_meter, fps, lr[i - i_start]))
            t_end = time.time()
            loss_meter.reset()
        old_progress = i
        if (i + 1) % args.steps_per_epoch == 0 and args.rank_save_ckpt_flag:
            cb_params.cur_epoch_num += 1
    args.logger.info('==========end training===============')


if __name__ == '__main__':
    args = parse_args()
    run(args)
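
# Resuming from a checkpoint (illustrative): pass both the checkpoint file and the
# epoch it was saved at, e.g.
#   python train.py --resume outputs/<run_dir>/ckpt_0/0-52_773.ckpt --resume_epoch 52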