# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

- """train FasterRcnn and get checkpoint files."""
-
- import os
- import time
- import argparse
- import ast
- import numpy as np
-
- import mindspore.common.dtype as mstype
- from mindspore import context, Tensor, Parameter
- from mindspore.communication.management import init, get_rank, get_group_size
- from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor
- from mindspore.train import Model
- from mindspore.context import ParallelMode
- from mindspore.train.serialization import load_checkpoint, load_param_into_net
- from mindspore.nn import SGD
- from mindspore.common import set_seed
-
- from src.FasterRcnn.faster_rcnn_r50 import Faster_Rcnn_Resnet50
- from src.network_define import LossCallBack, WithLossCell, TrainOneStepCell, LossNet
- from src.config import config
- from src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset
- from src.lr_schedule import dynamic_lr
-
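# Fix the global random seed so weight initialization and data shuffling
# are reproducible across runs.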
set_seed(1)

parser = argparse.ArgumentParser(description="FasterRcnn training")
parser.add_argument("--run_distribute", type=ast.literal_eval, default=False, help="Run distributed training, default: False.")
parser.add_argument("--dataset", type=str, default="coco", help="Dataset name, default: coco.")
parser.add_argument("--pre_trained", type=str, default="", help="Pretrained checkpoint file path.")
parser.add_argument("--device_target", type=str, default="Ascend",
                    help="Device where the code will run, default: Ascend.")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default: 0.")
parser.add_argument("--device_num", type=int, default=1, help="Number of devices to use, default: 1.")
parser.add_argument("--rank_id", type=int, default=0, help="Rank id, default: 0.")
args_opt = parser.parse_args()
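
# Example launch commands (illustrative; device ids, rank tables and paths
# depend on your environment):
#   single device:   python train.py --device_target=Ascend --device_id=0
#   distributed GPU: mpirun -n 8 python train.py --run_distribute=True --device_target=GPU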

context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)

if __name__ == '__main__':
    if args_opt.run_distribute:
        if args_opt.device_target == "Ascend":
            rank = args_opt.rank_id
            device_num = args_opt.device_num
            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            init()
        else:
            init("nccl")
            context.reset_auto_parallel_context()
            rank = get_rank()
            device_num = get_group_size()
            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
    else:
        rank = 0
        device_num = 1
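
    # `rank` and `device_num` are passed to the dataset sharder below so each
    # device trains on a disjoint shard of the MindRecord data.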
    print("Start create dataset!")

    # MindRecord files are generated in config.mindrecord_dir, named
    # FasterRcnn.mindrecord0, 1, ..., up to file_num.
    prefix = "FasterRcnn.mindrecord"
    mindrecord_dir = config.mindrecord_dir
    mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
    print("CHECKING MINDRECORD FILES ...")

    if rank == 0 and not os.path.exists(mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if args_opt.dataset == "coco":
            if os.path.isdir(config.coco_root):
                print("Create Mindrecord. It may take some time.")
                data_to_mindrecord_byte_image("coco", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                raise ValueError("coco_root does not exist. Please make sure config.coco_root is valid.")
        else:
            if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
                print("Create Mindrecord. It may take some time.")
                data_to_mindrecord_byte_image("other", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                raise ValueError("image_dir or anno_path does not exist. Please make sure "
                                 "config.image_dir and config.anno_path are valid.")

    # All ranks wait here until rank 0 has finished writing the MindRecord
    # index (.db) file.
    while not os.path.exists(mindrecord_file + ".db"):
        time.sleep(5)

    print("CHECKING MINDRECORD FILES DONE!")

    loss_scale = float(config.loss_scale)

    # When creating the MindDataset, use the first MindRecord file, e.g. FasterRcnn.mindrecord0.
    dataset = create_fasterrcnn_dataset(mindrecord_file, batch_size=config.batch_size,
                                        device_num=device_num, rank_id=rank,
                                        num_parallel_workers=config.num_parallel_workers,
                                        python_multiprocessing=config.python_multiprocessing)

    dataset_size = dataset.get_dataset_size()
    print("Create dataset done!")

    net = Faster_Rcnn_Resnet50(config=config)
    net = net.set_train()

    load_path = args_opt.pre_trained
    if load_path != "":
        param_dict = load_checkpoint(load_path)

        # Remap ResNet-50 checkpoint keys to the namespace used by the
        # Faster-RCNN backbone.
        key_mapping = {'down_sample_layer.1.beta': 'bn_down_sample.beta',
                       'down_sample_layer.1.gamma': 'bn_down_sample.gamma',
                       'down_sample_layer.0.weight': 'conv_down_sample.weight',
                       'down_sample_layer.1.moving_mean': 'bn_down_sample.moving_mean',
                       'down_sample_layer.1.moving_variance': 'bn_down_sample.moving_variance',
                       }
        for oldkey in list(param_dict.keys()):
            if not oldkey.startswith(('backbone', 'end_point', 'global_step', 'learning_rate', 'moments', 'momentum')):
                data = param_dict.pop(oldkey)
                newkey = 'backbone.' + oldkey
                param_dict[newkey] = data
                oldkey = newkey
            for k, v in key_mapping.items():
                if k in oldkey:
                    newkey = oldkey.replace(k, v)
                    param_dict[newkey] = param_dict.pop(oldkey)
                    break

        # Keep only backbone weights; drop optimizer state and head parameters.
        for item in list(param_dict.keys()):
            if not item.startswith('backbone'):
                param_dict.pop(item)

        # Cast to float32 so the checkpoint loads regardless of its saved precision.
        for key, value in param_dict.items():
            tensor = value.asnumpy().astype(np.float32)
            param_dict[key] = Parameter(tensor, key)
        load_param_into_net(net, param_dict)
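        # Note: load_param_into_net returns the names of parameters it could
        # not find in param_dict; capturing and printing that list is a quick
        # way to verify that the key remapping above worked.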

    device_type = "Ascend" if context.get_context("device_target") == "Ascend" else "Others"
    if device_type == "Ascend":
        # Run the network in float16 on Ascend for better throughput.
        net.to_float(mstype.float16)

    loss = LossNet()
    lr = Tensor(dynamic_lr(config, dataset_size), mstype.float32)

    opt = SGD(params=net.trainable_params(), learning_rate=lr, momentum=config.momentum,
              weight_decay=config.weight_decay, loss_scale=config.loss_scale)
    net_with_loss = WithLossCell(net, loss)
    if args_opt.run_distribute:
        net = TrainOneStepCell(net_with_loss, opt, sens=config.loss_scale, reduce_flag=True,
                               mean=True, degree=device_num)
    else:
        net = TrainOneStepCell(net_with_loss, opt, sens=config.loss_scale)
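    # Note: `sens` seeds the backward pass with the loss-scale constant, so
    # gradients are scaled up during backprop (useful with float16) and scaled
    # back down by the optimizer's matching `loss_scale`.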

    time_cb = TimeMonitor(data_size=dataset_size)
    loss_cb = LossCallBack(rank_id=rank)
    cb = [time_cb, loss_cb]
    if config.save_checkpoint:
        # save_checkpoint_steps is expressed in steps, so convert epochs to steps.
        ckptconfig = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * dataset_size,
                                      keep_checkpoint_max=config.keep_checkpoint_max)
        save_checkpoint_path = os.path.join(config.save_checkpoint_path, "ckpt_" + str(rank) + "/")
        ckpoint_cb = ModelCheckpoint(prefix='faster_rcnn', directory=save_checkpoint_path, config=ckptconfig)
        cb += [ckpoint_cb]

    model = Model(net)
    model.train(config.epoch_size, dataset, callbacks=cb)