# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import math
import os

import numpy as np
import mindspore
from mindspore.train.serialization import save_checkpoint, load_checkpoint, load_param_into_net

this_path = os.getcwd()
train_dir = os.path.join(this_path, 'output')

RESULT_DIR = os.path.join(train_dir, 'results')
WEIGHT_DIR = os.path.join(train_dir, 'weights')


def adjust_learning_rate(c, optimizer, epoch):
    """Set the learning rate for `epoch` using cosine or step decay."""
    lr = c.lr
    if c.lr_cosine:
        eta_min = lr * (c.lr_decay_rate ** 3)
        lr = eta_min + (lr - eta_min) * (
            1 + math.cos(math.pi * epoch / c.meta_epochs)) / 2
    else:
        steps = np.sum(epoch >= np.asarray(c.lr_decay_epochs))
        if steps > 0:
            lr = lr * (c.lr_decay_rate ** steps)

    # Write the new rate into the optimizer; rebinding the loop variable, as the
    # original code did, never updated anything. This assumes a single
    # (non-grouped) learning rate exposed as `optimizer.learning_rate`.
    optimizer.learning_rate.set_data(mindspore.Tensor(lr, mindspore.float32))


def warmup_learning_rate(c, epoch, batch_id, total_batches, optimizer):
    """Linearly ramp the learning rate during warmup epochs and return the current rate."""
    if c.lr_warm and epoch < c.lr_warm_epochs:
        p = (batch_id + epoch * total_batches) / \
            (c.lr_warm_epochs * total_batches)
        lr = c.lr_warmup_from + p * (c.lr_warmup_to - c.lr_warmup_from)
        # As in adjust_learning_rate, assign to the optimizer's learning-rate
        # parameter instead of rebinding a loop variable.
        optimizer.learning_rate.set_data(mindspore.Tensor(lr, mindspore.float32))
    return float(optimizer.learning_rate.asnumpy())

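# Worked example of the warmup interpolation above (values are illustrative,
# not from the repository config): with lr_warmup_from=0.0, lr_warmup_to=0.1,
# lr_warm_epochs=2 and total_batches=100, at epoch 1 / batch 50 the progress is
# p = (50 + 1 * 100) / (2 * 100) = 0.75, giving lr = 0.0 + 0.75 * 0.1 = 0.075.
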
def get_lr_list(c, epochs, subepochs, batches, pool_layer_size, N):
    """Build the per-step warmup learning-rate list across all pooled scales."""
    lr_list = []
    for i in range(epochs):
        for j in range(subepochs):
            for k in range(batches):
                if c.lr_warm and i < c.lr_warm_epochs:
                    p = ((k + j * batches) + i * (batches * subepochs)) / \
                        (c.lr_warm_epochs * (batches * subepochs))
                    lr = c.lr_warmup_from + p * (c.lr_warmup_to - c.lr_warmup_from)
                    # One lr entry per optimizer step: batch size times the
                    # feature positions at each active pooled scale.
                    nums = int(c.bs * ((c.input_size / 32) * (c.input_size / 32) // N))
                    if c.pool_layers >= 2:
                        nums += int(c.bs * ((c.input_size / 16) * (c.input_size / 16) // N))
                    if c.pool_layers >= 3:
                        nums += int(c.bs * ((c.input_size / 8) * (c.input_size / 8) // N))
                    lr_list.extend([lr] * pool_layer_size * nums)
    return lr_list


def op_get_lr_list(c, epochs, subepochs, batches, pool_layer, N):
    """Build the per-step learning-rate list for a single pooled feature scale."""
    lr_list = []
    if pool_layer == 1:
        nums = int(c.bs * (c.input_size / 8) * (c.input_size / 8) // N)
    elif pool_layer == 2:
        nums = int(c.bs * (c.input_size / 16) * (c.input_size / 16) // N)
    elif pool_layer == 3:
        nums = int(c.bs * (c.input_size / 32) * (c.input_size / 32) // N)
    else:
        raise ValueError('pool_layer must be 1, 2 or 3, got {}'.format(pool_layer))
    for i in range(epochs):
        for j in range(subepochs):
            for k in range(batches):
                if c.lr_warm and i < c.lr_warm_epochs:
                    p = ((k + j * batches) + i * (batches * subepochs)) / \
                        (c.lr_warm_epochs * (batches * subepochs))
                    lr = c.lr_warmup_from + p * (c.lr_warmup_to - c.lr_warmup_from)
                else:
                    lr = c.lr
                    if c.lr_cosine:
                        eta_min = lr * (c.lr_decay_rate ** 3)
                        lr = eta_min + (lr - eta_min) * (
                            1 + math.cos(math.pi * i / c.meta_epochs)) / 2
                    else:
                        steps = np.sum(i >= np.asarray(c.lr_decay_epochs))
                        if steps > 0:
                            lr = lr * (c.lr_decay_rate ** steps)
                # The original extended with the undecayed c.lr here, discarding
                # the value computed above; use the decayed lr instead.
                lr_list.extend([lr] * nums)
    return lr_list

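# Sizing note for the schedules above: each list entry feeds one optimizer step,
# and `nums` estimates steps per training batch as bs * (input_size / stride)^2
# // N for a pooled scale with the given stride (reading N as the number of
# feature positions consumed per step is an interpretation, not stated in the
# source). Illustrative arithmetic: with bs=32, input_size=256, N=256 and
# pool_layer=3, (256 / 32)^2 = 64 positions, so int(32 * 64 // 256) = 8 entries
# are appended per batch.
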
def save_mindspore_weights(encoder, decoders, class_name, model_name, run_date, epoch):
    """Save encoder and all decoder parameters into one checkpoint file."""
    save_dir = os.path.join(WEIGHT_DIR, class_name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # save_checkpoint expects a list of {"name": ..., "data": ...} entries, not a
    # nested dict, so flatten the parameter dicts under per-network name prefixes.
    params = [{'name': 'encoder.' + name, 'data': param}
              for name, param in encoder.parameters_dict().items()]
    for idx, decoder in enumerate(decoders):
        params.extend({'name': 'decoder_{}.{}'.format(idx, name), 'data': param}
                      for name, param in decoder.parameters_dict().items())
    filename = '{}_epoch_{}_{}.ckpt'.format(model_name, epoch, run_date)
    path = os.path.join(save_dir, filename)
    save_checkpoint(params, path)
    print('Saving weights to {}'.format(path))


def load_mindspore_weights(encoder, decoders, filename):
    """Load encoder/decoder parameters saved by save_mindspore_weights."""
    checkpoint = load_checkpoint(filename)
    # Split the flat checkpoint back into per-network dicts by name prefix.
    encoder_params = {name[len('encoder.'):]: param for name, param
                      in checkpoint.items() if name.startswith('encoder.')}
    load_param_into_net(encoder, encoder_params)
    for idx, decoder in enumerate(decoders):
        prefix = 'decoder_{}.'.format(idx)
        decoder_params = {name[len(prefix):]: param for name, param
                          in checkpoint.items() if name.startswith(prefix)}
        load_param_into_net(decoder, decoder_params)
    print('Loading weights from {}'.format(filename))


def save_weights(encoder, decoders, model_name, run_date, weight_dir, epoch):
    """Save the encoder and each decoder to separate checkpoint files."""
    # Note: checkpoints are written under the module-level WEIGHT_DIR;
    # the weight_dir argument is kept for interface compatibility.
    if not os.path.exists(WEIGHT_DIR):
        os.makedirs(WEIGHT_DIR)
    encoder_filename = '{}_{}_encoder'.format(model_name, run_date)
    mindspore.save_checkpoint(encoder, '{0}/epoch_{1}_{2}.ckpt'.format(WEIGHT_DIR, epoch, encoder_filename))
    for k, decoder in enumerate(decoders):
        decoder_filename = '{}_{}_decoder_{}'.format(model_name, run_date, k)
        mindspore.save_checkpoint(decoder, '{0}/epoch_{1}_{2}.ckpt'.format(WEIGHT_DIR, epoch, decoder_filename))
    print('Saving weights to {}'.format(WEIGHT_DIR))

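# Files produced by save_weights look like (illustrative epoch/model values):
#   output/weights/epoch_10_<model>_<run_date>_encoder.ckpt
#   output/weights/epoch_10_<model>_<run_date>_decoder_0.ckpt
# load_weights below expects the decoder checkpoint paths as a list, sorted so
# that decoder_0, decoder_1, ... align with the decoders sequence.
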
def load_weights(encoder, decoders, encoderfilename, decoderfilename):
    """Load the encoder checkpoint and one checkpoint per decoder."""
    mindspore.load_param_into_net(encoder, mindspore.load_checkpoint(encoderfilename))
    print('encoder path: {}'.format(encoderfilename))
    # Sort so decoder_0, decoder_1, ... checkpoints line up with the decoders.
    decoderfilename.sort()
    for decoder, ckpt in zip(decoders, decoderfilename):
        print('decoder path: {}'.format(ckpt))
        mindspore.load_param_into_net(decoder, mindspore.load_checkpoint(ckpt))
    print('Loading weights finished')


def save_results(det_roc_obs, seg_roc_obs, seg_pro_obs, model_name, class_name, run_date):
    """Write the best detection/segmentation scores and their epochs to a text file."""
    result = '{:.2f},{:.2f},{:.2f} \t\tfor {:s}/{:s}/{:s} at epoch {:d}/{:d}/{:d} for {:s}\n'.format(
        det_roc_obs.max_score, seg_roc_obs.max_score, seg_pro_obs.max_score,
        det_roc_obs.name, seg_roc_obs.name, seg_pro_obs.name,
        det_roc_obs.max_epoch, seg_roc_obs.max_epoch, seg_pro_obs.max_epoch, class_name)
    if not os.path.exists(RESULT_DIR):
        os.makedirs(RESULT_DIR)
    with open(os.path.join(RESULT_DIR, '{}_{}.txt'.format(model_name, run_date)), 'w') as fp:
        fp.write(result)
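
# Minimal usage sketch of the schedule helpers (assumptions: a config object
# exposing the attributes referenced above; all values here are illustrative,
# not the project's defaults).
if __name__ == '__main__':
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        lr=1e-3, lr_cosine=True, lr_decay_rate=0.1, lr_decay_epochs=[10, 20],
        meta_epochs=25, lr_warm=True, lr_warm_epochs=2,
        lr_warmup_from=1e-4, lr_warmup_to=1e-3,
        bs=32, input_size=256, pool_layers=3)

    # Build a per-step schedule for the coarsest pooled scale and inspect it.
    schedule = op_get_lr_list(cfg, epochs=2, subepochs=4, batches=8,
                              pool_layer=3, N=256)
    print(len(schedule), schedule[0], schedule[-1])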