import time

import numpy as np

import mindspore
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.common.dtype as mstype
from mindspore import context

from dataset import AttDataset
from model_deepmar import Deep_Mar_v1

def extract_feat(model, height, width, datasets_path, partitions_path, root):
    """
    Extract features for all test images.
    Returns the features as a NumPy array of shape (N, L).
    """
    # ImageNet mean/std, scaled to the [0, 255] pixel range expected by C.Normalize
    resize_op = C.Resize((height, width))
    normalize_op = C.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                               std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    change_swap_op = C.HWC2CHW()

    trans_valtest = [resize_op, normalize_op, change_swap_op]
    type_cast_op = C2.TypeCast(mstype.float32)

    # build the test dataset
    test_datasetgen = AttDataset(datasets_path, partitions_path, 'test', root)
    test_dataset = mindspore.dataset.GeneratorDataset(source=test_datasetgen, column_names=["data", "label"],
                                                      shuffle=False)
    test_dataset = test_dataset.map(operations=trans_valtest, input_columns="data", num_parallel_workers=1)
    test_dataset = test_dataset.map(operations=type_cast_op, input_columns="label", num_parallel_workers=1)

    # apply batch operations (no shuffling for evaluation)
    test_dataset = test_dataset.batch(64, drop_remainder=False)

    # extract features for all images in the test set
    start_time = time.time()
    total_eps = test_dataset.get_dataset_size()
    N = len(test_datasetgen)
    start = 0
    data_iter = test_dataset.create_dict_iterator()
    for ep, data in enumerate(data_iter):
        images = data['data']
        feat_tmp = model(images).asnumpy()
        batch_size = feat_tmp.shape[0]
        if ep == 0:
            # allocate the output buffer once the per-image feature length is known
            feat = np.zeros((N, feat_tmp.size // batch_size))
        feat[start:start + batch_size, :] = feat_tmp.reshape((batch_size, -1))
        start += batch_size
    end_time = time.time()
    print('{} batches done, total {:.2f}s'.format(total_eps, end_time - start_time))
    return feat

# attribute recognition evaluation (runs on the test split)
def attribute_evaluate(model, height, width, datasets_path, partitions_path, root):
    print("extracting features for attribute recognition")
    pt_result = extract_feat(model, height, width, datasets_path, partitions_path, root)
    print("computing attribute recognition result")
    # collect the ground-truth attribute labels from the test dataset
    gt_result = np.zeros(pt_result.shape)
    test_datasetgen = AttDataset(datasets_path, partitions_path, 'test', root)
    for idx, label in enumerate(test_datasetgen.label):
        gt_result[idx, :] = label
    # binarize the predicted scores at 0.5
    pt_result[pt_result >= 0.5] = 1
    pt_result[pt_result < 0.5] = 0
    return attribute_evaluate_lidw(gt_result, pt_result)

def attribute_evaluate_lidw(gt_result, pt_result):
    """
    Input:
        gt_result, pt_result: N*L numpy arrays with 0/1 entries
    Output:
        result: a dictionary with label-based and instance-based metrics
            label-based: label_pos_acc, label_neg_acc, label_acc
            instance-based: instance_acc, instance_precision, instance_recall, instance_F1
    """
    # compute the label-based metrics
    if gt_result.shape != pt_result.shape:
        print('Shapes of ground-truth and predicted results differ')
    result = {}
    gt_pos = np.sum((gt_result == 1).astype(float), axis=0)
    gt_neg = np.sum((gt_result == 0).astype(float), axis=0)
    pt_pos = np.sum((gt_result == 1).astype(float) * (pt_result == 1).astype(float), axis=0)
    pt_neg = np.sum((gt_result == 0).astype(float) * (pt_result == 0).astype(float), axis=0)
    label_pos_acc = 1.0 * pt_pos / gt_pos
    label_neg_acc = 1.0 * pt_neg / gt_neg
    label_acc = (label_pos_acc + label_neg_acc) / 2
    avg_acc = (pt_neg + pt_pos) / (gt_neg + gt_pos)
    result['label_pos_acc'] = label_pos_acc
    result['label_neg_acc'] = label_neg_acc
    result['label_acc'] = label_acc
    result['avg_acc'] = avg_acc
    result['all_acac'] = np.mean(avg_acc)
    # compute the instance-based metrics (accuracy, precision, recall, F1)
    gt_pos = np.sum((gt_result == 1).astype(float), axis=1)
    pt_pos = np.sum((pt_result == 1).astype(float), axis=1)
    intersect_pos = np.sum((gt_result == 1).astype(float) * (pt_result == 1).astype(float), axis=1)
    union_pos = np.sum(((gt_result == 1) + (pt_result == 1)).astype(float), axis=1)
    # guard against examples with no positive ground-truth or predicted labels
    cnt_eff = float(gt_result.shape[0])
    for idx, key in enumerate(gt_pos):
        if key == 0:
            union_pos[idx] = 1
            pt_pos[idx] = 1
            gt_pos[idx] = 1
            cnt_eff = cnt_eff - 1
            continue
        if pt_pos[idx] == 0:
            pt_pos[idx] = 1
    instance_acc = np.sum(intersect_pos / union_pos) / cnt_eff
    instance_precision = np.sum(intersect_pos / pt_pos) / cnt_eff
    instance_recall = np.sum(intersect_pos / gt_pos) / cnt_eff
    instance_F1 = 2 * instance_precision * instance_recall / (instance_precision + instance_recall)
    result['instance_acc'] = instance_acc
    result['instance_precision'] = instance_precision
    result['instance_recall'] = instance_recall
    result['instance_F1'] = instance_F1
    return result
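
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original evaluation flow). The
# checkpoint path, pickle file names, image root, and input size below are
# assumptions, and the Deep_Mar_v1 constructor arguments are unknown here;
# adjust everything to match model_deepmar.py and the actual data layout.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from mindspore.train.serialization import load_checkpoint, load_param_into_net

    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')  # assumption: CPU run

    net = Deep_Mar_v1()  # hypothetical: pass whatever arguments the model requires
    load_param_into_net(net, load_checkpoint('./deepmar.ckpt'))  # hypothetical checkpoint path
    net.set_train(False)

    result = attribute_evaluate(net, 224, 224,
                                'peta_dataset.pkl', 'peta_partition.pkl', './images')
    print('all_acac (mean per-attribute accuracy): {:.4f}'.format(result['all_acac']))
    print('instance_F1: {:.4f}'.format(result['instance_F1']))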