import os
os.environ["CUDA_VISIBLE_DEVICES"] = "4"  # hard-coded GPU pin; note this overrides the --gpu argument below
import sys
import torch
import numpy as np

import time
import logging
import provider
import importlib
import shutil
import argparse

from pathlib import Path
from tqdm import tqdm
from data_utils.CloudPointsDataLoader import AllCloudPointsDataLoader
import torch.nn.functional as F

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models_yizx'))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
'''LOG'''
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)

# em_dict maps image_name -> list of per-round exact-match results (True / False)
em_dict = {}

def log_string(msg):
    logger.info(msg)
    print(msg)

def seed_torch(seed):
    # seed all relevant RNGs and force deterministic cuDNN for reproducible evaluation
    # random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def parse_args():
    '''PARAMETERS'''
    parser = argparse.ArgumentParser('training')
    parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
    parser.add_argument('--batch_size', type=int, default=8, help='batch size in training')
    parser.add_argument('--model', default='pointclouds_diffusion_model', help='model name [default: pointclouds_diffusion_model]')
    parser.add_argument('--num_category', default=2, type=int, choices=[2, 7, 17], help='number of classification categories')
    parser.add_argument('--epoch', default=10, type=int, help='number of epochs in training')
    parser.add_argument('--learning_rate', default=2e-4, type=float, help='learning rate in training')  # 0.001
    parser.add_argument('--num_point', type=int, default=8192, help='point number')
    parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
    parser.add_argument('--log_dir', type=str, default='outputs', help='experiment root')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate')
    parser.add_argument('--use_normals', action='store_true', default=False, help='use normals')
    parser.add_argument('--process_data', action='store_true', default=True, help='save data offline')  # store_true with default=True: effectively always True
    parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampling')

    parser.add_argument('--do_sample', type=int, default=1, help='whether to sample using the centroid algorithm')
    parser.add_argument('--data_concat', type=int, default=1,
                        help='whether to concatenate data: 1 = multi-label, 0 = single label')

    parser.add_argument('--latent_dim', type=int, default=1024, help='default 1024 for encoder_latent')
    parser.add_argument('--full_pc', type=int, default=1, help='whether to use full point clouds: 0 = False, 1 = True')

    # Model Arguments
    parser.add_argument('--num_steps', type=int, default=200)
    parser.add_argument('--beta_1', type=float, default=1e-4)
    parser.add_argument('--beta_T', type=float, default=0.05)
    parser.add_argument('--sched_mode', type=str, default='linear')
    parser.add_argument('--flexibility', type=float, default=0.0)
    parser.add_argument('--residual', type=eval, default=True, choices=[True, False])
    parser.add_argument('--resume', type=str, default=None)

    # Sample method values: 0 = take the front num_point points,
    #                       1 = centroid sampling of num_point points,
    #                       2 = uniform sampling

    # multi_label_used: 'full' for the multi_fc_parallel CLS model,
    #                   '0' to '16' for the single_fc CLS model
    parser.add_argument('--multi_label_used', type=str, default='16')
    parser.add_argument('--multi_round_eval_loading_prefix', type=str,
                        default='OnlyCLS0_stages2_frozeDiffusion_CLSOnly_diffusionCheckpoints-num_category_2-SampleMethod_1')

    return parser.parse_args()
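
# Example invocation (hypothetical script name; argument names match the parser above):
#   python multi_round_eval.py --num_category 2 --full_pc 1 --do_sample 1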


def load_model_with_allDataLoader(args):
    # build the diffusion-based classifier and a dataloader over the full dataset (batch_size=1, no shuffling)
    model = importlib.import_module(args.model)
    classifier = model.pointclouds_diffusion_model(args)
    dataset = AllCloudPointsDataLoader(args, process_data=args.process_data, data_concat=args.data_concat)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, drop_last=False)

    return classifier, dataloader

def multiRound_model_loading(args, model, SEED):
    if 'stages2_frozeDiffusion' in args.multi_round_eval_loading_prefix:
        # parse the round index from the loading prefix; this round's classification head is evaluated next
        this_round = int(args.multi_round_eval_loading_prefix.split('OnlyCLS')[-1].split('_')[0])
        args.multi_label_used = str(this_round)
        this_round_dir = 'log/classification/outputs_seed{}/OnlyCLS{}_stages2_frozeDiffusion_CLSOnly_diffusionCheckpoints-num_category_2-SampleMethod_1'.format(SEED, this_round)
        this_loading_model = os.path.join(this_round_dir, 'stage2_final_best_model.pth')

        checkpoint = torch.load(this_loading_model, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        log_string('>>> Use pretrained [Round {}] model >>>'.format(this_round))

        # advance the prefix so the next call loads the checkpoint of the following round
        this_round += 1
        args.multi_round_eval_loading_prefix = \
            'OnlyCLS{}_stages2_frozeDiffusion_CLSOnly_diffusionCheckpoints' \
            '-num_category_2-SampleMethod_1'.format(this_round)

        if not args.use_cpu:
            model = model.to(device)

        return args, model
    else:
        sys.exit('Unsupported multi_round_eval_loading_prefix: {}'.format(args.multi_round_eval_loading_prefix))

def do_eval_round(model, dataloader):
    global em_dict
    classifier = model.eval()
    allRight_count = 0
    sample_idx = 0
    for item in tqdm(dataloader):
        points, region_target, classify_target, image_name = item
        points = points.transpose(-1, -2)
        if args.full_pc == 0:
            points = points.view(-1, 3, args.num_point).float()
        elif args.full_pc == 1:
            points = points.reshape(1, 3, -1).float()
        if args.num_category == 2:
            # keep only the label column belonging to the classification head evaluated in this round
            this_task_cls_idx = int(args.multi_label_used)
            classify_target = classify_target[:, :, this_task_cls_idx]
            if args.full_pc == 0:
                pass
            elif args.full_pc == 1:
                classify_target = classify_target[:, 0]
            classify_target = classify_target.view(-1, 1).long()
            target = F.one_hot(classify_target, num_classes=2)
            target = target.view(-1, 2)

            if not args.use_cpu:
                points, target = points.to(device), target.to(device)
            with torch.no_grad():
                pred, trans_feat, diffusion_loss = classifier.get_code_with_diffusion_loss(points)
            probs = torch.sigmoid(pred)
            # use the position of the globally highest probability to pick the predicted class (column index)
            max_probs = probs.max()
            row_max_list = probs.argmax(dim=0).cpu().numpy()
            max_rowIndex = 0
            max_columnIndex = 0
            for i, each_row in enumerate(row_max_list):
                this_probs = probs[each_row, i]
                if this_probs == max_probs:
                    max_columnIndex = i
                    max_rowIndex = each_row

            pred_choice = max_columnIndex
            acc_target = classify_target.view(-1)
            # record per-sample exact-match correctness, keyed by image name
            is_correct = bool(pred_choice == acc_target[0])
            if is_correct:
                allRight_count += 1
            em_dict.setdefault(image_name, []).append(is_correct)
            sample_idx += 1

    thisRound_thisCLS_eval_em_avg = allRight_count / float(sample_idx)
    return thisRound_thisCLS_eval_em_avg

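# Evaluation flow: each of the 17 rounds loads that round's stage-2 checkpoint,
# runs one pass over the full dataloader, and records every sample's exact-match
# result in em_dict (keyed by image name) for later visualization.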
if __name__ == '__main__':
    torch.autograd.set_detect_anomaly(True)
    args = parse_args()
    SEED = 2279
    seed_torch(SEED)
    model, dataloader = load_model_with_allDataLoader(args)

    start_time = time.time()
    # 17 evaluation rounds: one pass over the dataloader per classification head (rounds 0-16)
    for i in range(17):
        args, model = multiRound_model_loading(args, model, SEED)
        _ = do_eval_round(model, dataloader)
    end_time = time.time()
    print('> Eval MultiRound for {} samples, using {:.2f}s. AVG {:.4f}s/sample'.format(
        len(dataloader), end_time - start_time, (end_time - start_time) / len(dataloader)))

    save_dir = 'log/classification/2023_multiRound_classificationResult/'
    os.makedirs(save_dir, exist_ok=True)
    np.save(os.path.join(save_dir, 'SEED{}_MultiRound_PointsDiffusion_emDict.npy'.format(SEED)), em_dict)
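    # Note: np.save pickles the dict inside the .npy file; a downstream script can recover it
    # with np.load(path, allow_pickle=True).item() (sketch, assuming NumPy's allow_pickle loading).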

    # then: python eval_tools/stage2_pc_diffusion_evalResult_vis.py