- """
- Based on Deit: Facebook, Inc.
- https://github.com/facebookresearch/deit/blob/main/main.py
- """

import argparse
import datetime
import time
import torch.backends.cudnn as cudnn
import json
import torch
import numpy as np
from pathlib import Path

from timm.data import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler

from datasets import build_dataset
from engine import train_one_epoch, evaluate
from samplers import RASampler
from models import *
import utils
from swin_models import *
import logging as logger
from mobilenetv2 import my_mobilenet_v2, timm_mobilenet_v2, vis_mobilenet_v2

from torchvision.models import resnet18
from torchvision.models import resnet50
from compared_models import CSWin_64_12211_tiny_224, CSWin_64_24322_small_224
from compared_models import pvt_small, pvt_medium, pvt_tiny
from compared_models import pvt_v2_b0, pvt_v2_b1, pvt_v2_b2, pvt_v2_b2_li, pvt_v2_b3, pvt_v2_b4
from compared_models import t2t_vit_t_14, t2t_vit_t_19
from compared_models import Conformer_tiny_patch16, Conformer_small_patch16
from compared_models import cvt_13, cvt_21
from compared_models import focal_s, focal_t
from compared_models import autoformer_t, autoformer_s

from timm.models import deit_tiny_patch16_224, deit_small_patch16_224, deit_tiny_distilled_patch16_224, deit_small_distilled_patch16_224, deit_base_patch16_224, deit_base_distilled_patch16_224
from timm.models import regnety_040, regnety_080
from timm.models.swin_transformer import swin_tiny_patch4_window7_224, swin_small_patch4_window7_224
from timm.models.efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3, efficientnet_b4, efficientnet_b5

def get_args_parser():
    parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
    parser.add_argument('--batch-size', default=256, type=int)
    parser.add_argument('--epochs', default=300, type=int)

    # Model parameters
    parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')

    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')

    parser.add_argument('--qk-scale-factor', type=float, default=None, metavar='PCT',
                        help='scale q & k in self-attention: scale = head_dim ** qk-scale-factor')
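    # For reference, standard scaled dot-product attention uses head_dim ** -0.5;
    # this flag exposes the exponent so alternative scalings can be tried.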

    # Optimizer parameters
    parser.add_argument('--amp', action='store_true', default=False,
                        help='use automatic mixed precision (AMP)')
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower LR bound for cyclic schedulers that hit 0 (default: 1e-5)')

    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')
    parser.add_argument('--skip_test', action='store_true', default=False,
                        help='skip evaluation for the first 250 epochs')

    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic; default: "bicubic")')

    parser.add_argument('--std-aug', action='store_true', default=False)
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)

    # Random erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')

    # Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params: per "batch", "pair", or "elem"')

    # Dataset parameters
    parser.add_argument('--data-path', default='../../dataset/imagenet', type=str,
                        help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'IMNET100', 'IMNET10'],
                        type=str, help='dataset to use')

    parser.add_argument('--output_dir', default='',
                        help='path where to save; empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist-eval', action='store_true', default=False, help='Enable distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for (sometimes) more efficient transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
                        help='Do not pin CPU memory in DataLoader.')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser

def main(args):
    utils.init_distributed_mode(args)

    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)

    cudnn.benchmark = True
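    # cudnn.benchmark lets cuDNN auto-tune convolution algorithms; it helps
    # here because every timed batch has the same fixed input shape.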

    # Only the validation set is needed for throughput measurement; the
    # training dataset, RASampler, and train loader from the original DeiT
    # script are omitted here.
    dataset_val, _ = build_dataset(is_train=False, args=args)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=int(args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )

    mixup_fn = None
    mixup_active = args.mixup > 0. or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=1000)
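    # mixup_fn is constructed only to mirror the original training script; it
    # is never applied in this throughput-only variant.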

    print(f"Creating model: {args.model}")

    if 'visformer' in args.model:
        # Visformer variants accept extra regularization / attention-scale
        # arguments; other models are built with their defaults.
        model = eval(args.model)(
            num_classes=1000,
            drop_rate=args.drop,
            drop_path_rate=args.drop_path,
            qk_scale=args.qk_scale_factor
        )
    else:
        # eval() resolves the model name to one of the constructors imported above.
        model = eval(args.model)()

    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
    args.lr = linear_scaled_lr
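    # Linear LR scaling as in DeiT: e.g. with the default --lr 5e-4, a per-GPU
    # batch size of 256 on 8 processes gives 5e-4 * 256 * 8 / 512 = 2e-3.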
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()
    lr_scheduler, _ = create_scheduler(args, optimizer)

    # The optimizer, loss scaler, and scheduler above mirror the training
    # script; only the throughput measurement below actually runs.
    throughput(data_loader_val, model, logger)

@torch.no_grad()
def throughput(data_loader, model, logger):
    model.eval()
    for idx, (images, _) in enumerate(data_loader):
        images = images.cuda(non_blocking=True)
        batch_size = images.shape[0]
        # Warm up for 50 iterations so kernel selection and caching settle.
        for i in range(50):
            model(images)
        torch.cuda.synchronize()
        print("throughput averaged over 30 runs")
        tic1 = time.time()
        for i in range(30):
            model(images)
        torch.cuda.synchronize()
        tic2 = time.time()
        print(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
        # Only the first batch is timed; exit after one iteration of the loader.
        return
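

# An alternative sketch (not part of the original script): timing with CUDA
# events instead of time.time(). The name throughput_cuda_events and its
# defaults are illustrative assumptions, not from the source.
@torch.no_grad()
def throughput_cuda_events(data_loader, model, warmup=50, iters=30):
    model.eval()
    images, _ = next(iter(data_loader))           # measure on the first batch only
    images = images.cuda(non_blocking=True)
    for _ in range(warmup):                       # warm-up passes
        model(images)
    torch.cuda.synchronize()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        model(images)
    end.record()
    torch.cuda.synchronize()
    elapsed_s = start.elapsed_time(end) / 1000.0  # elapsed_time() returns milliseconds
    print(f"batch_size {images.shape[0]} throughput {iters * images.shape[0] / elapsed_s}")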


if __name__ == '__main__':
    parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
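# Example invocation (model name from the imports above; file name and paths
# are illustrative):
#   python main.py --model deit_small_patch16_224 --batch-size 256 \
#       --data-path /path/to/imagenet --data-set IMNET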