|
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
- # --------------------------------------------------------
- # References:
- # DeiT: https://github.com/facebookresearch/deit
- # BEiT: https://github.com/microsoft/unilm/tree/master/beit
- # --------------------------------------------------------
- import argparse
- import datetime
- import json
- import numpy as np
- import os
- import time
- from pathlib import Path
-
- import torch
- import torch.backends.cudnn as cudnn
- from torch.utils.tensorboard import SummaryWriter
- import torchvision.transforms as transforms
- import torchvision.datasets as datasets
- from torch.utils.data import random_split
-
- import timm
-
- assert timm.__version__ == "0.3.2" # version check
- import timm.optim.optim_factory as optim_factory
-
- import util.misc as misc
- from util.misc import NativeScalerWithGradNormCount as NativeScaler
-
- # import models_mae
- import models_mae_3chans
-
- from engine_pretrain import train_one_epoch
-
- # import flwr as fl
- import AISyncore
- from collections import OrderedDict
-
- import math
- import sys
- from typing import Iterable
-
- import torch
-
- import util.misc as misc
- import util.lr_sched as lr_sched
-
- os.environ["CUDA_VISIBLE_DEVICES"] = "7"
-
-
def get_args_parser():
    """Build the command-line parser for MAE pre-training (federated client 2).

    Returns:
        argparse.ArgumentParser: parser created with ``add_help=False`` so it
        can also be composed as a parent parser. Covers model, optimizer,
        dataset/logging paths, and distributed-training options.
    """
    parser = argparse.ArgumentParser('MAE pre-training', add_help=False)
    # Fixed: help string previously had an unbalanced '(' (missing ')').
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
    parser.add_argument('--epochs', default=400, type=int)
    parser.add_argument('--accum_iter', default=1, type=int,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # Model parameters
    parser.add_argument('--model', default='client2_mae_vit_base_patch16', type=str, metavar='MODEL',
                        help='Name of model to train')

    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size')

    parser.add_argument('--mask_ratio', default=0.75, type=float,
                        help='Masking ratio (percentage of removed patches).')

    parser.add_argument('--norm_pix_loss', action='store_true',
                        help='Use (per-patch) normalized pixels as targets for computing loss')
    parser.set_defaults(norm_pix_loss=False)

    # Optimizer parameters
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')

    # If --lr is left as None, main() derives it from --blr and the effective
    # batch size (absolute_lr = base_lr * total_batch_size / 256).
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')

    parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
                        help='epochs to warmup LR')

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')

    parser.add_argument('--output_dir', default='./output/client2',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output/client2',
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='',
                        help='resume from checkpoint')

    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    # --pin_mem / --no_pin_mem share a single boolean destination (default True).
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    return parser
-
-
def main(args: argparse.Namespace) -> None:
    """Run MAE pre-training for federated client 2.

    Builds the local dataset (70/30 train/test split of ./dataset/client2),
    constructs the client-side MAE model plus a separate server-architecture
    model used only for evaluation, then registers a NumPy client with the
    AISyncore (Flower-style) federated server and blocks inside
    ``run_numpy_client`` until the federation finishes. Per-round training is
    driven by the server calling ``maeClient.fit`` / ``maeClient.evaluate``.
    """
    misc.init_distributed_mode(args)

    # print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    # print("{}".format(args).replace(', ', ',\n'))

    device = torch.device(args.device)

    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)

    cudnn.benchmark = True

    # simple augmentation (MAE pre-training recipe)
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3),  # 3 is bicubic
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        # transforms.Normalize((0.5), (0.5))])
    # dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
    # dataset_train = datasets.CIFAR10(root = "/raid3/jiaqi/MAE/data",
    # # dataset_train = datasets.MNIST(root = "/home/jiaqi/MAE/data/",
    #                              transform=transform_train,
    #                              train = True,
    #                              download = False)
    # NOTE(review): --data_path is ignored here; the client always reads the
    # hard-coded ./dataset/client2 folder.
    dataset_client2 = datasets.ImageFolder(os.path.join('./dataset', 'client2'), transform=transform_train)
    # 70/30 train/test split with a fixed generator seed so every run (and
    # every client restart) sees the same partition.
    length_of_train = int(0.7 * len(dataset_client2))
    length_of_test = len(dataset_client2) - length_of_train
    dataset_train, testset = random_split(
        dataset=dataset_client2,
        lengths=[length_of_train, length_of_test],
        generator=torch.Generator().manual_seed(0)
    )
    print(dataset_train)

    # NOTE(review): transform_test is built but never applied — the test split
    # inherits transform_train (including random crops/flips) from
    # dataset_client2. Confirm whether deterministic eval transforms were
    # intended.
    transform_test = transforms.Compose([
        transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3),  # 3 is bicubic
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    # testset = datasets.CIFAR10(root = "/raid3/jiaqi/MAE/data", train=False, download=True, transform=transform_test)
    # testloader = torch.utils.data.DataLoader(testset, batch_size=32)
    testloader = torch.utils.data.DataLoader(
        testset, sampler=torch.utils.data.RandomSampler(testset),
        batch_size=32,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )

    # Distributed sampler is always used here (branch hard-coded to True).
    if True:  # args.distributed:
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
        print("Sampler_train = %s" % str(sampler_train))
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)

    # TensorBoard writer only on the main process.
    if global_rank == 0 and args.log_dir is not None:
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None

    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )

    # define the model (client architecture selected by --model)
    model = models_mae_3chans.__dict__[args.model](norm_pix_loss=args.norm_pix_loss)

    model.to(device)

    # Separate model with the *server* architecture; used only to load the
    # aggregated server parameters during evaluate().
    server_model = models_mae_3chans.__dict__['mae_vit_base_patch16'](norm_pix_loss=args.norm_pix_loss)
    server_model.to(device)

    model_without_ddp = model
    print("Model = %s" % str(model_without_ddp))

    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()

    # Linear lr scaling rule: absolute_lr = base_lr * eff_batch_size / 256.
    if args.lr is None:  # only base_lr is specified
        args.lr = args.blr * eff_batch_size / 256

    print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
    print("actual lr: %.2e" % args.lr)

    print("accumulate grad iterations: %d" % args.accum_iter)
    print("effective batch size: %d" % eff_batch_size)

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module

    # following timm: set wd as 0 for bias and norm layers
    param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay)
    optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
    print(optimizer)
    loss_scaler = NativeScaler()

    # Optionally resume model/optimizer/scaler state from --resume.
    misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)

    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()  # NOTE(review): never read afterwards

    # NOTE(review): this nested definition shadows the module-level
    # engine_pretrain.train_one_epoch imported at the top of the file; the
    # local copy (print_freq=5) is the one maeClient.fit actually calls.
    def train_one_epoch(model: torch.nn.Module,
                        data_loader: Iterable, optimizer: torch.optim.Optimizer,
                        device: torch.device, epoch: int, loss_scaler,
                        log_writer=None,
                        args=None):
        """Train `model` for one epoch over `data_loader`; return averaged stats.

        Uses AMP autocast, gradient accumulation (args.accum_iter), and a
        per-iteration lr schedule. Aborts the process if the loss is non-finite.
        """
        model.train(True)
        metric_logger = misc.MetricLogger(delimiter="  ")
        metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
        header = 'Epoch: [{}]'.format(epoch)
        print_freq = 5

        accum_iter = args.accum_iter

        optimizer.zero_grad()

        if log_writer is not None:
            print('log_dir: {}'.format(log_writer.log_dir))

        for data_iter_step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):

            # we use a per iteration (instead of per epoch) lr scheduler
            if data_iter_step % accum_iter == 0:
                lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)

            samples = samples.to(device, non_blocking=True)

            with torch.cuda.amp.autocast():
                loss, _, _ = model(samples, mask_ratio=args.mask_ratio)

            loss_value = loss.item()

            if not math.isfinite(loss_value):
                print("Loss is {}, stopping training".format(loss_value))
                sys.exit(1)

            # Scale down so accumulated gradients average over accum_iter steps;
            # loss_scaler handles AMP scaling and steps the optimizer only on
            # update_grad=True iterations.
            loss /= accum_iter
            loss_scaler(loss, optimizer, parameters=model.parameters(),
                        update_grad=(data_iter_step + 1) % accum_iter == 0)
            if (data_iter_step + 1) % accum_iter == 0:
                optimizer.zero_grad()

            torch.cuda.synchronize()

            metric_logger.update(loss=loss_value)

            lr = optimizer.param_groups[0]["lr"]
            metric_logger.update(lr=lr)

            loss_value_reduce = misc.all_reduce_mean(loss_value)
            if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
                """ We use epoch_1000x as the x-axis in tensorboard.
                This calibrates different curves when batch size changes.
                """
                epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
                log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
                log_writer.add_scalar('lr', lr, epoch_1000x)


        # gather the stats from all processes
        metric_logger.synchronize_between_processes()
        print("Averaged stats:", metric_logger)
        return {k: meter.global_avg for k, meter in metric_logger.meters.items()}

    # Flower-style NumPy client: closes over the model, optimizer, loaders and
    # log_writer built above. The server drives fit()/evaluate() each round.
    class maeClient(AISyncore.client.NumPyClient):
        def __init__(self):
            super().__init__()
            # self.trainer=trainer
            # self.logger=logger
            # self.opt=opt
            self.optimizer=optimizer          # shared AdamW from the enclosing scope
            self.model=model                  # client-side MAE model
            self.epoch=0                      # local round counter, bumped in evaluate()
            self.best = 1e10                  # NOTE(review): never updated/used
            self.train_loader=data_loader_train
            self.testloader=testloader
            self.args = args
            self.server_model = server_model  # server-architecture model for eval
            self.accum_iter = args.accum_iter
            self.print_freq = 5
            self.header = 'Epoch: [{}]'.format(self.epoch)
            # self.val_loader=val_loader
        def get_parameters(self):
            """Return the client model's state_dict values as numpy arrays (server order)."""
            # print(self.model.state_dict().items())
            print("GETTING PARAMETERS")
            # print([dataset_train.cpu().numpy() for _, dataset_train in self.model.state_dict().items()].size)
            # print([dataset_train.cpu().numpy() for _, dataset_train in self.model.state_dict().items()][0])
            # NOTE(review): the loop variable shadows the outer dataset_train
            # name; it iterates state_dict tensors, not the dataset.
            return [dataset_train.cpu().numpy() for _, dataset_train in self.model.state_dict().items()]

        # def get_initial_parameters(self):
        #     print("GETTING INITIAL PARAMETERS")
        #     return [dataset_train.cpu().numpy() for _, dataset_train in self.server_model.state_dict().items()]

        def set_parameters(self, parameters):
            """Load server-sent numpy parameters into the client model (key order must match)."""
            params_dict = zip(self.model.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
            self.model.load_state_dict(state_dict, strict=True)

        def set_eval_parameters(self, parameters):
            """Load server-sent numpy parameters into the server-architecture model."""
            params_dict = zip(self.server_model.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
            self.server_model.load_state_dict(state_dict, strict=True)

        def fit(self, parameters, config):
            """One federated round: load global weights, train one local epoch, return weights."""
            self.set_parameters(parameters)
            # # train(self.model, self.train_loader, epochs=1)
            # model.train(True)
            # metric_logger = misc.MetricLogger(delimiter="  ")
            # metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
            # header = 'Epoch: [{}]'.format(self.epoch)
            # print_freq = 20

            # accum_iter = args.accum_iter

            # optimizer.zero_grad()
            # train(self.model, self.train_loader, epochs=1)

            train_stats = train_one_epoch(
                self.model, self.train_loader,
                self.optimizer, device, self.epoch, loss_scaler,
                log_writer=log_writer,
                args=self.args
            )

            # print(train_stats)
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                            'epoch': self.epoch,}

            if args.output_dir and misc.is_main_process():
                if log_writer is not None:
                    log_writer.flush()
                with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                    f.write(json.dumps(log_stats) + "\n")

            # NOTE(review): returns len(train_loader) (number of batches) as the
            # sample count; FedAvg weighting conventionally expects the number
            # of examples — confirm against the server's aggregation.
            return self.get_parameters(), len(self.train_loader), {}

        def evaluate(self, parameters, config):
            """Evaluate the aggregated server model on the local test split.

            NOTE(review): only `loss` from the LAST test batch is reported, and
            `loss` would be unbound if testloader is empty (possible since
            drop_last=True with batch_size 32). Also increments the local epoch
            counter and checkpoints every 20 rounds.
            """
            # self.set_parameters(parameters)
            self.set_eval_parameters(parameters)

            with torch.no_grad():
                for data in self.testloader:
                    samples = data[0]
                    samples = samples.to(device, non_blocking=True)
                    loss, pred, mask = self.server_model(samples, mask_ratio=args.mask_ratio)

            if args.output_dir and (self.epoch % 20 == 0 or self.epoch + 1 == args.epochs):
                misc.save_model(
                    args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                    loss_scaler=loss_scaler, epoch=self.epoch)

            test_stats = {'lr': optimizer.param_groups[0]["lr"], 'loss': float(loss)}
            log_stats = {**{f'test_{k}': v for k, v in test_stats.items()},
                            'epoch': self.epoch,}

            if args.output_dir and misc.is_main_process():
                if log_writer is not None:
                    log_writer.flush()
                with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                    f.write(json.dumps(log_stats) + "\n")

            print("[Epoch %d] Evaluate loss: %.4f" %(self.epoch,loss))
            self.epoch += 1

            # "accuracy" metric actually carries the reconstruction loss.
            return float(loss), len(self.train_loader), {"accuracy": float(loss)}


    # Connect to the federated server and block until the federation ends.
    server_address = "192.168.202.129"
    AISyncore.client.run_numpy_client(server_address + ":8000", client=maeClient())
-
-
if __name__ == '__main__':
    # Parse CLI options, make sure the checkpoint/log directory exists,
    # then hand off to the training entry point.
    cli_args = get_args_parser().parse_args()
    if cli_args.output_dir:
        Path(cli_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(cli_args)
|