|
- import torch
- import torch.nn as nn
- from torch.nn import init
- import torch.nn.functional as F
- import torch.optim as optim
-
- import time
-
- import utils
- # A Toy Dataset
- data = torch.tensor([[0,0],[0,1],[1,0],[1,1.]])
- target = torch.tensor([[0],[0],[1],[1.]])
-
-
# A toy model
class Net(nn.Module):
    """Minimal two-layer MLP: 2 -> 2 -> 1 with a ReLU in between."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 2)  # input layer: 2 features -> 2 hidden
        self.fc2 = nn.Linear(2, 1)  # output layer: 2 hidden -> 1 value

    def forward(self, x):
        """Apply fc1, a ReLU nonlinearity, then fc2; returns the raw output."""
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
- utils.setup_seed(utils.seed)
-
if __name__ == '__main__':
    import HiStar

    # Register this process with the federated-learning server as client 0.
    # NOTE(review): host/port/client_num/verbose come from the project-local
    # utils module; ClientWorker's first argument is unused here (None) —
    # confirm its meaning against HiStar's API.
    client = HiStar.ClientWorker(None, utils.host, utils.port, 0,
                                 utils.client_num, "cpu",
                                 verbose=utils.verbose)

    model = Net()
    # model = client.get_model()  # alternative: fetch the shared model from the server

    # Wrap a plain local SGD optimizer so each step() goes through the
    # federated protocol rather than being applied purely locally.
    opt = optim.SGD(params=model.parameters(), lr=0.1)
    opt = HiStar.FedOptim(opt, client)

    t_start = time.time()
    for _ in range(utils.iters):
        # Dump current parameters so federated updates are visible per round.
        print("-----------------------")
        print(list(model.parameters()))
        print("-----------------------")

        t_iter = time.time()
        # 1) erase previous gradients (if they exist)
        opt.zero_grad()

        # 2) make a prediction
        pred = model(data)

        # 3) sum-of-squared-errors loss against the toy targets
        loss = ((pred - target) ** 2).sum()

        # 4) figure out which weights caused us to miss
        loss.backward()

        # 5) apply the (federated) weight update
        opt.step()
        t_done = time.time()

        # 6) report progress; loss.item() extracts the Python scalar —
        # the legacy .data attribute is discouraged in modern PyTorch.
        print("loss:%s time:%s " % (loss.item(), t_done - t_iter))
    print(time.time() - t_start)
|