|
## Official NNI test tutorial. This file is only a template; the actual model tuning is driven from a separate file, NNI_exp.
- import nni
- import torch
- from torch import nn
- from torch.utils.data import DataLoader
- from torchvision import datasets
- from torchvision.transforms import ToTensor
-
-
- # 准备进行超参调优的参数
- params = {
- 'features': 512,
- 'lr': 0.001,
- 'momentum': 0,
- }
-
- # 获取调优后的参数,返回调优算法生成的超参组合
- optimized_params = nni.get_next_parameter()
- params.update(optimized_params)
- print(params, "\n")
-
- training_data = datasets.FashionMNIST(root="data", train=True, download=True, transform=ToTensor())
- test_data = datasets.FashionMNIST(root="data", train=False, download=True, transform=ToTensor())
-
- batch_size = 64
-
- train_dataloader = DataLoader(training_data, batch_size=batch_size)
- test_dataloader = DataLoader(test_data, batch_size=batch_size)
-
-
- # 使用超参构建模型
- device = "cuda" if torch.cuda.is_available() else "cpu"
- print(f"Using {device} device")
-
- class NeuralNetwork(nn.Module):
- def __init__(self):
- super(NeuralNetwork, self).__init__()
- self.flatten = nn.Flatten()
- self.linear_relu_stack = nn.Sequential(
- nn.Linear(28*28, params['features']),
- nn.ReLU(),
- nn.Linear(params['features'], params['features']),
- nn.ReLU(),
- nn.Linear(params['features'], 10)
- )
-
- def forward(self, x):
- x = self.flatten(x)
- logits = self.linear_relu_stack(x)
- return logits
-
- model = NeuralNetwork().to(device)
-
- loss_fn = nn.CrossEntropyLoss()
- optimizer = torch.optim.SGD(model.parameters(), lr=params['lr'], momentum=params['momentum'])
-
-
- def train(dataloader, model, loss_fn, optimizer):
- size = len(dataloader.dataset)
- model.train()
- for batch, (X, y) in enumerate(dataloader):
- X, y = X.to(device), y.to(device)
- pred = model(X)
- loss = loss_fn(pred, y)
- optimizer.zero_grad()
- loss.backward()
- optimizer.step()
-
- def test(dataloader, model, loss_fn):
- size = len(dataloader.dataset)
- num_batches = len(dataloader)
- model.eval()
- test_loss, correct = 0, 0
- with torch.no_grad():
- for X, y in dataloader:
- X, y = X.to(device), y.to(device)
- pred = model(X)
- test_loss += loss_fn(pred, y).item()
- correct += (pred.argmax(1) == y).type(torch.float).sum().item()
- test_loss /= num_batches
- correct /= size
- return correct
-
- epochs = 10
- for t in range(epochs):
- print(f"Epoch {t+1}\n-------------------------------")
- train(train_dataloader, model, loss_fn, optimizer)
- accuracy = test(test_dataloader, model, loss_fn)
- nni.report_intermediate_result(accuracy) # 直接调nni的API省去了自己写打印的步骤
- nni.report_final_result(accuracy)
|