|
- import hub
- import time
- import torch
- from torchvision import transforms, models
-
# Load the Fashion-MNIST train/test splits from Activeloop's public hub
# (network I/O; cached locally by hub after the first download).
ds_train = hub.load('hub://activeloop/fashion-mnist-train')
ds_test = hub.load('hub://activeloop/fashion-mnist-test')
def transform(sample_in):
    """Apply the module-level `tform` augmentation pipeline to a sample's image.

    Labels are passed through untouched; `tform` is resolved at call time,
    so its later definition in this module is fine.
    """
    augmented = tform(sample_in['images'])
    return {'images': augmented, 'labels': sample_in['labels']}
-
# Per-image preprocessing/augmentation pipeline used by `transform` above.
tform = transforms.Compose([
    transforms.ToPILImage(), # Must convert to PIL image for subsequent operations to run
    transforms.RandomRotation(20), # Image augmentation
    transforms.ToTensor(), # Must convert to pytorch tensor for subsequent operations to run
    transforms.Normalize([0.5], [0.5]),  # single-channel mean/std — assumes grayscale input; TODO confirm
])
batch_size = 32

# hub datasets expose .pytorch(), which wraps them in a PyTorch-compatible loader.
train_loader = ds_train.pytorch(num_workers = 0, shuffle = True, transform = transform, batch_size = batch_size)
test_loader = ds_test.pytorch(num_workers = 0, transform = transform, batch_size = batch_size)
# Prefer GPU when available; all tensors/model are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
net = models.resnet18(pretrained=True)
# Convert model to grayscale: replace the stem conv so it accepts 1 input
# channel instead of RGB (pretrained conv1 weights are discarded).
net.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)

# Update the fully connected layer based on the number of classes in the dataset
net.fc = torch.nn.Linear(net.fc.in_features, len(ds_train.labels.info.class_names))

net.to(device)

# Specify the loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.1)
def train_model(loader, test_loader, model, epochs = 1):
    """Train `model` on `loader` for `epochs`, printing running statistics
    every 10 batches and evaluating on `test_loader` after each epoch.

    Relies on the module-level `device`, `criterion`, `optimizer`, and
    `test_model`. Prints progress; returns None.
    """
    for epoch in range(epochs):  # loop over the dataset multiple times
        # Ensure train mode each epoch (evaluation may have left the model
        # in eval mode, which would freeze BatchNorm statistics).
        model.train()

        # Zero the performance stats for each epoch
        running_loss = 0.0
        batches_since_print = 0
        start_time = time.time()
        total = 0
        correct = 0
        accuracy = 0.0  # defined even if the loader yields no batches

        for i, data in enumerate(loader):
            # get the inputs; data is a dict of {'images': ..., 'labels': ...}
            inputs = data['images']
            # Squeeze only the trailing label dimension: a plain squeeze()
            # would collapse a batch of size 1 to a 0-d tensor and break
            # labels.size(0) and CrossEntropyLoss.
            # (assumes labels arrive as (batch, 1) — TODO confirm against hub)
            labels = torch.squeeze(data['labels'], dim=-1)

            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs.float())
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Running accuracy over the epoch so far.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            accuracy = 100 * correct / total

            # Print performance statistics
            running_loss += loss.item()
            batches_since_print += 1
            if i % 10 == 0:    # print every 10 batches
                batch_time = time.time()
                speed = (i + 1) / (batch_time - start_time)
                # Report the mean loss over the interval, not the raw sum
                # (the original printed a sum over a variable batch count).
                print('[%d, %5d] loss: %.3f, speed: %.2f, accuracy: %.2f %%' %
                      (epoch + 1, i, running_loss / batches_since_print, speed, accuracy))

                running_loss = 0.0
                batches_since_print = 0

        print('Testing Model Performance')
        test_model(test_loader, model)

    print('Finished Training')
-
-
def test_model(loader, model):
    """Evaluate `model` on `loader` and print the overall accuracy.

    Relies on the module-level `device`. Switches the model to eval mode
    for the duration of the evaluation and restores its previous mode
    before returning, so callers are unaffected. Prints results; returns None.
    """
    # Use eval mode so BatchNorm uses running statistics (and dropout, if
    # any, is disabled); remember the prior mode so this function is safe
    # to call mid-training.
    was_training = model.training
    model.eval()
    total = 0
    correct = 0
    with torch.no_grad():
        for data in loader:
            # get the inputs; data is a dict of {'images': ..., 'labels': ...}
            inputs = data['images']
            # Squeeze only the trailing dim so a size-1 batch doesn't
            # collapse to a 0-d tensor (see train_model for details).
            labels = torch.squeeze(data['labels'], dim=-1)

            inputs = inputs.to(device)
            labels = labels.to(device)

            # forward only — no optimizer interaction is needed under
            # no_grad (the original's optimizer.zero_grad() was a no-op
            # that coupled evaluation to the global optimizer).
            outputs = model(inputs.float())

            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    model.train(mode=was_training)

    print('Finished Testing')
    if total:
        print('Testing accuracy: %.1f %%' % (100 * correct / total))
    else:
        # Original raised UnboundLocalError on an empty loader.
        print('Testing accuracy: n/a (empty loader)')
# Entry point: run a single training epoch when executed as a script.
if __name__ == '__main__':
    train_model(train_loader, test_loader, net, epochs = 1)
|