|
- # Custom Pytorch model from:
- # https://github.com/brain-score/candidate_models/blob/master/examples/score-model.ipynb
-
- # from model_tools.check_submission import check_models
- import numpy as np
- import torch
- from torch import nn
- import functools
- from model_tools.activations.pytorch import PytorchWrapper
- # from model_tools.brain_transformation import ModelCommitment
- from model_tools.activations.pytorch import load_preprocess_images
- # from brainscore import score_model
-
-
- """
- Template module for a base model submission to brain-score
- """
-
- # define your custom model here:
class MyModel(nn.Module):
    """Tiny example network: one 3x3/stride-3 conv, flatten, linear map to 10 units.

    Module attribute names (conv1, relu1, linear, relu2) are part of the
    public contract: brain-score fetches activations by these names.
    """

    def __init__(self):
        super(MyModel, self).__init__()
        # input images are 224x224 RGB; a 3x3 kernel with stride 3 and no
        # padding yields a 74x74 single-channel feature map
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=1, kernel_size=3, stride=3)
        self.relu1 = torch.nn.ReLU()
        flat_features = ((224 - 3) // 3 + 1) ** 2  # 74 * 74 = 5476 flattened inputs
        self.linear = torch.nn.Linear(flat_features, 10)
        # a distinct module is needed so the final ReLU output is addressable by name
        self.relu2 = torch.nn.ReLU()

    def forward(self, x):
        activated = self.relu1(self.conv1(x))
        flattened = activated.view(activated.size(0), -1)
        return self.relu2(self.linear(flattened))
-
-
- # define your model identifier
# single source of truth for the identifier; every accessor below validates against it
my_model_name = 'my-model'
-
-
- # # get your model ready
- # my_model = MyModel() # initialization input = torch.rand(1, 3, 224, 224)
- # #torch.save(my_model.state_dict(), '/code/my_model_params.pth', _use_new_zipfile_serialization=False)
- # pretrained_weights = '/code/my_model_params.pth' # path of pretrained weights
- # state = torch.load(pretrained_weights)
- # my_model.load_state_dict(state) # load pretrained weights
-
- # #todo: map the model to gpu here?
-
- # # init the model and the preprocessing:
- # preprocessing = functools.partial(load_preprocess_images, image_size=224)
-
- # # get an activations model from the Pytorch Wrapper
- # activations_model = PytorchWrapper(identifier=my_model_name, model=my_model, preprocessing=preprocessing)
-
- # # actually make the model, with the layers you want to see specified:
- # model = ModelCommitment(identifier=my_model_name, activations_model=activations_model,
- # # specify layers to consider
- # layers=['conv1', 'relu1'])
-
-
# The model names to consider. If you are making a custom model, then you most likely want to change
# the return value of this function.
def get_model_identifier():
    """Return the identifier string of the model defined in this module.

    :return: the identifier of your model
    """
    # delegate to the module-level constant so the name is defined in one place
    return my_model_name
-
-
- # get_model method actually gets the model. For a custom model, this is just linked to the
- # model we defined above.
def get_model(name, modelpath):
    """
    Fetch an instance of the base model, ready for activation extraction.

    Instantiates ``MyModel``, restores its pretrained weights from *modelpath*
    and wraps it in a ``PytorchWrapper`` so that calling it returns an xarray
    object of activations, as brain-score expects. There exist standard wrapper
    implementations for common libraries, like pytorch and keras; check out the
    examples folder to see more.

    :param name: the name of the model to fetch; must equal ``my_model_name``
    :param modelpath: filesystem path of the pretrained state-dict file
    :return: the wrapped model instance (``image_size`` attribute set to 224)
    """
    # quick check to make sure the model is the correct one
    assert name == my_model_name

    my_model = MyModel()  # initialization
    # map_location='cpu' lets a checkpoint that was saved on a GPU load on a
    # CPU-only submission machine; without it torch.load raises when CUDA is
    # unavailable. The wrapper moves tensors to the right device afterwards.
    state = torch.load(modelpath, map_location='cpu')
    my_model.load_state_dict(state)  # load pretrained weights

    # preprocessing resizes/normalizes the input images to 224x224
    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    # get an activations model from the Pytorch Wrapper
    wrapper = PytorchWrapper(identifier=my_model_name, model=my_model, preprocessing=preprocessing)
    wrapper.image_size = 224
    return wrapper
-
-
# get_layers method to tell the code what layers to consider. If you are submitting a custom
# model, then you will most likely need to change this method's return values.
def get_layers(name):
    """
    Return the list of layer names the benchmark should consider per model.

    The benchmark maps brain regions to layers, using this list as the set of
    candidate layers. The list need not contain every layer — the fewer given,
    the faster the benchmark runs — but each listed layer must produce an
    activations vector of at least size 25. Names are resolved inside the model
    instance; for a pytorch model they are dot-concatenated per module,
    e.g. "features.2".
    :param name: the name of the model, to return the layers for
    :return: a list of strings containing all layers, that should be considered as brain area.
    """
    # quick check to make sure the model is the correct one
    assert name == my_model_name

    # both layers emit 74x74 = 5476 activations, comfortably above the minimum
    return ['conv1', 'relu1']
-
-
- # Bibtex Method. For submitting a custom model, you can either put your own Bibtex if your
- # model has been published, or leave the empty return value if there is no publication to refer to.
def get_bibtex(name):
    """
    Return the bibtex reference of the requested model as a string.

    An empty string is valid when the model has no associated publication.
    """
    # sanity-check that the caller asked about this module's model
    assert name == my_model_name

    # this template model is unpublished, so there is nothing to cite
    return ''
-
- # # Main Method: In submitting a custom model, you should not have to mess with this.
- # if __name__ == '__main__':
- # # Use this method to ensure the correctness of the BaseModel implementations.
- # # It executes a mock run of brain-score benchmarks.
- # check_models.check_base_models(__name__)
-
|