import math

import numpy as np
import torch
from torch import Tensor
from torch.nn import Parameter

import test_model

# Build the attention network: topN part proposals, 200 output classes, CUDA device.
net = test_model.attention_net(topN=6, num_classes=200, device='cuda')

def weights_init(m):
    """Deterministically re-initialize Conv and Linear layers.

    NumPy is re-seeded before every draw so that each layer's weights are
    reproducible regardless of the order in which the modules are visited.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # He-normal alternative:
        #   n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        #   m.weight.data.normal_(0, math.sqrt(2. / n))

        # Uniform initialization with PyTorch's default bound of 1 / sqrt(fan_in).
        shape = (m.out_channels, m.in_channels, m.kernel_size[0], m.kernel_size[1])
        stdv = 1 / math.sqrt(m.in_channels * m.kernel_size[0] * m.kernel_size[1])
        np.random.seed(0)
        m.weight = Parameter(Tensor(np.random.uniform(-stdv, stdv, shape).astype(np.float32)))

        # A fully custom weight tensor can be assigned the same way; here all weights
        # are set to one for simplicity:
        #   ones = torch.Tensor(np.ones([2, 2, 3, 3]))
        #   w.weight = torch.nn.Parameter(ones)
        if m.bias is not None:
            shape_bias = (m.out_channels,)
            np.random.seed(0)
            m.bias = Parameter(Tensor(np.random.uniform(-stdv, stdv, shape_bias).astype(np.float32)))
    # elif classname.find('BatchNorm') != -1:
    #     m.weight.data.fill_(1)
    #     m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        # Normal-init alternative:
        #   m.weight.data.normal_(0, 0.01)
        #   m.bias.data = torch.ones(m.bias.data.size())
        stdv = 1 / math.sqrt(m.in_features)
        np.random.seed(0)
        m.weight = Parameter(Tensor(np.random.uniform(-stdv, stdv, (m.out_features, m.in_features)).astype(np.float32)))
        if m.bias is not None:
            np.random.seed(0)
            m.bias = Parameter(Tensor(np.random.uniform(-stdv, stdv, (m.out_features,)).astype(np.float32)))


net.apply(weights_init)
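# `Module.apply` walks the module tree recursively, so every Conv and Linear submodule
# of `net` has now been re-initialized by `weights_init`.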


# Forward pass on a fixed random 1 x 3 x 448 x 448 input for a reproducible smoke test.
# Note: net was constructed with device='cuda'; if its parameters live on the GPU,
# move the input there as well (e.g. x = x.cuda()) before calling net(x).
np.random.seed(0)
x = Tensor(np.random.rand(1, 3, 448, 448))
top_n_coordinates, concat_out, raw_logits, concat_logits, part_logits, top_n_index, top_n_prob = net(x)
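# Optional sanity check: print the shapes of the returned logits. This assumes the
# *_logits outputs are torch.Tensor objects; adjust if attention_net returns lists.
print('raw_logits:   ', tuple(raw_logits.shape))
print('concat_logits:', tuple(concat_logits.shape))
print('part_logits:  ', tuple(part_logits.shape))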