|
- import torch
- import numpy as np
- import argparse
- import time
- import util
- import matplotlib.pyplot as plt
- from engine import trainer
- import random
-
-
-
-
def setup_seed(seed):
    """Seed every RNG in play (python, numpy, torch CPU/GPU) for reproducible runs.

    Also forces cuDNN into deterministic mode so convolution results do not
    vary between runs on the same hardware.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable
    torch.backends.cudnn.deterministic = True
-
-
# Set the global random seed (module import time) so results are reproducible.
setup_seed(20)
def test(data, adjdata, seq_length, nhid, in_dim, num_nodes, batch_size,
         learning_rate, dropout, weight_decay, savedcheckpoint, SE_file,
         test_output_file, test_data_tensor, data_mean, data_std):
    """Run a saved checkpoint over ``test_data_tensor`` and dump the predictions.

    Args:
        data: unused here; kept for signature compatibility with callers.
        adjdata: path to the adjacency pickle; yields
            [forward_adj(nroute, nroute), backward_adj(nroute, nroute)].
        seq_length, nhid, in_dim, num_nodes, dropout, learning_rate,
        weight_decay: model / trainer hyper-parameters forwarded to ``trainer``.
        batch_size: batch size used for the test DataLoader.
        savedcheckpoint: path to the model ``state_dict`` to load.
        SE_file: path to the spatial-embedding file read by ``util.load_SE``.
        test_output_file: path the raw predictions are written to (np.savez).
        test_data_tensor: test inputs; channel 0 is standardized in place.
        data_mean, data_std: statistics for the StandardScaler.

    Returns:
        torch.Tensor: concatenated model predictions (squeezed per batch).
    """
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(adjdata)
    scaler = util.StandardScaler(data_mean, data_std)  # scaler.mean / scaler.std
    graphSE = util.load_SE(SE_file)

    gpu_count = torch.cuda.device_count()
    supports = adj_mx
    # Tile each adjacency matrix once per visible GPU (DataParallel-style split).
    adj_mx = [np.tile(i, (gpu_count, 1)) for i in adj_mx]
    adjinit = supports[0]

    engine = trainer(scaler, in_dim, seq_length, num_nodes, nhid, dropout,
                     learning_rate, weight_decay, supports,
                     adjinit, graphSE=graphSE)

    # Load weights first, then switch to eval mode.
    modelpath = savedcheckpoint
    engine.model.load_state_dict(torch.load(modelpath))
    engine.model.eval()
    print("load model..", modelpath)

    # Standardize the input channel in place; labels are unused at test time,
    # so the same tensor doubles as x and y for the DataLoader.
    test_data_tensor[..., 0] = scaler.transform(test_data_tensor[..., 0])
    test_dataloader = util.DataLoader(test_data_tensor, test_data_tensor, batch_size)

    # Loop-invariant: build the CUDA support tensors once, not per batch.
    cuda_supports = [torch.tensor(i).cuda() for i in adj_mx]

    outputs = []
    for batch_idx, (x, y) in enumerate(test_dataloader.get_iterator()):  # x: [b, 12, nroute, 2]
        testx = torch.Tensor(x).cuda()
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx, None, False, supports=cuda_supports).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    # 'with' guarantees the file handle is closed even if savez raises.
    with open(test_output_file, 'wb') as f:
        np.savez(f, yhat.cpu().detach().numpy())

    return yhat
-
def main(data, adjdata, seq_length, nhid, in_dim, num_nodes, batch_size,
         learning_rate, dropout, weight_decay, savedcheckpoint, SE_file,
         test_output_file):
    """Evaluate a saved checkpoint on the test split and print per-horizon metrics.

    Loads the dataset/adjacency, restores the model from ``savedcheckpoint``,
    predicts over the test loader, saves the raw predictions to
    ``test_output_file``, and prints MAE / MAPE / RMSE for each of the 12
    prediction horizons plus their averages.

    Args:
        data: dataset path/identifier consumed by ``util.load_dataset``.
        adjdata: path to the adjacency pickle; yields
            [forward_adj(nroute, nroute), backward_adj(nroute, nroute)].
        seq_length, nhid, in_dim, num_nodes, dropout, learning_rate,
        weight_decay: model / trainer hyper-parameters forwarded to ``trainer``.
        batch_size: batch size for train/val/test loaders.
        savedcheckpoint: path to the model ``state_dict`` to load.
        SE_file: path to the spatial-embedding file read by ``util.load_SE``.
        test_output_file: path the raw predictions are written to (np.savez).
    """
    # load data
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(adjdata)
    dataloader = util.load_dataset(data, batch_size, batch_size, batch_size)
    scaler = dataloader['scaler']  # scaler.mean / scaler.std

    graphSE = util.load_SE(SE_file)

    gpu_count = torch.cuda.device_count()
    supports = adj_mx
    # Tile each adjacency matrix once per visible GPU (DataParallel-style split).
    adj_mx = [np.tile(i, (gpu_count, 1)) for i in adj_mx]
    adjinit = supports[0]

    engine = trainer(scaler, in_dim, seq_length, num_nodes, nhid, dropout,
                     learning_rate, weight_decay, supports,
                     adjinit, graphSE=graphSE)

    # Load weights first, then switch to eval mode.
    modelpath = savedcheckpoint
    engine.model.load_state_dict(torch.load(modelpath))
    engine.model.eval()
    print("load model..", modelpath)

    realy = torch.Tensor(dataloader['y_test']).cuda()
    realy = realy.transpose(1, 3)[:, 0, :, :]  # keep channel 0: [samples, nroute, 12]

    # Loop-invariant: build the CUDA support tensors once, not per batch.
    cuda_supports = [torch.tensor(i).cuda() for i in adj_mx]

    outputs = []
    for batch_idx, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):  # x: [b, 12, nroute, 2]
        testx = torch.Tensor(x).cuda()
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx, realy, False, supports=cuda_supports).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    # NOTE: predictions are intentionally saved BEFORE truncating to the label
    # count (preserves the original behavior); 'with' closes the file safely.
    with open(test_output_file, 'wb') as f:
        np.savez(f, yhat.cpu().detach().numpy())
    # The last batch may be padded by the loader; clip to the true sample count.
    yhat = yhat[:realy.size(0), ...]

    amae = []
    amape = []
    armse = []
    # 12 = prediction horizon length; TODO(review): presumably equals seq_length.
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.5f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
|