|
- import os
- import json
- import subprocess
- import time
- import numpy as np
- from datetime import timezone, datetime, timedelta
- import argparse
- import torch
- import torch.nn as nn
- import torch.backends.cudnn as cudnn
- from PIL import Image
- from model import CongestionLevel
-
# Command-line interface for the PCL inference script.
parser = argparse.ArgumentParser(description='PCL congestion-level inference over extracted video frames.')

# model related
# NOTE: default added so the script runs out of the box; previously config.model
# defaulted to None and main() rejected it immediately.
parser.add_argument('--model', type=str, choices=['CongestionLevel'], default='CongestionLevel',
                    help='Model architecture to use (only CongestionLevel is supported).')
parser.add_argument('--gpus', nargs='+', type=int, default=None,
                    help='GPU device ids to use; CPU inference is not supported.')
parser.add_argument('--checkpoint_path', default='./checkpoints/kin_8F_VIP_img_pre_resume_pa_model_best.pth',
                    type=str, help='Path to the trained model checkpoint.')

# data pipeline
parser.add_argument('--video_root', default='./videos/', type=str, help='Path to location of dataset videos')
parser.add_argument('--image_prefix', default="img_", type=str,
                    help='Filename prefix of the extracted frame images (e.g. img_001.jpg).')
parser.add_argument('--batch_size', default=16, type=int, help='Batch Size')
-
-
def main():
    """Run congestion-level inference over every clip directory under ``video_root``.

    Each sub-directory of ``video_root`` is expected to hold exactly 32 extracted
    frame images named ``<image_prefix>NNN.jpg``; 8 evenly spaced frames are
    sampled from each clip and fed to the model, and the predicted level is
    printed per clip.

    Raises:
        ValueError: if no GPUs are given, the model name is unsupported, or a
            clip directory does not contain exactly 32 frames.
    """
    ####################################################################
    # Configuration and logging
    config = parser.parse_args()
    print('#' * 60)
    print('PCL Inference Configuration:')
    for key, value in vars(config).items():
        print('  {:>20} {}'.format(key, value))
    print('#' * 60)

    ####################################################################
    # Initialize model.
    # Explicit raises instead of `assert cond, ValueError(...)`: asserts are
    # stripped under `python -O`, and the old form only used the exception
    # instance as the assertion message rather than raising it.
    if config.model not in ['CongestionLevel']:
        raise ValueError('Unsupported model: {}'.format(config.model))
    if config.gpus is None:
        raise ValueError('CPU inferencing not supported.')
    model = CongestionLevel(PCL_mode=True, batch_size=config.batch_size, video_root=config.video_root,
                            image_prefix=config.image_prefix, checkpoint_path=config.checkpoint_path)
    cudnn.benchmark = True
    model.cuda()

    model.eval()
    print('#' * 60)

    ####################################################################
    # Initialize record paths.
    if config.video_root.startswith('./'):
        # Anchor the relative root at this script's directory rather than the
        # CWD. Strip only the leading './' via slicing — str.replace('./', '')
        # would also mangle any later './' occurrences inside the path.
        config.video_root = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                         config.video_root[2:])

    record_list = [os.path.join(config.video_root, path) for path in os.listdir(config.video_root) if
                   os.path.isdir(os.path.join(config.video_root, path))]

    # Validate every clip up front so a bad directory fails fast, before any
    # model work is done.
    for record_path in record_list:
        img_list = [img for img in os.listdir(record_path) if img.startswith(config.image_prefix)]
        if len(img_list) != 32:
            raise ValueError("视频段长度应为32!当前长度:{1},当前路径:{0}\n".format(record_path, len(img_list)))

    # 8 evenly spaced frame indices over [1, 29]: 1, 5, 9, ..., 29.
    # astype(int) instead of int8: identical values here, but int8 would
    # silently overflow if the index range ever grew past 127.
    avg_indices = np.linspace(1, 29, 8).astype(int).tolist()
    image_tmpl = config.image_prefix + '{:03d}.jpg'

    ####################################################################
    # Inferencing
    print('Starting with inference phase.')
    epoch_start_time = time.time()

    torch.set_grad_enabled(False)

    for record_path in record_list:
        start = time.time()

        frames = [Image.open(os.path.join(record_path, image_tmpl.format(p))).convert('RGB') for p in avg_indices]
        preds_level = model(frames)  # batch_size x pred_level

        pred_level = preds_level[0]  # single video per forward pass
        cost_second = '{:.4f}'.format(float(time.time() - start)).zfill(6)
        # Keep only the portion after the *last* 'videos/' marker. maxsplit=1
        # makes this robust when the absolute path contains 'videos/' more than
        # once (the old unbounded rsplit(...)[1] picked the wrong segment).
        record_path = record_path.rsplit('videos/', 1)[-1]

        print('[{time}] Video: {path}, Level: {level:1d}, Cost:{cost:s}s\t'.format(
            time=datetime.now(tz=timezone(timedelta(hours=+8))).strftime("%A %H:%M"),
            path=record_path, level=pred_level, cost=cost_second))

    epoch_duration = float(time.time() - epoch_start_time)
    print('Finished Inference in {} sec.'.format(epoch_duration))
-
-
# Script entry point: run inference only when executed directly, not on import.
if __name__ == '__main__':
    main()
|