- from model_baseline import *
-
- import argparse
- import math
- import random
- import shutil
- import os.path as osp
- import glob
- import os
- import sys
- sys.path.append("..")
- import mindspore as ms
- import mindspore
- import mindspore.nn as nn
- import mindspore.ops as ops
- import mindspore.dataset as ds
- from mindspore import Tensor
- from mindspore import context
- from mindspore import Parameter
- import time
- import matplotlib
- import matplotlib.pyplot as plt
- import numpy as np
- import cv2
-
-
- # entropy coding
- import itertools
- import arithmetic_coding as ac
- import tempfile
- from mindspore.dataset import vision
-
-
- base_dir = os.path.dirname(__file__)  # e.g. '/home/paul/Desktop/CompressAI_MindSpore/demo'
- out_root_path = osp.join(base_dir, 'decoded_files/')
- if not os.path.exists(out_root_path):
- print(out_root_path + " does not exist; creating it")
- os.makedirs(out_root_path)
-
- # log file for per-image results
- out_root_path_file = open(osp.join(out_root_path, "details.txt"), 'w')
- save_path = '/code/CompressAI_MindSpore/demo/model/'
-
- def parse_args(argv):
- parser = argparse.ArgumentParser(description='Example evaluation script')
- parser.add_argument('-d', '--dataset', default='/usr/dataset/kodak/', type=str, help='Dataset root (expects a test/ subfolder)')
- parser.add_argument('-n', '--num-workers', type=int, default=3, help='Dataloaders threads (default: %(default)s)')
- parser.add_argument(
- '--batch-size', type=int, default=16,
- help='Batch size (default: %(default)s)')
- parser.add_argument(
- '--test-batch-size', type=int, default=1,
- help='Test batch size (default: %(default)s)')
- parser.add_argument(
- '--patch-size', type=int, nargs=2, default=(160, 160),
- help='Size of the patches to be cropped (default: %(default)s)')
- parser.add_argument(
- '--pretrained', type=lambda s: s.lower() in ('true', '1', 'yes'), default=True,
- dest='pretrained', help='load pretrained weights (default: %(default)s)')
- parser.add_argument(
- '--real-bpp', action='store_true', default=False, dest='real_bpp',
- help='measure the real bpp by running the arithmetic codec')
- parser.add_argument(
- '--iscrop', action='store_true', default=False, dest='iscrop',
- help='center-crop the test images to --patch-size')
- parser.add_argument(
- '--logfile', type=str, default="train_log.txt", help='log file name')
- parser.add_argument(
- '--seed', type=int, default=0, help='Set random seed for reproducibility')
- parser.add_argument('--pretrained_file', type=str, default='ckpt/main_model_2000_bk317.ckpt', help='pretrained main-model checkpoint')
- parser.add_argument('--pretrained_file_ctx', type=str, default='ckpt/ctx_model_2000_bk317.ckpt', help='pretrained context-model checkpoint')
- # yapf: enable
- args = parser.parse_args(argv)
- return args
-
- class MyDataset:
- def __init__(self, input_path):
- self.input_list = []
- self.name_list = []
- self.num = 0
- self.mean = (127.5, 127.5, 127.5)
- self.std = (128.0, 128.0, 128.0)
- for i in os.listdir(input_path):
- self.input_list.append(osp.join(input_path, i))
- self.name_list.append(i)
- self.num += 1
-
- # (normalization is applied in __getitem__)
-
-
- def __len__(self):
- return self.num
-
- def __getitem__(self, idx):
- img = cv2.imread(self.input_list[idx])  # HWC, BGR, uint8
- name = self.name_list[idx]
- # scale to roughly [-1, 1]: (x - 127.5) / 128.0
- input_np = ((img.astype(np.float32) - self.mean) / self.std).astype(np.float32)
- return input_np, name
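- # Usage sketch: this class is wrapped by ds.GeneratorDataset in main() below, e.g.
- #   dataset = MyDataset('/usr/dataset/kodak/test/')
- #   img, name = dataset[0]  # HWC float32, roughly in [-1, 1], plus the filename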
-
- def mse2psnr(mse):
- # Following the hyperprior paper, convert MSE to PSNR (dB); this assumes a
- # unit peak value, e.g. images scaled to [0, 1]. For 8-bit images use
- # 10 * math.log10(255 * 255 / mse) instead.
- # E.g. an MSE of 1e-4 on the [0, 1] scale maps to 40 dB.
- return 10 * math.log10(1 / mse)
- # PSNR between two 8-bit images
- def psnr(img1, img2):
- mse = np.mean( (img1/255. - img2/255.) ** 2 )
- if mse < 1.0e-10:
- return 100
- PIXEL_MAX = 1
- return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
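- # Example (sketch): one pixel off by 4 in a 4x4 uint8 image gives an MSE of
- # 1/255**2 on the normalized scale, i.e. 20*log10(255) ≈ 48.13 dB:
- #   a = np.full((4, 4), 100, dtype=np.uint8)
- #   b = a.copy(); b[0, 0] += 4
- #   psnr(a, b)  # ≈ 48.13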
-
- ################################################################
- class RateDistortionLoss(nn.Cell):
- """Custom rate distortion loss with a Lagrangian parameter."""
- def __init__(self, lmbda=1e-2):
- super().__init__()
- self.mse = nn.MSELoss()
- self.ms_ssim = nn.MSSSIM(filter_size=7)
- self.lmbda = lmbda
-
- def construct(self, x_hat, target):
- N, _, H, W = target.shape
- out = {}
- num_pixels = N * H * W
-
- ########################################
- # 计算误差
- out['mse_loss'] = self.mse(x_hat, target) #end to end
- out['ms_ssim'] = self.ms_ssim(x_hat, target) # (N,)
-
- ########################################
- out['psnr'] = mse2psnr(self.mse(x_hat, target))
- return out
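- # Minimal usage sketch (assumes x_hat and target are NCHW float tensors on the
- # dynamic range nn.MSSSIM expects):
- #   criterion = RateDistortionLoss()
- #   out = criterion(x_hat, target)
- #   print(out['mse_loss'], out['ms_ssim'], out['psnr'])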
-
- class AverageMeter:
- """Compute running average."""
- def __init__(self):
- self.val = 0
- self.avg = 0.0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- if isinstance(val, Tensor):
- val = val.asnumpy().item()
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
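- # Usage sketch: a running mean over per-batch values.
- #   meter = AverageMeter()
- #   meter.update(0.5, n=4)  # batch of 4 with mean value 0.5
- #   meter.update(1.0, n=2)
- #   meter.avg               # (0.5*4 + 1.0*2) / 6 = 2/3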
-
- def save_pic(data, h, w, path):
- if osp.exists(path):
- os.remove(path)
- print("removed " + path)
- img = data[:h, :w, :]  # drop the padded border before saving
- cv2.imwrite(path, img)
-
-
-
- class PrTest:
- def __init__(self, centers, net_ctx, L=6):
- self.L = L
- self.ctx_size = 4 * 2 + 1  # 9x9 spatial context window
- self.ctx_shape = self.ctx_size // 2 + 1, self.ctx_size, self.ctx_size
- self.input_ctx_batched_shape = [1, 1, self.ctx_size]
- self._centers = centers
- self.net_ctx = net_ctx
- self.net_ctx.set_train(False)
-
- def _new_ctx_sym_itr(self, syms, ctx_shape):
- assert len(ctx_shape) == 3
- _, h, w = ctx_shape
- for ctx in self.iter_over_blocks(syms, ctx_shape):
- # symbol is in the last depth dimension in the center
- sym = ctx[-1, h // 2, w // 2]
- yield ctx, sym
-
- def iter_over_blocks(self,syms, block_sizes):
- """
- Iterate over symbols in blocks of size in block_sizes.
- :param syms: CHW
- :return: blocks, iterating in order W, H, C
- """
- for c_slice, h_slice, w_slice in self._iter_block_idices(syms.shape, block_sizes):
- yield syms[c_slice, h_slice, w_slice]
-
- def _new_ctx_itr(self, syms, ctx_shape):
- return self.iter_over_blocks(syms, ctx_shape)
-
-
- def _get_num_ctxs(self, syms_shape, block_sizes):
- return sum(1 for _ in self._iter_block_idices(syms_shape, block_sizes))
-
-
- def _iter_block_idices(self,syms_shape, block_sizes):
-
- C, H, W = syms_shape
- bC, bH, bW = block_sizes
- last_indices = (C - bC + 1, H - bH + 1, W - bW + 1)
- for c, h, w in itertools.product(*map(range, last_indices)):
- yield slice(c, c+bC), slice(h, h+bH), slice(w, w+bW)
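- # Example: for syms_shape (C, H, W) = (2, 4, 4) and block_sizes (1, 3, 3),
- # last_indices = (2, 2, 2), so this yields 2*2*2 = 8 sliding blocks, scanning
- # w fastest, then h, then c (itertools.product order).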
-
- def get_freqs_pr(self, ctx, first=None):
- # gather maps the integer symbols to their center (dequantized) values,
- # which is the representation the context model was trained on
- input_ctx_batched = ms.ops.expand_dims(ctx, 0)
- q = ms.ops.gather(self._centers, input_ctx_batched, 0)
- logits = self.net_ctx(q, is_training=False)
- pr = ms.ops.softmax(logits)
- if first:
- return None,pr
-
- # scale probabilities to large integer frequencies for the arithmetic coder
- freqs_resolution = 1e9
- x = (pr * freqs_resolution).int()
- freqs = ms.ops.squeeze(x).asnumpy()
- freqs = np.int64(freqs)
- freqs = np.maximum(freqs, 1)
-
- freqs = ac.SimpleFrequencyTable(freqs)
-
- return freqs, pr
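- # Sketch of the scaling above: probabilities (0.7, 0.2, 0.1) become integer
- # frequencies (7e8, 2e8, 1e8); np.maximum(..., 1) keeps every symbol codable
- # even when the model assigns it a probability of ~0.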
-
- def _new_sym_idxs_itr(self,syms_shape, ctx_size):
- D, H, W = syms_shape
- pad = ctx_size // 2
- return itertools.product(
- range(pad, D), # D dimension is not padded
- range(pad, H - pad),
- range(pad, W - pad)) # yields tuples (d, h, w)
-
- def pad_symbols_volume(self, symbols):
- pad = self.ctx_size // 2
- paddings = ((0, 0),
- (pad, 0),  # depth: pad the causal (past) side only
- (pad, pad),
- (pad, pad))
- return ms.nn.Pad(paddings=paddings, mode="CONSTANT")(symbols)
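- # Shape sketch: with ctx_size = 9 (pad = 4), an NCHW symbol volume of shape
- # (1, C, H, W) comes back as (1, C + 4, H + 8, W + 8).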
-
- def undo_pad_symbols_volume(self, symbols):
- # assert symbols.ndim == 3
- pad = self.ctx_size // 2
- assert pad >= 1
- return symbols[pad:, pad:-pad, pad:-pad]
-
- def encode(self, foutid, syms):
- """
- Args:
- foutid: file descriptor to write the bitstream into
- syms: the quantized symbols (not the latent representation z or w itself)
-
- Returns:
- bit_out.num_bits: the actual number of bits written
- first_sym: the first symbol, stored separately; needed by decode()
- theoretical_bit_cost: the theoretical bit cost (sum of -log2 p)
- """
- syms = ms.ops.squeeze(syms, 0)
- with open(foutid, 'wb') as fout:
- bit_out = ac.CountingBitOutputStream(
- bit_out=ac.BitOutputStream(fout))
- enc = ac.ArithmeticEncoder(bit_out)
-
- ctx_sym_itr = self._new_ctx_sym_itr(syms, ctx_shape=self.ctx_shape)
- # The first symbol is stored separately (using log2(L) bits or so), so its
- # context is not fed through the frequency table
- first_ctx, first_sym = next(ctx_sym_itr)
- _, first_pr = self.get_freqs_pr(first_ctx, "first")
-
- first_pr = first_pr.squeeze()
- first_bc = -np.log2(first_pr[first_sym])
- theoretical_bit_cost = first_bc
- num_ctxs = self._get_num_ctxs(syms.shape, self.ctx_shape)
- # Encode the remaining symbols
- for i, (ctx, sym) in enumerate(ctx_sym_itr):
- freqs, pr = self.get_freqs_pr(ctx)
- pr = pr.squeeze()
- bc = -np.log2(pr[sym])
- theoretical_bit_cost += bc
- enc.write(freqs, sym)
- if i % 1000 == 0:
- print('\rFeeding context for symbol #{}/{} (theoretical cost so far: {})...'.format(i, num_ctxs, theoretical_bit_cost), end='', flush=True)
-
- enc.finish()
- bit_out.close()
- return bit_out.num_bits, first_sym, theoretical_bit_cost
-
- def decode(self, fout_p, symbols, first_sym):
- # Idea:
- # have a matrix symbols_decoded, initially all zeros.
- # put first_sym into symbols_decoded
- # use a normal ctx_itr to retrieve the current context from symbols_decoded
- # use symbol_idx_itr to get the index of the next decoded symbol
- # write the decoded symbol into symbols_decoded, then advance the ctx_itr to get the next context
-
- symbols_shape_padded = symbols.squeeze(0).shape
-
- with open(fout_p, 'rb') as fin:
- bitin = ac.BitInputStream(fin)
- dec = ac.ArithmeticDecoder(bitin)
-
- symbols_decoded = np.zeros(symbols_shape_padded, dtype=np.int32)
- ctx_itr = self.iter_over_blocks(symbols_decoded, self.ctx_shape)
- sym_idxs_itr = self._new_sym_idxs_itr(symbols_shape_padded, ctx_size=self.ctx_size)
-
- next(ctx_itr) # skip first ctx
- symbols_decoded[next(sym_idxs_itr)] = first_sym # write first_sym
- num_ctxs = self._get_num_ctxs(symbols_shape_padded, self.ctx_shape)
- for i, (current_ctx, next_decoded_sym_idx) in enumerate(zip(ctx_itr, sym_idxs_itr)):
- current_ctx = ms.Tensor(current_ctx)
- freqs,_ = self.get_freqs_pr(current_ctx)
- symbol = dec.read(freqs)
- symbols_decoded[next_decoded_sym_idx] = symbol
- if i % 1000 == 0:
- print('\rFeeding context for symbol #{}/{}...'.format(i, num_ctxs), end='', flush=True)
-
- return symbols_decoded
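- # Round-trip sketch (hypothetical tensors; mirrors test_epoch below):
- #   tester = PrTest(model._center, model_ctx)
- #   syms_padded = tester.pad_symbols_volume(symbols)
- #   fd, path = tempfile.mkstemp()
- #   num_bits, first_sym, _ = tester.encode(fd, syms_padded)
- #   decoded = tester.decode(path, syms_padded, first_sym)
- #   assert (tester.undo_pad_symbols_volume(decoded)
- #           == symbols.squeeze().asnumpy()).all()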
-
-
-
-
- def test_epoch(epoch, test_dataloader, model, model_ctx, criterion, real_bpp):
- global out_root_path_file
- test_dataloader = test_dataloader.create_dict_iterator()
-
- model.set_train(False)
- model_ctx.set_train(False)
- loss = AverageMeter()
- psnr = AverageMeter()
- msssim = AverageMeter()
- realbpp = AverageMeter()
- enctime = AverageMeter()
- dectime = AverageMeter()
- vbpp = AverageMeter()
-
- print("real_bpp:",real_bpp)
-
- for data in test_dataloader:
- d1 = data['data']
- print("start codec")
- name = data['name']
- print(name)
-
- N, _, H, W = d1.shape
- print(d1.shape)
- output = {}
- num_pixels = N * H * W
-
- hh, ww = d1.shape[2], d1.shape[3]
- pp = 64 # maximum 6 strides of 2
- new_hh = (hh + pp - 1) // pp * pp
- new_ww = (ww + pp - 1) // pp * pp
- padding_left = (new_ww - ww) // 2
- padding_right = new_ww - ww - padding_left
- padding_top = (new_hh - hh) // 2
- padding_bottom = new_hh - hh - padding_top
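- # Worked example: a 333x500 (HxW) input is padded up to 384x512, i.e.
- # (top, bottom, left, right) = (25, 26, 6, 6); Kodak's 512x768 images already
- # sit on the 64-pixel grid, so all four pads are 0.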
-
- d1_padded = nn.Pad(paddings=((0, 0), (0, 0), (padding_top, padding_bottom),
- (padding_left, padding_right)), mode="CONSTANT")(d1)
-
- # ----- theoretical ("virtual") bpp from the context model -----
- tester = PrTest(model._center, model_ctx)
- x_hat, qbar, qsoft, qhard, symbols, mask = model(d1_padded, is_training=False)
- logits_test = model_ctx(qhard)
- Nx, Cx, Hx, Wx = x_hat.shape
- output["x_hat"] = ops.slice(x_hat, begin=(0, 0, padding_top, padding_left),
- size=(Nx, Cx, Hx-padding_top-padding_bottom, Wx-padding_left-padding_right))
- # output["x_hat"] = F.pad(
- # x_hat, (-padding_left, -padding_right, -padding_top, -padding_bottom)#pad负数是去掉了相应的数据,mindspore不支持pad负数
- # )
-
- L = 6
- depth, on_value, off_value = L, ms.Tensor(1.0, ms.float32), ms.Tensor(0.0, ms.float32)
- targets_one_hot = mindspore.ops.one_hot(symbols, depth, on_value, off_value, axis=-1)
-
- # log2(e); multiplying by this would convert the nat-based cross entropy to bits
- log_base_change_factor = ms.Tensor(np.log2(np.e), dtype=ms.float32, const_arg=True)
- targets_one_hot = targets_one_hot.reshape([-1, L])
- logits = logits_test.reshape([-1, L])
- logits = mindspore.ops.softmax(logits, axis=-1)
- bc = ms.ops.binary_cross_entropy(logits, targets_one_hot, reduction="sum")  # * log_base_change_factor  # NCHW
- bpp = bc / num_pixels
-
- # virtual bpp
- virtual_bpp = bpp.asnumpy().item()
- output['virtual_bpp'] = bpp
- print("virtual_bpp",output['virtual_bpp'])
-
- ###################################################################################################################
- # real bpp: actually run the arithmetic codec over the symbols
- if real_bpp:
- foutid, fout_p = tempfile.mkstemp()
- out_dec = {}
-
- # encode
- start = time.time()
- syms_padded = tester.pad_symbols_volume(symbols)
- virtual_num_bits, first_sym, theoretical_bit_cost = tester.encode(foutid, syms_padded)
- enc_time = time.time() - start
- print("============= enc ok ==========")
-
-
- # calculate
- actual_num_bits = os.path.getsize(fout_p) * 8
- assert actual_num_bits == virtual_num_bits, '{} != {}'.format(actual_num_bits, virtual_num_bits)
-
- print("theoretical_bit_cost:",theoretical_bit_cost)
- print("virtual_num_bits:",virtual_num_bits)
- print("actual_num_bits:",actual_num_bits)
-
- # decode
- start = time.time()
- syms_dec_padded = tester.decode(fout_p, syms_padded, first_sym)
- dec_time = time.time() - start
- syms_dec = tester.undo_pad_symbols_volume(syms_dec_padded)
- symbols = symbols.squeeze().asnumpy()
- # verify the round-trip is lossless
- np.testing.assert_array_equal(syms_dec, symbols)
-
-
- print("=============dec ok==========")
- #====================
- bpp_cal = actual_num_bits/num_pixels
- enctime.update(enc_time)
- dectime.update(dec_time)
- realbpp.update(bpp_cal)
- realbpp_val = bpp_cal
- enctimeval = enc_time
- dectimeval = dec_time
-
- out_criterion = criterion(output["x_hat"], d1)
- psnr.update(out_criterion['psnr'])
- msssim.update(out_criterion['ms_ssim'])
- vbpp.update(virtual_bpp)
-
- psnr_val = out_criterion['psnr']
- msssim_val = out_criterion['ms_ssim'].asnumpy().item()
-
-
- if real_bpp:
- print_context = ( name.asnumpy().item() +
- f'\tPSNR (dB): {psnr_val :.3f} |'
- f'\tMS-SSIM: {msssim_val :.4f} |'
- f'\tenc time: {enctimeval :.4f} |'
- f'\tdec time: {dectimeval :.4f} |'
- f'\tReal_bpp: {realbpp_val:.3f} \n'
- )
- else:
- print_context = ( name.asnumpy().item() +
- f'\tPSNR (dB): {psnr_val :.3f} |'
- f'\tMS-SSIM: {msssim_val :.4f} |'
- f'\tVirtual_bpp: {virtual_bpp:.3f} \n'
- )
-
- out_root_path_file.write(print_context)
- print(print_context)
-
- # post-process: clip to [-1, 1], undo the normalization, convert to HWC uint8
- rec_d1 = output["x_hat"]
- rec_d1 = ops.clip_by_value(rec_d1, clip_value_min=-1, clip_value_max=1.0)
- rec_d1 = rec_d1[0].asnumpy()
- rec_d1 = rec_d1.transpose(1, 2, 0)*128 + 127.5  # inverse of (x - mean) / std
- rec_d1 = rec_d1.astype('uint8')
- # save the reconstruction
- save_pic(rec_d1, H, W, out_root_path + name.asnumpy().item())
-
- if real_bpp:
- print(f'Test epoch {epoch}: Average losses:'
- f'\tTime: {time.strftime("%Y-%m-%d %H:%M:%S")} |'
- f'\tReal_bpp: {realbpp.avg:.3f} |'
- f'\tMS-SSIM: {msssim.avg :.4f} |'
- f'\tenc time: {enctime.avg :.4f} |'
- f'\tdec time: {dectime.avg :.4f} |'
- f'\tPSNR (dB): {psnr.avg :.3f} \n'  # average per-image PSNR
- )
- else:
- print(f'Test epoch {epoch}: Average losses:'
- f'\tTime: {time.strftime("%Y-%m-%d %H:%M:%S")} |'
- f'\tVirtual_bpp: {vbpp.avg:.3f} |'
- f'\tMS-SSIM: {msssim.avg :.4f} |'
- f'\tPSNR (dB): {psnr.avg :.3f} \n'  # average per-image PSNR
- )
-
- return loss.avg  # note: `loss` is never updated in this loop, so this is always 0
-
-
-
-
- def paratuple2load(para_tuple, param_dict):
- failed_id = []
- for id, param in enumerate(para_tuple):
- flag = False
- if isinstance(param, Parameter):
- if param.name in param_dict.keys():
- para_tuple[id].set_data(param_dict[param.name].value(), slice_shape=True)
- flag = True
- if not flag:
- failed_id.append(id)
-
- if failed_id:  # report any parameters that failed to load
- print("failed load ParameterTuple:")
- for id in failed_id:
- print(para_tuple[id])
- return failed_id
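- # Usage sketch (hypothetical names): restore an optimizer's ParameterTuple from
- # a checkpoint dict and collect the indices that did not match by name:
- #   failed = paratuple2load(optimizer.parameters, mindspore.load_checkpoint(path))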
-
- def load_checkpoint(net, append_info, save_path):
- param_dict = mindspore.load_checkpoint(save_path)
- # loads both trainable and non-trainable Parameters into the network
- param_not_load = mindspore.load_param_into_net(net, param_dict)
- print('Load checkpoint from ' + save_path)
- # parameters that were not loaded; this should normally be empty
- print("param_not_load: ", param_not_load)
-
- # collect extra scalar entries (e.g. epoch/loss) stored in the checkpoint and
- # return them as a dict
- param_info = {}
- for item in append_info:
- if item in param_dict.keys():
- param_info[item] = param_dict[item].value().asnumpy().item()
- return param_info
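- # Usage sketch: main() below uses this to recover 'epoch' and 'loss' saved
- # alongside the weights:
- #   info = load_checkpoint(net, ['epoch', 'loss'], 'ckpt/main_model_2000_bk317.ckpt')
- #   print(info.get('epoch'), info.get('loss'))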
-
- def main(argv):
- args = parse_args(argv)
- print("args.real_bpp:",args.real_bpp)
- print("args.iscrop:",args.iscrop)
-
- if args.seed is not None:
- mindspore.set_seed(args.seed)
- random.seed(args.seed)
-
- test_dataset = MyDataset(input_path=osp.join(args.dataset, 'test/'))
- center_crop_op=vision.CenterCrop(size=args.patch_size)
- HWC2CHW_op = vision.HWC2CHW()
- test_dataloader = ds.GeneratorDataset(test_dataset, column_names=["data", "name"],num_parallel_workers=args.num_workers, shuffle=False)
- if args.iscrop:
- test_dataloader = test_dataloader.map(operations=[center_crop_op])
- test_dataloader = test_dataloader.map(operations=[HWC2CHW_op])
- test_dataloader = test_dataloader.batch(args.test_batch_size, drop_remainder=False)
-
- context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")  # GRAPH_MODE (static graph) or PYNATIVE_MODE (dynamic graph)
- context.set_context(save_graphs=False)
- context.set_context(device_id=int(os.getenv('DEVICE_ID', '0')))
- print("int(os.getenv('DEVICE_ID', '0')): ", int(os.getenv('DEVICE_ID', '0')))
- if ms.get_context("device_target") == "GPU":
- # graph-kernel fusion can speed up execution (mainly on GPU) but writes extra
- # intermediate files locally, so keep it disabled here
- context.set_context(enable_graph_kernel=False)
- ms.reset_auto_parallel_context()
- ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.STAND_ALONE, gradients_mean=True, device_num=1)
-
- # load the models
- net = Imgcomp(32)
- net_ctx = ContextCNN(32)
- append_info = load_checkpoint(net, ['epoch', 'loss'], args.pretrained_file)
- append_info_ctx = load_checkpoint(net_ctx, ['epoch', 'loss'], args.pretrained_file_ctx)
- criterion = RateDistortionLoss()
- for epoch in [0]:
- loss = test_epoch(epoch, test_dataloader, net, net_ctx, criterion, args.real_bpp)
-
- if __name__ == '__main__':
- main(sys.argv[1:])