|
- # Builtin Configurations (DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
- enable_modelarts: False
- # url for modelarts
- data_url: ""
- train_url: ""
- checkpoint_url: ""
- # path for local
- data_path: "/cache/data"
- output_path: "/cache/train"
- load_path: "/cache/checkpoint_path"
- device_target: "Ascend"
- enable_profiling: False
- need_modelarts_dataset_unzip: True
- modelarts_dataset_unzip_name: "MindRecord_COCO"
-
- # ======================================================================================
- # common options
- distribute: True
-
- # ======================================================================================
- # create dataset
- create_dataset: "coco"
- prefix: "retinanet.mindrecord"
- is_training: True
-
- # ======================================================================================
- # Training options
- img_shape: [640, 640]
- num_retinanet_boxes: 76725
- match_thershold: 0.5  # NOTE: key name misspelled ("thershold") but kept as-is — consumer code reads this exact key
- nms_thershold: 0.6    # NOTE: same misspelling kept deliberately for compatibility
- min_score: 0.1
- max_boxes: 100
-
- # learning rate settings
- lr: 0.1
- global_step: 0
- lr_init: 1.0e-6
- lr_end_rate: 5.0e-3
- warmup_epochs: 2
- warmup_epochs1: 2
- warmup_epochs2: 5
- warmup_epochs3: 23
- warmup_epochs4: 60
- warmup_epochs5: 160
- momentum: 0.9
- weight_decay: 1.5e-4
-
- # network
- num_default: [9, 9, 9, 9, 9]
- extras_out_channels: [256, 256, 256, 256, 256]
- feature_size: [80, 40, 20, 10, 5]
- aspect_ratios: [[0.5, 1.0, 2.0], [0.5, 1.0, 2.0], [0.5, 1.0, 2.0], [0.5, 1.0, 2.0], [0.5, 1.0, 2.0]]
- steps: [8, 16, 32, 64, 128]
- anchor_size: [32, 64, 128, 256, 512]
- prior_scaling: [0.1, 0.2]
- gamma: 2.0
- alpha: 0.75
- num_classes: 81
-
- # nasfpn
- nasfpn_input_channels: [512, 1024, 2048]
- nasfpn_out_channel: 256
- nasfpn_num_outs: 5
- nasfpn_stack_times: 7
- nasfpn_start_level: 0
- nasfpn_end_level: -1
-
- # `mindrecord_dir` and `coco_root` are better to use absolute path.
- mindrecord_dir: "/home/datasets/coco_mindrecord"
- coco_root: "/home/datasets/COCO2017"
- train_data_type: "train2017"
- val_data_type: "val2017"
- instances_set: "annotations/instances_{}.json"
- coco_classes: ["background", "person", "bicycle", "car", "motorcycle", "airplane", "bus",
- "train", "truck", "boat", "traffic light", "fire hydrant",
- "stop sign", "parking meter", "bench", "bird", "cat", "dog",
- "horse", "sheep", "cow", "elephant", "bear", "zebra",
- "giraffe", "backpack", "umbrella", "handbag", "tie",
- "suitcase", "frisbee", "skis", "snowboard", "sports ball",
- "kite", "baseball bat", "baseball glove", "skateboard",
- "surfboard", "tennis racket", "bottle", "wine glass", "cup",
- "fork", "knife", "spoon", "bowl", "banana", "apple",
- "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza",
- "donut", "cake", "chair", "couch", "potted plant", "bed",
- "dining table", "toilet", "tv", "laptop", "mouse", "remote",
- "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
- "refrigerator", "book", "clock", "vase", "scissors",
- "teddy bear", "hair drier", "toothbrush"]
-
- # if coco used, `image_dir` and `anno_path` are useless
- image_dir: ""
- anno_path: ""
- save_checkpoint: True
- save_checkpoint_epochs: 1
- keep_checkpoint_max: 10
- save_checkpoint_path: "./ckpt"
- finish_epoch: 0
-
- # optimiter options
- workers: 24
- mode: "sink"
- epoch_size: 300
- batch_size: 16
- pre_trained: ""
- pre_trained_epoch_size: 0
- loss_scale: 1024
- filter_weight: False
-
- # ======================================================================================
- # Eval options
- dataset: "coco"
- checkpoint_path: ""
-
- # ======================================================================================
- # export options
- device_id: 0
- file_format: "MINDIR"
- export_batch_size: 1
- file_name: "retinanet_nasfpn"
-
- # ======================================================================================
- # postprocess options
- result_path: ""
- dataset_path: ""
- anno_path: ""  # NOTE: duplicate of the `anno_path` key defined above (same value); YAML last-value-wins applies
-
- ---
- # Help description for each configuration
- enable_modelarts: "Whether training on modelarts default: False"
- data_url: "Url for modelarts"
- train_url: "Url for modelarts"
- data_path: "The location of input data"
- output_path: "The location of the output file"
- device_target: "Target device to run on, GPU or Ascend. (Default: Ascend)"
- enable_profiling: "Whether enable profiling while training default: False"
- workers: "Num parallel workers."
- lr: "Learning rate, default is 0.1."
- mode: "Run sink mode or not, default is sink."
- epoch_size: "Epoch size, default is 300."
- batch_size: "Batch size, default is 16."
- pre_trained: "Pretrained Checkpoint file path."
- pre_trained_epoch_size: "Pretrained epoch size."
- save_checkpoint_epochs: "Save checkpoint epochs, default is 1."
- loss_scale: "Loss scale, default is 1024."
- filter_weight: "Filter weight parameters, default is False."
- dataset: "Dataset, default is coco."
- device_id: "Device id, default is 0."
- file_format: "file format choices [AIR, MINDIR]"
- file_name: "output file name."
- export_batch_size: "batch size"
- result_path: "result file path."
- dataset_path: "val image file path."
- anno_path: "val json file path"
|