# picodet_s_192_lcnet_pedestrian.yml
  1. use_gpu: true
  2. use_xpu: false
  3. log_iter: 20
  4. save_dir: output
  5. snapshot_epoch: 1
  6. print_flops: false
  7. # Exporting the model
  8. export:
  9. post_process: True # Whether post-processing is included in the network when export model.
  10. nms: True # Whether NMS is included in the network when export model.
  11. benchmark: False # It is used to testing model performance, if set `True`, post-process and NMS will not be exported.
  12. metric: COCO
  13. num_classes: 1
  14. architecture: PicoDet
  15. pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams
  16. weights: output/picodet_s_192_lcnet_pedestrian/best_model
  17. find_unused_parameters: True
  18. use_ema: true
  19. epoch: 300
  20. snapshot_epoch: 10
  21. PicoDet:
  22. backbone: LCNet
  23. neck: LCPAN
  24. head: PicoHeadV2
  25. LCNet:
  26. scale: 0.75
  27. feature_maps: [3, 4, 5]
  28. LCPAN:
  29. out_channels: 96
  30. use_depthwise: True
  31. num_features: 4
  32. PicoHeadV2:
  33. conv_feat:
  34. name: PicoFeat
  35. feat_in: 96
  36. feat_out: 96
  37. num_convs: 2
  38. num_fpn_stride: 4
  39. norm_type: bn
  40. share_cls_reg: True
  41. use_se: True
  42. feat_in_chan: 96
  43. fpn_stride: [8, 16, 32, 64]
  44. prior_prob: 0.01
  45. reg_max: 7
  46. cell_offset: 0.5
  47. grid_cell_scale: 5.0
  48. static_assigner_epoch: 100
  49. use_align_head: True
  50. static_assigner:
  51. name: ATSSAssigner
  52. topk: 4
  53. force_gt_matching: False
  54. assigner:
  55. name: TaskAlignedAssigner
  56. topk: 13
  57. alpha: 1.0
  58. beta: 6.0
  59. loss_class:
  60. name: VarifocalLoss
  61. use_sigmoid: False
  62. iou_weighted: True
  63. loss_weight: 1.0
  64. loss_dfl:
  65. name: DistributionFocalLoss
  66. loss_weight: 0.5
  67. loss_bbox:
  68. name: GIoULoss
  69. loss_weight: 2.5
  70. nms:
  71. name: MultiClassNMS
  72. nms_top_k: 1000
  73. keep_top_k: 100
  74. score_threshold: 0.025
  75. nms_threshold: 0.6
  76. LearningRate:
  77. base_lr: 0.32
  78. schedulers:
  79. - !CosineDecay
  80. max_epochs: 300
  81. - !LinearWarmup
  82. start_factor: 0.1
  83. steps: 300
  84. OptimizerBuilder:
  85. optimizer:
  86. momentum: 0.9
  87. type: Momentum
  88. regularizer:
  89. factor: 0.00004
  90. type: L2
  91. worker_num: 6
  92. eval_height: &eval_height 192
  93. eval_width: &eval_width 192
  94. eval_size: &eval_size [*eval_height, *eval_width]
  95. TrainReader:
  96. sample_transforms:
  97. - Decode: {}
  98. - RandomCrop: {}
  99. - RandomFlip: {prob: 0.5}
  100. - RandomDistort: {}
  101. batch_transforms:
  102. - BatchRandomResize: {target_size: [128, 160, 192, 224, 256], random_size: True, random_interp: True, keep_ratio: False}
  103. - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
  104. - Permute: {}
  105. - PadGT: {}
  106. batch_size: 64
  107. shuffle: true
  108. drop_last: true
  109. EvalReader:
  110. sample_transforms:
  111. - Decode: {}
  112. - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}
  113. - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
  114. - Permute: {}
  115. batch_transforms:
  116. - PadBatch: {pad_to_stride: 32}
  117. batch_size: 8
  118. shuffle: false
  119. TestReader:
  120. inputs_def:
  121. image_shape: [1, 3, *eval_height, *eval_width]
  122. sample_transforms:
  123. - Decode: {}
  124. - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}
  125. - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
  126. - Permute: {}
  127. batch_size: 1
  128. TrainDataset:
  129. !COCODataSet
  130. image_dir: ""
  131. anno_path: aic_coco_train_cocoformat.json
  132. dataset_dir: dataset
  133. data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
  134. EvalDataset:
  135. !COCODataSet
  136. image_dir: val2017
  137. anno_path: annotations/instances_val2017.json
  138. dataset_dir: dataset/coco
  139. TestDataset:
  140. !ImageFolder
  141. anno_path: annotations/instances_val2017.json # also support txt (like VOC's label_list.txt)
  142. dataset_dir: dataset/coco # if set, anno_path will be 'dataset_dir/anno_path'