---
# Training schedule: total number of epochs.
epoch: 12

# Learning-rate schedule. The custom tags (!PiecewiseDecay, !LinearWarmup)
# are resolved by the consuming framework's config loader — do not remove.
LearningRate:
  base_lr: 0.06
  schedulers:
    # Step decay: multiply LR by `gamma` at each milestone epoch.
    - !PiecewiseDecay
      gamma: 0.1
      milestones: [8, 11]
    # Linear warmup over the first `steps` iterations, starting at
    # base_lr * start_factor.
    - !LinearWarmup
      start_factor: 0.00066
      steps: 1500

# Optimizer: SGD with momentum, plus L2 weight decay.
OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0001
    type: L2