// sde_predictor.h (3.4 KB)
  1. // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
  15. #include <ctime>
  16. #include <memory>
  17. #include <string>
  18. #include <utility>
  19. #include <vector>
  20. #include <opencv2/core/core.hpp>
  21. #include <opencv2/highgui/highgui.hpp>
  22. #include <opencv2/imgproc/imgproc.hpp>
  23. #include "paddle_inference_api.h" // NOLINT
  24. #include "include/config_parser.h"
  25. #include "include/preprocess_op.h"
  26. #include "include/utils.h"
  27. using namespace paddle_infer; // NOLINT
  28. namespace PaddleDetection {
  29. class SDEPredictor {
  30. public:
  31. explicit SDEPredictor(const std::string& device,
  32. const std::string& det_model_dir = "",
  33. const std::string& reid_model_dir = "",
  34. const double threshold = -1.,
  35. const std::string& run_mode = "paddle",
  36. const int gpu_id = 0,
  37. const bool use_mkldnn = false,
  38. const int cpu_threads = 1,
  39. bool trt_calib_mode = false,
  40. const int min_box_area = 200) {
  41. this->device_ = device;
  42. this->gpu_id_ = gpu_id;
  43. this->use_mkldnn_ = use_mkldnn;
  44. this->cpu_math_library_num_threads_ = cpu_threads;
  45. this->trt_calib_mode_ = trt_calib_mode;
  46. this->min_box_area_ = min_box_area;
  47. det_config_.load_config(det_model_dir);
  48. this->min_subgraph_size_ = det_config_.min_subgraph_size_;
  49. det_preprocessor_.Init(det_config_.preprocess_info_);
  50. reid_config_.load_config(reid_model_dir);
  51. reid_preprocessor_.Init(reid_config_.preprocess_info_);
  52. LoadModel(det_model_dir, reid_model_dir, run_mode);
  53. this->conf_thresh_ = det_config_.conf_thresh_;
  54. }
  55. // Load Paddle inference model
  56. void LoadModel(const std::string& det_model_dir,
  57. const std::string& reid_model_dir,
  58. const std::string& run_mode = "paddle");
  59. // Run predictor
  60. void Predict(const std::vector<cv::Mat> imgs,
  61. const double threshold = 0.5,
  62. MOTResult* result = nullptr,
  63. std::vector<double>* times = nullptr);
  64. private:
  65. std::string device_ = "CPU";
  66. float threhold = 0.5;
  67. int gpu_id_ = 0;
  68. bool use_mkldnn_ = false;
  69. int cpu_math_library_num_threads_ = 1;
  70. int min_subgraph_size_ = 3;
  71. bool trt_calib_mode_ = false;
  72. // Preprocess image and copy data to input buffer
  73. void Preprocess(const cv::Mat& image_mat);
  74. // Postprocess result
  75. void Postprocess(const cv::Mat dets, const cv::Mat emb, MOTResult* result);
  76. std::shared_ptr<Predictor> det_predictor_;
  77. std::shared_ptr<Predictor> reid_predictor_;
  78. Preprocessor det_preprocessor_;
  79. Preprocessor reid_preprocessor_;
  80. ImageBlob inputs_;
  81. std::vector<float> bbox_data_;
  82. std::vector<float> emb_data_;
  83. double threshold_;
  84. ConfigPaser det_config_;
  85. ConfigPaser reid_config_;
  86. float min_box_area_ = 200;
  87. float conf_thresh_;
  88. };
  89. } // namespace PaddleDetection