// jde_predictor.h
  1. // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
  15. #include <ctime>
  16. #include <memory>
  17. #include <string>
  18. #include <utility>
  19. #include <vector>
  20. #include <opencv2/core/core.hpp>
  21. #include <opencv2/highgui/highgui.hpp>
  22. #include <opencv2/imgproc/imgproc.hpp>
  23. #include "paddle_inference_api.h" // NOLINT
  24. #include "include/config_parser.h"
  25. #include "include/preprocess_op.h"
  26. #include "include/utils.h"
  27. using namespace paddle_infer; // NOLINT
  28. namespace PaddleDetection {
  29. class JDEPredictor {
  30. public:
  31. explicit JDEPredictor(const std::string& device = "CPU",
  32. const std::string& model_dir = "",
  33. const double threshold = -1.,
  34. const std::string& run_mode = "paddle",
  35. const int gpu_id = 0,
  36. const bool use_mkldnn = false,
  37. const int cpu_threads = 1,
  38. bool trt_calib_mode = false,
  39. const int min_box_area = 200) {
  40. this->device_ = device;
  41. this->gpu_id_ = gpu_id;
  42. this->use_mkldnn_ = use_mkldnn;
  43. this->cpu_math_library_num_threads_ = cpu_threads;
  44. this->trt_calib_mode_ = trt_calib_mode;
  45. this->min_box_area_ = min_box_area;
  46. config_.load_config(model_dir);
  47. this->min_subgraph_size_ = config_.min_subgraph_size_;
  48. preprocessor_.Init(config_.preprocess_info_);
  49. LoadModel(model_dir, run_mode);
  50. this->conf_thresh_ = config_.conf_thresh_;
  51. }
  52. // Load Paddle inference model
  53. void LoadModel(const std::string& model_dir,
  54. const std::string& run_mode = "paddle");
  55. // Run predictor
  56. void Predict(const std::vector<cv::Mat> imgs,
  57. const double threshold = 0.5,
  58. MOTResult* result = nullptr,
  59. std::vector<double>* times = nullptr);
  60. private:
  61. std::string device_ = "CPU";
  62. float threhold = 0.5;
  63. int gpu_id_ = 0;
  64. bool use_mkldnn_ = false;
  65. int cpu_math_library_num_threads_ = 1;
  66. int min_subgraph_size_ = 3;
  67. bool trt_calib_mode_ = false;
  68. // Preprocess image and copy data to input buffer
  69. void Preprocess(const cv::Mat& image_mat);
  70. // Postprocess result
  71. void Postprocess(const cv::Mat dets, const cv::Mat emb, MOTResult* result);
  72. std::shared_ptr<Predictor> predictor_;
  73. Preprocessor preprocessor_;
  74. ImageBlob inputs_;
  75. std::vector<float> bbox_data_;
  76. std::vector<float> emb_data_;
  77. double threshold_;
  78. ConfigPaser config_;
  79. float min_box_area_;
  80. float conf_thresh_;
  81. };
  82. } // namespace PaddleDetection