//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/dnn.hpp"
#else
#define CV_EXPORTS
#endif

#import <Foundation/Foundation.h>

@class ByteVector;
@class FloatVector;
@class IntVector;
@class Mat;
@class Net;
@class Rect2d;
@class Rect2i;
@class RotatedRect;
@class Scalar;
@class Size2i;

// C++: enum Backend (cv.dnn.Backend)
typedef NS_ENUM(int, Backend) {
    DNN_BACKEND_DEFAULT = 0,
    DNN_BACKEND_HALIDE = 0+1,
    DNN_BACKEND_INFERENCE_ENGINE = 0+2,
    DNN_BACKEND_OPENCV = 0+3,
    DNN_BACKEND_VKCOM = 0+4,
    DNN_BACKEND_CUDA = 0+5,
    DNN_BACKEND_WEBNN = 0+6,
    DNN_BACKEND_TIMVX = 0+7
};

// C++: enum SoftNMSMethod (cv.dnn.SoftNMSMethod)
typedef NS_ENUM(int, SoftNMSMethod) {
    SoftNMSMethod_SOFTNMS_LINEAR = 1,
    SoftNMSMethod_SOFTNMS_GAUSSIAN = 2
};

// C++: enum Target (cv.dnn.Target)
typedef NS_ENUM(int, Target) {
    DNN_TARGET_CPU = 0,
    DNN_TARGET_OPENCL = 0+1,
    DNN_TARGET_OPENCL_FP16 = 0+2,
    DNN_TARGET_MYRIAD = 0+3,
    DNN_TARGET_VULKAN = 0+4,
    DNN_TARGET_FPGA = 0+5,
    DNN_TARGET_CUDA = 0+6,
    DNN_TARGET_CUDA_FP16 = 0+7,
    DNN_TARGET_HDDL = 0+8,
    DNN_TARGET_NPU = 0+9
};

NS_ASSUME_NONNULL_BEGIN

// C++: class Dnn
/**
 * The Dnn module
 *
 * Member classes: `DictValue`, `Layer`, `Net`, `Model`, `ClassificationModel`, `KeypointsModel`, `SegmentationModel`, `DetectionModel`, `TextRecognitionModel`, `TextDetectionModel`, `TextDetectionModel_EAST`, `TextDetectionModel_DB`
 *
 * Member enums: `Backend`, `Target`, `SoftNMSMethod`
 */
CV_EXPORTS @interface Dnn : NSObject

#pragma mark - Methods

//
// vector_Target cv::dnn::getAvailableTargets(dnn_Backend be)
//
// Return type 'vector_Target' is not supported, skipping the function
//

//
// Net cv::dnn::readNetFromDarknet(String cfgFile, String darknetModel = String())
//
/**
 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
 * @param cfgFile path to the .cfg file with the text description of the network architecture.
 * @param darknetModel path to the .weights file with the learned network.
 * @return Net object that is ready to do a forward pass; throws an exception in failure cases.
 */
+ (Net*)readNetFromDarknetFile:(NSString*)cfgFile darknetModel:(NSString*)darknetModel NS_SWIFT_NAME(readNetFromDarknet(cfgFile:darknetModel:));
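//
// Usage sketch (illustrative, not part of the generated API; the resource names
// "yolov4.cfg"/"yolov4.weights" are placeholders for files bundled with the app):
//
//   NSString *cfg = [[NSBundle mainBundle] pathForResource:@"yolov4" ofType:@"cfg"];
//   NSString *weights = [[NSBundle mainBundle] pathForResource:@"yolov4" ofType:@"weights"];
//   Net *net = [Dnn readNetFromDarknetFile:cfg darknetModel:weights];
//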
/**
 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
 * @param cfgFile path to the .cfg file with the text description of the network architecture.
 * @return Net object that is ready to do a forward pass; throws an exception in failure cases.
 */
+ (Net*)readNetFromDarknetFile:(NSString*)cfgFile NS_SWIFT_NAME(readNetFromDarknet(cfgFile:));

//
// Net cv::dnn::readNetFromDarknet(vector_uchar bufferCfg, vector_uchar bufferModel = std::vector<uchar>())
//

/**
 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
 * @param bufferCfg A buffer containing the content of the .cfg file with the text description of the network architecture.
 * @param bufferModel A buffer containing the content of the .weights file with the learned network.
 * @return Net object.
 */
+ (Net*)readNetFromDarknetBuffer:(ByteVector*)bufferCfg bufferModel:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromDarknet(bufferCfg:bufferModel:));

/**
 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
 * @param bufferCfg A buffer containing the content of the .cfg file with the text description of the network architecture.
 * @return Net object.
 */
+ (Net*)readNetFromDarknetBuffer:(ByteVector*)bufferCfg NS_SWIFT_NAME(readNetFromDarknet(bufferCfg:));
//
// Net cv::dnn::readNetFromCaffe(String prototxt, String caffeModel = String())
//

/**
 * Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format.
 * @param prototxt path to the .prototxt file with the text description of the network architecture.
 * @param caffeModel path to the .caffemodel file with the learned network.
 * @return Net object.
 */
+ (Net*)readNetFromCaffeFile:(NSString*)prototxt caffeModel:(NSString*)caffeModel NS_SWIFT_NAME(readNetFromCaffe(prototxt:caffeModel:));
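//
// Usage sketch (illustrative; "deploy.prototxt" and "model.caffemodel" are placeholder
// resources assumed to be bundled with the app):
//
//   NSString *proto = [[NSBundle mainBundle] pathForResource:@"deploy" ofType:@"prototxt"];
//   NSString *model = [[NSBundle mainBundle] pathForResource:@"model" ofType:@"caffemodel"];
//   Net *caffeNet = [Dnn readNetFromCaffeFile:proto caffeModel:model];
//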
/**
 * Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format.
 * @param prototxt path to the .prototxt file with the text description of the network architecture.
 * @return Net object.
 */
+ (Net*)readNetFromCaffeFile:(NSString*)prototxt NS_SWIFT_NAME(readNetFromCaffe(prototxt:));

//
// Net cv::dnn::readNetFromCaffe(vector_uchar bufferProto, vector_uchar bufferModel = std::vector<uchar>())
//

/**
 * Reads a network model stored in Caffe format held in memory.
 * @param bufferProto buffer containing the content of the .prototxt file
 * @param bufferModel buffer containing the content of the .caffemodel file
 * @return Net object.
 */
+ (Net*)readNetFromCaffeBuffer:(ByteVector*)bufferProto bufferModel:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromCaffe(bufferProto:bufferModel:));

/**
 * Reads a network model stored in Caffe format held in memory.
 * @param bufferProto buffer containing the content of the .prototxt file
 * @return Net object.
 */
+ (Net*)readNetFromCaffeBuffer:(ByteVector*)bufferProto NS_SWIFT_NAME(readNetFromCaffe(bufferProto:));

//
// Net cv::dnn::readNetFromTensorflow(String model, String config = String())
//

/**
 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
 * @param model path to the .pb file with the binary protobuf description of the network architecture.
 * @param config path to the .pbtxt file that contains the text graph definition in protobuf format.
 * The resulting Net object is built from the text graph using the weights of the binary one, which lets us make it more flexible.
 * @return Net object.
 */
+ (Net*)readNetFromTensorflowFile:(NSString*)model config:(NSString*)config NS_SWIFT_NAME(readNetFromTensorflow(model:config:));
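//
// Usage sketch (illustrative; "frozen_graph.pb" and "graph.pbtxt" are placeholder paths):
//
//   Net *tfNet = [Dnn readNetFromTensorflowFile:@"/path/to/frozen_graph.pb"
//                                        config:@"/path/to/graph.pbtxt"];
//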
/**
 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
 * @param model path to the .pb file with the binary protobuf description of the network architecture.
 * @return Net object.
 */
+ (Net*)readNetFromTensorflowFile:(NSString*)model NS_SWIFT_NAME(readNetFromTensorflow(model:));

//
// Net cv::dnn::readNetFromTensorflow(vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>())
//

/**
 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
 * @param bufferModel buffer containing the content of the .pb file
 * @param bufferConfig buffer containing the content of the .pbtxt file
 * @return Net object.
 */
+ (Net*)readNetFromTensorflowBuffer:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig NS_SWIFT_NAME(readNetFromTensorflow(bufferModel:bufferConfig:));

/**
 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
 * @param bufferModel buffer containing the content of the .pb file
 * @return Net object.
 */
+ (Net*)readNetFromTensorflowBuffer:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromTensorflow(bufferModel:));

//
// Net cv::dnn::readNetFromTorch(String model, bool isBinary = true, bool evaluate = true)
//

/**
 * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
 * @param model path to the file, dumped from Torch by using the torch.save() function.
 * @param isBinary specifies whether the network was serialized in ASCII or binary mode.
 * @param evaluate specifies the testing phase of the network. If true, it's similar to the evaluate() method in Torch.
 * @return Net object.
 *
 * NOTE: ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the `long` type of the C language,
 * which has a different bit length on different systems.
 *
 * The file being loaded must contain a serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
 * with the network to import. Try to eliminate custom objects from the serialized data to avoid import errors.
 *
 * List of supported layers (i.e. object instances derived from the Torch nn.Module class):
 * - nn.Sequential
 * - nn.Parallel
 * - nn.Concat
 * - nn.Linear
 * - nn.SpatialConvolution
 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
 * - nn.ReLU, nn.TanH, nn.Sigmoid
 * - nn.Reshape
 * - nn.SoftMax, nn.LogSoftMax
 *
 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
 */
+ (Net*)readNetFromTorch:(NSString*)model isBinary:(BOOL)isBinary evaluate:(BOOL)evaluate NS_SWIFT_NAME(readNetFromTorch(model:isBinary:evaluate:));
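//
// Usage sketch (illustrative; "model.t7" is a placeholder path to a Torch7 dump):
//
//   Net *torchNet = [Dnn readNetFromTorch:@"/path/to/model.t7" isBinary:YES evaluate:YES];
//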
/**
 * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
 * @param model path to the file, dumped from Torch by using the torch.save() function.
 * @param isBinary specifies whether the network was serialized in ASCII or binary mode.
 * @return Net object.
 *
 * NOTE: ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the `long` type of the C language,
 * which has a different bit length on different systems.
 *
 * The file being loaded must contain a serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
 * with the network to import. Try to eliminate custom objects from the serialized data to avoid import errors.
 *
 * List of supported layers (i.e. object instances derived from the Torch nn.Module class):
 * - nn.Sequential
 * - nn.Parallel
 * - nn.Concat
 * - nn.Linear
 * - nn.SpatialConvolution
 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
 * - nn.ReLU, nn.TanH, nn.Sigmoid
 * - nn.Reshape
 * - nn.SoftMax, nn.LogSoftMax
 *
 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
 */
+ (Net*)readNetFromTorch:(NSString*)model isBinary:(BOOL)isBinary NS_SWIFT_NAME(readNetFromTorch(model:isBinary:));

/**
 * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
 * @param model path to the file, dumped from Torch by using the torch.save() function.
 * @return Net object.
 *
 * NOTE: ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the `long` type of the C language,
 * which has a different bit length on different systems.
 *
 * The file being loaded must contain a serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
 * with the network to import. Try to eliminate custom objects from the serialized data to avoid import errors.
 *
 * List of supported layers (i.e. object instances derived from the Torch nn.Module class):
 * - nn.Sequential
 * - nn.Parallel
 * - nn.Concat
 * - nn.Linear
 * - nn.SpatialConvolution
 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
 * - nn.ReLU, nn.TanH, nn.Sigmoid
 * - nn.Reshape
 * - nn.SoftMax, nn.LogSoftMax
 *
 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
 */
+ (Net*)readNetFromTorch:(NSString*)model NS_SWIFT_NAME(readNetFromTorch(model:));

//
// Net cv::dnn::readNet(String model, String config = "", String framework = "")
//

/**
 * Read deep learning network represented in one of the supported formats.
 * @param model Binary file containing trained weights. The following file
 * extensions are expected for models from different frameworks:
 * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
 * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
 * * `*.t7` | `*.net` (Torch, http://torch.ch/)
 * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
 * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
 * * `*.onnx` (ONNX, https://onnx.ai/)
 * @param config Text file containing network configuration. It could be a
 * file with one of the following extensions:
 * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
 * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
 * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
 * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
 * @param framework Explicit framework name tag to determine the format.
 * @return Net object.
 *
 * This function automatically detects the origin framework of the trained model
 * and calls the appropriate function, such as REF: readNetFromCaffe, REF: readNetFromTensorflow,
 * REF: readNetFromTorch or REF: readNetFromDarknet. The order of the @p model and @p config
 * arguments does not matter.
 */
+ (Net*)readNet:(NSString*)model config:(NSString*)config framework:(NSString*)framework NS_SWIFT_NAME(readNet(model:config:framework:));
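//
// Usage sketch (illustrative; the framework is inferred from the placeholder file
// extensions, so the explicit framework tag can usually be left empty):
//
//   Net *net = [Dnn readNet:@"/path/to/model.caffemodel"
//                    config:@"/path/to/deploy.prototxt"
//                 framework:@""];
//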
/**
 * Read deep learning network represented in one of the supported formats.
 * @param model Binary file containing trained weights. The following file
 * extensions are expected for models from different frameworks:
 * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
 * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
 * * `*.t7` | `*.net` (Torch, http://torch.ch/)
 * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
 * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
 * * `*.onnx` (ONNX, https://onnx.ai/)
 * @param config Text file containing network configuration. It could be a
 * file with one of the following extensions:
 * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
 * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
 * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
 * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
 * @return Net object.
 *
 * This function automatically detects the origin framework of the trained model
 * and calls the appropriate function, such as REF: readNetFromCaffe, REF: readNetFromTensorflow,
 * REF: readNetFromTorch or REF: readNetFromDarknet. The order of the @p model and @p config
 * arguments does not matter.
 */
+ (Net*)readNet:(NSString*)model config:(NSString*)config NS_SWIFT_NAME(readNet(model:config:));

/**
 * Read deep learning network represented in one of the supported formats.
 * @param model Binary file containing trained weights. The following file
 * extensions are expected for models from different frameworks:
 * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
 * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
 * * `*.t7` | `*.net` (Torch, http://torch.ch/)
 * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
 * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
 * * `*.onnx` (ONNX, https://onnx.ai/)
 * @return Net object.
 *
 * This function automatically detects the origin framework of the trained model
 * and calls the appropriate function, such as REF: readNetFromCaffe, REF: readNetFromTensorflow,
 * REF: readNetFromTorch or REF: readNetFromDarknet. The order of the @p model and @p config
 * arguments does not matter.
 */
+ (Net*)readNet:(NSString*)model NS_SWIFT_NAME(readNet(model:));
//
// Net cv::dnn::readNet(String framework, vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>())
//

/**
 * Read deep learning network represented in one of the supported formats.
 * This is an overloaded member function, provided for convenience.
 * It differs from the above function only in what argument(s) it accepts.
 * @param framework Name of origin framework.
 * @param bufferModel A buffer with the content of a binary file with weights.
 * @param bufferConfig A buffer with the content of a text file containing the network configuration.
 * @return Net object.
 */
+ (Net*)readNet:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig NS_SWIFT_NAME(readNet(framework:bufferModel:bufferConfig:));

/**
 * Read deep learning network represented in one of the supported formats.
 * This is an overloaded member function, provided for convenience.
 * It differs from the above function only in what argument(s) it accepts.
 * @param framework Name of origin framework.
 * @param bufferModel A buffer with the content of a binary file with weights.
 * @return Net object.
 */
+ (Net*)readNet:(NSString*)framework bufferModel:(ByteVector*)bufferModel NS_SWIFT_NAME(readNet(framework:bufferModel:));

//
// Mat cv::dnn::readTorchBlob(String filename, bool isBinary = true)
//

/**
 * Loads a blob that was serialized as a torch.Tensor object of the Torch7 framework.
 * @warning This function has the same limitations as readNetFromTorch().
 */
+ (Mat*)readTorchBlob:(NSString*)filename isBinary:(BOOL)isBinary NS_SWIFT_NAME(readTorchBlob(filename:isBinary:));

/**
 * Loads a blob that was serialized as a torch.Tensor object of the Torch7 framework.
 * @warning This function has the same limitations as readNetFromTorch().
 */
+ (Mat*)readTorchBlob:(NSString*)filename NS_SWIFT_NAME(readTorchBlob(filename:));

//
// Net cv::dnn::readNetFromModelOptimizer(String xml, String bin)
//

/**
 * Load a network from Intel's Model Optimizer intermediate representation.
 * @param xml XML configuration file with the network's topology.
 * @param bin Binary file with trained weights.
 * @return Net object.
 * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
 * backend.
 */
+ (Net*)readNetFromModelOptimizer:(NSString*)xml bin:(NSString*)bin NS_SWIFT_NAME(readNetFromModelOptimizer(xml:bin:));

//
// Net cv::dnn::readNetFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights)
//

/**
 * Load a network from Intel's Model Optimizer intermediate representation.
 * @param bufferModelConfig Buffer containing the XML configuration with the network's topology.
 * @param bufferWeights Buffer containing binary data with trained weights.
 * @return Net object.
 * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
 * backend.
 */
+ (Net*)readNetFromModelOptimizer:(ByteVector*)bufferModelConfig bufferWeights:(ByteVector*)bufferWeights NS_SWIFT_NAME(readNetFromModelOptimizer(bufferModelConfig:bufferWeights:));

//
// Net cv::dnn::readNetFromONNX(String onnxFile)
//

/**
 * Reads a network model from an <a href="https://onnx.ai/">ONNX</a> file.
 * @param onnxFile path to the .onnx file with the description of the network architecture.
 * @return Net object that is ready to do a forward pass; throws an exception in failure cases.
 */
+ (Net*)readNetFromONNXFile:(NSString*)onnxFile NS_SWIFT_NAME(readNetFromONNX(onnxFile:));
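//
// Usage sketch (illustrative; "model.onnx" is a placeholder bundled resource):
//
//   NSString *onnxPath = [[NSBundle mainBundle] pathForResource:@"model" ofType:@"onnx"];
//   Net *onnxNet = [Dnn readNetFromONNXFile:onnxPath];
//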
//
// Net cv::dnn::readNetFromONNX(vector_uchar buffer)
//

/**
 * Reads a network model from an <a href="https://onnx.ai/">ONNX</a>
 * in-memory buffer.
 * @param buffer in-memory buffer that stores the ONNX model bytes.
 * @return Net object that is ready to do a forward pass; throws an exception
 * in failure cases.
 */
+ (Net*)readNetFromONNXBuffer:(ByteVector*)buffer NS_SWIFT_NAME(readNetFromONNX(buffer:));

//
// Mat cv::dnn::readTensorFromONNX(String path)
//

/**
 * Creates a blob from a .pb file.
 * @param path path to the .pb file with the input tensor.
 * @return Mat.
 */
+ (Mat*)readTensorFromONNX:(NSString*)path NS_SWIFT_NAME(readTensorFromONNX(path:));
//
// Mat cv::dnn::blobFromImage(Mat image, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F)
//

/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p image values.
 * @param swapRB flag which indicates that the first and last channels
 * of a 3-channel image should be swapped.
 * @param crop flag which indicates whether the image will be cropped after resize or not
 * @param ddepth Depth of the output blob. Choose CV_32F or CV_8U.
 * If @p crop is true, the input image is resized so that one side after resize is equal to the corresponding
 * dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
 * If @p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop ddepth:(int)ddepth NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:swapRB:crop:ddepth:));
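//
// Usage sketch for a typical ImageNet-style preprocessing pipeline (illustrative;
// the input Mat is obtained elsewhere, and the Size2i/Scalar initializers shown here
// are assumed from the core Objective-C bindings):
//
//   Mat *image = ...; // BGR input frame, e.g. decoded with the imgcodecs/videoio modules
//   Size2i *inputSize = [[Size2i alloc] initWithWidth:224 height:224];
//   Scalar *mean = [[Scalar alloc] initWithV0:104 v1:117 v2:123];
//   Mat *blob = [Dnn blobFromImage:image scalefactor:1.0 size:inputSize mean:mean
//                           swapRB:NO crop:NO ddepth:CV_32F];
//   // The blob can then be passed to the Net's setInput before calling forward (see Net.h).
//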
/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p image values.
 * @param swapRB flag which indicates that the first and last channels
 * of a 3-channel image should be swapped.
 * @param crop flag which indicates whether the image will be cropped after resize or not
 * If @p crop is true, the input image is resized so that one side after resize is equal to the corresponding
 * dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
 * If @p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:swapRB:crop:));

/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p image values.
 * @param swapRB flag which indicates that the first and last channels
 * of a 3-channel image should be swapped.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:swapRB:));

/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p image values.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:));

/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param scalefactor multiplier for @p image values.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:));

/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @param scalefactor multiplier for @p image values.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor NS_SWIFT_NAME(blobFromImage(image:scalefactor:));

/**
 * Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
 * subtracts @p mean values, scales values by @p scalefactor, and swaps the Blue and Red channels.
 * @param image input image (with 1, 3 or 4 channels).
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImage:(Mat*)image NS_SWIFT_NAME(blobFromImage(image:));
//
// Mat cv::dnn::blobFromImages(vector_Mat images, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F)
//

/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if the images have BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p images values.
 * @param swapRB flag which indicates that the first and last channels
 * of a 3-channel image should be swapped.
 * @param crop flag which indicates whether the image will be cropped after resize or not
 * @param ddepth Depth of the output blob. Choose CV_32F or CV_8U.
 * If @p crop is true, the input image is resized so that one side after resize is equal to the corresponding
 * dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
 * If @p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop ddepth:(int)ddepth NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:swapRB:crop:ddepth:));
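//
// Usage sketch for batching several frames into one NCHW blob (illustrative; the input
// Mats are obtained elsewhere, and the same Size2i/Scalar helpers as above are assumed):
//
//   NSArray<Mat*> *frames = @[frame0, frame1, frame2];
//   Mat *batchBlob = [Dnn blobFromImages:frames scalefactor:(1.0/255.0)
//                                   size:[[Size2i alloc] initWithWidth:320 height:320]
//                                   mean:[[Scalar alloc] initWithV0:0 v1:0 v2:0]
//                                 swapRB:YES crop:NO];
//   // The first dimension of batchBlob equals frames.count (the batch size).
//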
/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if the images have BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p images values.
 * @param swapRB flag which indicates that the first and last channels
 * of a 3-channel image should be swapped.
 * @param crop flag which indicates whether the image will be cropped after resize or not
 * If @p crop is true, the input image is resized so that one side after resize is equal to the corresponding
 * dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
 * If @p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:swapRB:crop:));

/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if the images have BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p images values.
 * @param swapRB flag which indicates that the first and last channels
 * of a 3-channel image should be swapped.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:swapRB:));

/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param mean scalar with mean values which are subtracted from the channels. Values are intended
 * to be in (mean-R, mean-G, mean-B) order if the images have BGR ordering and @p swapRB is true.
 * @param scalefactor multiplier for @p images values.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:));

/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @param size spatial size for the output image
 * @param scalefactor multiplier for @p images values.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:));

/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @param scalefactor multiplier for @p images values.
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor NS_SWIFT_NAME(blobFromImages(images:scalefactor:));

/**
 * Creates a 4-dimensional blob from a series of images. Optionally resizes and
 * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
 * and swaps the Blue and Red channels.
 * @param images input images (all with 1, 3 or 4 channels).
 * @return 4-dimensional Mat with NCHW dimensions order.
 */
+ (Mat*)blobFromImages:(NSArray<Mat*>*)images NS_SWIFT_NAME(blobFromImages(images:));
//
// void cv::dnn::imagesFromBlob(Mat blob_, vector_Mat& images_)
//

/**
 * Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure
 * (std::vector<cv::Mat>).
 * @param blob_ 4-dimensional array (images, channels, height, width) in floating point precision (CV_32F) from
 * which you would like to extract the images.
 * @param images_ array of 2D Mat containing the images extracted from the blob in floating point precision
 * (CV_32F). They are neither normalized nor mean-added. The number of returned images equals the first dimension
 * of the blob (batch size). Every image has a number of channels equal to the second dimension of the blob (depth).
 */
+ (void)imagesFromBlob:(Mat*)blob_ images_:(NSMutableArray<Mat*>*)images_ NS_SWIFT_NAME(imagesFromBlob(blob_:images_:));
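//
// Usage sketch (illustrative): recover per-image 2D Mats from a CV_32F NCHW blob,
// e.g. one produced by blobFromImages above.
//
//   Mat *blob = ...;                   // CV_32F blob with NCHW layout
//   NSMutableArray<Mat*> *images = [NSMutableArray array];
//   [Dnn imagesFromBlob:blob images_:images];
//   // images.count equals the blob's first (batch) dimension.
//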
//
// void cv::dnn::shrinkCaffeModel(String src, String dst, vector_String layersTypes = std::vector<String>())
//

/**
 * Convert all weights of a Caffe network to half-precision floating point.
 * @param src Path to the origin model from the Caffe framework containing single-precision
 * floating point weights (usually has a `.caffemodel` extension).
 * @param dst Path to the destination model with updated weights.
 * @param layersTypes Set of layer types whose parameters will be converted.
 * By default, converts only the weights of Convolutional and Fully-Connected layers.
 *
 * NOTE: The shrunk model has no original float32 weights, so it can't be used
 * in the original Caffe framework anymore. However, the structure of the data
 * is taken from NVIDIA's Caffe fork: https://github.com/NVIDIA/caffe.
 * So the resulting model may be used there.
 */
+ (void)shrinkCaffeModel:(NSString*)src dst:(NSString*)dst layersTypes:(NSArray<NSString*>*)layersTypes NS_SWIFT_NAME(shrinkCaffeModel(src:dst:layersTypes:));
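//
// Usage sketch (illustrative; both paths are placeholders in a writable directory):
//
//   [Dnn shrinkCaffeModel:@"/path/to/model.caffemodel"
//                     dst:@"/path/to/model_fp16.caffemodel"];
//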
/**
 * Convert all weights of a Caffe network to half-precision floating point.
 * @param src Path to the origin model from the Caffe framework containing single-precision
 * floating point weights (usually has a `.caffemodel` extension).
 * @param dst Path to the destination model with updated weights.
 * By default, converts only the weights of Convolutional and Fully-Connected layers.
 *
 * NOTE: The shrunk model has no original float32 weights, so it can't be used
 * in the original Caffe framework anymore. However, the structure of the data
 * is taken from NVIDIA's Caffe fork: https://github.com/NVIDIA/caffe.
 * So the resulting model may be used there.
 */
+ (void)shrinkCaffeModel:(NSString*)src dst:(NSString*)dst NS_SWIFT_NAME(shrinkCaffeModel(src:dst:));

//
// void cv::dnn::writeTextGraph(String model, String output)
//

/**
 * Create a text representation for a binary network stored in protocol buffer format.
 * @param model A path to the binary network.
 * @param output A path to the output text file to be created.
 *
 * NOTE: To reduce output file size, trained weights are not included.
 */
+ (void)writeTextGraph:(NSString*)model output:(NSString*)output NS_SWIFT_NAME(writeTextGraph(model:output:));
//
// void cv::dnn::NMSBoxes(vector_Rect2d bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0)
//

/**
 * Performs non maximum suppression given boxes and corresponding scores.
 *
 * @param bboxes a set of bounding boxes to apply NMS.
 * @param scores a set of corresponding confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 * @param eta a coefficient in adaptive threshold formula: `$$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i$$`.
 * @param top_k if `>0`, keep at most @p top_k picked indices.
 */
+ (void)NMSBoxes:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta top_k:(int)top_k NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:top_k:));
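//
// Usage sketch (illustrative): filter overlapping detections. Building the Rect2d
// boxes, the FloatVector of confidences, and the empty output IntVector is assumed to
// happen elsewhere (e.g. while parsing a detector's output); the kept indices are
// written into `indices`.
//
//   NSArray<Rect2d*> *boxes = ...;     // candidate boxes
//   FloatVector *confidences = ...;    // one score per box
//   IntVector *indices = ...;          // empty output vector, filled by the call
//   [Dnn NMSBoxes:boxes scores:confidences score_threshold:0.5f
//         nms_threshold:0.4f indices:indices];
//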
/**
 * Performs non maximum suppression given boxes and corresponding scores.
 *
 * @param bboxes a set of bounding boxes to apply NMS.
 * @param scores a set of corresponding confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 * @param eta a coefficient in adaptive threshold formula: `$$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i$$`.
 */
+ (void)NMSBoxes:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:));

/**
 * Performs non maximum suppression given boxes and corresponding scores.
 *
 * @param bboxes a set of bounding boxes to apply NMS.
 * @param scores a set of corresponding confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 */
+ (void)NMSBoxes:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:));

//
// void cv::dnn::NMSBoxes(vector_RotatedRect bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0)
//

+ (void)NMSBoxesRotated:(NSArray<RotatedRect*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta top_k:(int)top_k NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:top_k:));

+ (void)NMSBoxesRotated:(NSArray<RotatedRect*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:));

+ (void)NMSBoxesRotated:(NSArray<RotatedRect*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:));

//
// void cv::dnn::softNMSBoxes(vector_Rect bboxes, vector_float scores, vector_float& updated_scores, float score_threshold, float nms_threshold, vector_int& indices, size_t top_k = 0, float sigma = 0.5, SoftNMSMethod method = SoftNMSMethod::SOFTNMS_GAUSSIAN)
//

/**
 * Performs soft non maximum suppression given boxes and corresponding scores.
 * Reference: https://arxiv.org/abs/1704.04503
 * @param bboxes a set of bounding boxes to apply Soft NMS.
 * @param scores a set of corresponding confidences.
 * @param updated_scores a set of corresponding updated confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 * @param top_k keep at most @p top_k picked indices.
 * @param sigma parameter of Gaussian weighting.
 * @param method Gaussian or linear.
 * @see `SoftNMSMethod`
 */
+ (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices top_k:(size_t)top_k sigma:(float)sigma method:(SoftNMSMethod)method NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:top_k:sigma:method:));
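//
// Usage sketch (illustrative): Soft-NMS rescoring with the Gaussian method. The input
// Rect2i boxes, FloatVector scores, and the output vectors are assumed to be created
// elsewhere.
//
//   [Dnn softNMSBoxes:boxes2i scores:confidences updated_scores:newScores
//       score_threshold:0.25f nms_threshold:0.45f indices:keptIndices
//                 top_k:0 sigma:0.5f method:SoftNMSMethod_SOFTNMS_GAUSSIAN];
//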
/**
 * Performs soft non maximum suppression given boxes and corresponding scores.
 * Reference: https://arxiv.org/abs/1704.04503
 * @param bboxes a set of bounding boxes to apply Soft NMS.
 * @param scores a set of corresponding confidences.
 * @param updated_scores a set of corresponding updated confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 * @param top_k keep at most @p top_k picked indices.
 * @param sigma parameter of Gaussian weighting.
 * @see `SoftNMSMethod`
 */
+ (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices top_k:(size_t)top_k sigma:(float)sigma NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:top_k:sigma:));

/**
 * Performs soft non maximum suppression given boxes and corresponding scores.
 * Reference: https://arxiv.org/abs/1704.04503
 * @param bboxes a set of bounding boxes to apply Soft NMS.
 * @param scores a set of corresponding confidences.
 * @param updated_scores a set of corresponding updated confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 * @param top_k keep at most @p top_k picked indices.
 * @see `SoftNMSMethod`
 */
+ (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices top_k:(size_t)top_k NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:top_k:));

/**
 * Performs soft non maximum suppression given boxes and corresponding scores.
 * Reference: https://arxiv.org/abs/1704.04503
 * @param bboxes a set of bounding boxes to apply Soft NMS.
 * @param scores a set of corresponding confidences.
 * @param updated_scores a set of corresponding updated confidences.
 * @param score_threshold a threshold used to filter boxes by score.
 * @param nms_threshold a threshold used in non maximum suppression.
 * @param indices the kept indices of bboxes after NMS.
 * @see `SoftNMSMethod`
 */
+ (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:));
//
// String cv::dnn::getInferenceEngineBackendType()
//

/**
 * Returns the Inference Engine internal backend API.
 *
 * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
 *
 * The `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
 *
 * @deprecated
 */
+ (NSString*)getInferenceEngineBackendType NS_SWIFT_NAME(getInferenceEngineBackendType()) DEPRECATED_ATTRIBUTE;

//
// String cv::dnn::setInferenceEngineBackendType(String newBackendType)
//

/**
 * Specify the Inference Engine internal backend API.
 *
 * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
 *
 * @return previous value of the internal backend API
 *
 * @deprecated
 */
+ (NSString*)setInferenceEngineBackendType:(NSString*)newBackendType NS_SWIFT_NAME(setInferenceEngineBackendType(newBackendType:)) DEPRECATED_ATTRIBUTE;

//
// void cv::dnn::resetMyriadDevice()
//

/**
 * Release a Myriad device (bound by OpenCV).
 *
 * A single Myriad device cannot be shared across multiple processes which use
 * the Inference Engine's Myriad plugin.
 */
+ (void)resetMyriadDevice NS_SWIFT_NAME(resetMyriadDevice());

//
// String cv::dnn::getInferenceEngineVPUType()
//

/**
 * Returns the Inference Engine VPU type.
 *
 * See values of `CV_DNN_INFERENCE_ENGINE_VPU_TYPE_*` macros.
 */
+ (NSString*)getInferenceEngineVPUType NS_SWIFT_NAME(getInferenceEngineVPUType());

//
// String cv::dnn::getInferenceEngineCPUType()
//

/**
 * Returns the Inference Engine CPU type.
 *
 * Specifies the OpenVINO plugin: CPU or ARM.
 */
+ (NSString*)getInferenceEngineCPUType NS_SWIFT_NAME(getInferenceEngineCPUType());

//
// void cv::dnn::releaseHDDLPlugin()
//

/**
 * Release the HDDL plugin.
 */
+ (void)releaseHDDLPlugin NS_SWIFT_NAME(releaseHDDLPlugin());

@end

NS_ASSUME_NONNULL_END