//
// This file is auto-generated. Please don't modify it!
//
#pragma once

#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/dnn.hpp"
#import "opencv2/dnn/dnn.hpp"
#else
#define CV_EXPORTS
#endif

#import <Foundation/Foundation.h>

@class ByteVector;
@class DictValue;
@class DoubleVector;
@class FloatVector;
@class IntVector;
@class Layer;
@class Mat;
@class Scalar;

NS_ASSUME_NONNULL_BEGIN
// C++: class Net
/**
 * This class allows creating and manipulating comprehensive artificial neural networks.
 *
 * A neural network is represented as a directed acyclic graph (DAG), where vertices are Layer instances,
 * and edges specify relationships between layers' inputs and outputs.
 *
 * Each network layer has a unique integer id and a unique string name inside its network.
 * LayerId can store either a layer name or a layer id.
 *
 * This class supports reference counting of its instances, i.e. copies point to the same instance.
 *
 * Member of `Dnn`
 */
CV_EXPORTS @interface Net : NSObject

#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::dnn::Net> nativePtr;
#endif

#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::dnn::Net>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::dnn::Net>)nativePtr;
#endif

#pragma mark - Methods

//
// cv::dnn::Net::Net()
//
- (instancetype)init;

//
// static Net cv::dnn::Net::readFromModelOptimizer(String xml, String bin)
//
/**
 * Create a network from Intel's Model Optimizer intermediate representation (IR).
 * @param xml XML configuration file with network's topology.
 * @param bin Binary file with trained weights.
 * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
 * backend.
 */
+ (Net*)readFromModelOptimizer:(NSString*)xml bin:(NSString*)bin NS_SWIFT_NAME(readFromModelOptimizer(xml:bin:));
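//
// A minimal usage sketch (not part of the generated API). The file paths below are
// hypothetical; any OpenVINO IR pair (.xml topology + .bin weights) would do:
//
//   Net* net = [Net readFromModelOptimizer:@"face-detection.xml" bin:@"face-detection.bin"];
//   if ([net empty]) {
//       NSLog(@"Failed to load the model");
//   }
//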

//
// static Net cv::dnn::Net::readFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights)
//
/**
 * Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
 * @param bufferModelConfig buffer with model's configuration.
 * @param bufferWeights buffer with model's trained weights.
 * @return Net object.
 */
+ (Net*)readFromModelOptimizer:(ByteVector*)bufferModelConfig bufferWeights:(ByteVector*)bufferWeights NS_SWIFT_NAME(readFromModelOptimizer(bufferModelConfig:bufferWeights:));

//
// bool cv::dnn::Net::empty()
//
/**
 * Returns true if there are no layers in the network.
 */
- (BOOL)empty NS_SWIFT_NAME(empty());

//
// String cv::dnn::Net::dump()
//
/**
 * Dump net to String.
 * @return String with structure, hyperparameters, backend, target and fusion.
 * Call this method after setInput(). To see the correct backend, target and fusion, call it after forward().
 */
- (NSString*)dump NS_SWIFT_NAME(dump());

//
// void cv::dnn::Net::dumpToFile(String path)
//
/**
 * Dump net structure, hyperparameters, backend, target and fusion to a dot file.
 * @param path path to output file with .dot extension
 * @see `dump()`
 */
- (void)dumpToFile:(NSString*)path NS_SWIFT_NAME(dumpToFile(path:));
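//
// A short usage sketch (not part of the generated API); the output path is hypothetical:
//
//   NSLog(@"%@", [net dump]);            // textual dump of structure, backend, target, fusion
//   [net dumpToFile:@"/tmp/net.dot"];    // same information as a Graphviz .dot file
//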

//
// int cv::dnn::Net::getLayerId(String layer)
//
/**
 * Converts string name of the layer to the integer identifier.
 * @return id of the layer, or -1 if the layer wasn't found.
 */
- (int)getLayerId:(NSString*)layer NS_SWIFT_NAME(getLayerId(layer:));

//
// vector_String cv::dnn::Net::getLayerNames()
//
- (NSArray<NSString*>*)getLayerNames NS_SWIFT_NAME(getLayerNames());

//
// Ptr_Layer cv::dnn::Net::getLayer(int layerId)
//
/**
 * Returns pointer to layer with specified id or name which the network uses.
 */
- (Layer*)getLayer:(int)layerId NS_SWIFT_NAME(getLayer(layerId:));

//
// Ptr_Layer cv::dnn::Net::getLayer(String layerName)
//
/**
 *
 * @deprecated Use int getLayerId(const String &layer)
 */
- (Layer*)getLayerByName:(NSString*)layerName NS_SWIFT_NAME(getLayer(layerName:)) DEPRECATED_ATTRIBUTE;

//
// Ptr_Layer cv::dnn::Net::getLayer(LayerId layerId)
//
/**
 *
 * @deprecated to be removed
 */
- (Layer*)getLayerByDictValue:(DictValue*)layerId NS_SWIFT_NAME(getLayer(layerId:)) DEPRECATED_ATTRIBUTE;

//
// void cv::dnn::Net::connect(String outPin, String inpPin)
//
/**
 * Connects output of the first layer to input of the second layer.
 * @param outPin descriptor of the first layer output.
 * @param inpPin descriptor of the second layer input.
 *
 * Descriptors have the following template <DFN>&lt;layer_name&gt;[.input_number]</DFN>:
 * - the first part of the template <DFN>layer_name</DFN> is the string name of the added layer.
 * If this part is empty then the network input pseudo layer will be used;
 * - the second optional part of the template <DFN>input_number</DFN>
 * is either the number of the layer input, or its label.
 * If this part is omitted then the first layer input will be used.
 *
 * @see `setNetInputs()`, `Layer::inputNameToIndex()`, `Layer::outputNameToIndex()`
 */
- (void)connect:(NSString*)outPin inpPin:(NSString*)inpPin NS_SWIFT_NAME(connect(outPin:inpPin:));
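//
// A hedged sketch of the descriptor format described above; "conv1" and "relu1" are
// hypothetical layer names that would have to exist in the network being assembled:
//
//   [net connect:@"conv1" inpPin:@"relu1"];       // first output of conv1 -> first input of relu1
//   [net connect:@"conv1.0" inpPin:@"relu1.0"];   // the same, with explicit output/input numbers
//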

//
// void cv::dnn::Net::setInputsNames(vector_String inputBlobNames)
//
/**
 * Sets output names of the network input pseudo layer.
 *
 * Each net always has its own special network input pseudo layer with id=0.
 * This layer stores the user blobs only and doesn't make any computations.
 * In fact, this layer provides the only way to pass user data into the network.
 * As any other layer, this layer can label its outputs and this function provides an easy way to do this.
 */
- (void)setInputsNames:(NSArray<NSString*>*)inputBlobNames NS_SWIFT_NAME(setInputsNames(inputBlobNames:));

//
// void cv::dnn::Net::setInputShape(String inputName, MatShape shape)
//
/**
 * Specify shape of network input.
 */
- (void)setInputShape:(NSString*)inputName shape:(IntVector*)shape NS_SWIFT_NAME(setInputShape(inputName:shape:));

//
// Mat cv::dnn::Net::forward(String outputName = String())
//
/**
 * Runs forward pass to compute output of layer with name @p outputName.
 * @param outputName name of the layer whose output is needed.
 * @return blob for the first output of the specified layer.
 * By default runs forward pass for the whole network.
 */
- (Mat*)forward:(NSString*)outputName NS_SWIFT_NAME(forward(outputName:));

/**
 * Runs forward pass to compute output of layer with name @p outputName.
 * @return blob for the first output of the specified layer.
 * By default runs forward pass for the whole network.
 */
- (Mat*)forward NS_SWIFT_NAME(forward());
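//
// A minimal inference sketch (assumptions: "blob" is a Mat already shaped as the network
// expects, and "prob" is the name of an existing output layer):
//
//   [net setInput:blob];
//   Mat* probOut = [net forward:@"prob"];   // output of the layer named "prob"
//   Mat* defOut  = [net forward];           // or: first output of the whole network
//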

//
// AsyncArray cv::dnn::Net::forwardAsync(String outputName = String())
//
// Return type 'AsyncArray' is not supported, skipping the function
//
// void cv::dnn::Net::forward(vector_Mat& outputBlobs, String outputName = String())
//
/**
 * Runs forward pass to compute output of layer with name @p outputName.
 * @param outputBlobs contains all output blobs for the specified layer.
 * @param outputName name of the layer whose output is needed.
 * If @p outputName is empty, runs forward pass for the whole network.
 */
- (void)forwardOutputBlobs:(NSMutableArray<Mat*>*)outputBlobs outputName:(NSString*)outputName NS_SWIFT_NAME(forward(outputBlobs:outputName:));

/**
 * Runs forward pass to compute output of layer with name @p outputName.
 * @param outputBlobs contains all output blobs for the specified layer.
 * If @p outputName is empty, runs forward pass for the whole network.
 */
- (void)forwardOutputBlobs:(NSMutableArray<Mat*>*)outputBlobs NS_SWIFT_NAME(forward(outputBlobs:));

//
// void cv::dnn::Net::forward(vector_Mat& outputBlobs, vector_String outBlobNames)
//
/**
 * Runs forward pass to compute outputs of layers listed in @p outBlobNames.
 * @param outputBlobs contains blobs for the first outputs of the specified layers.
 * @param outBlobNames names of the layers whose outputs are needed.
 */
- (void)forwardOutputBlobs:(NSMutableArray<Mat*>*)outputBlobs outBlobNames:(NSArray<NSString*>*)outBlobNames NS_SWIFT_NAME(forward(outputBlobs:outBlobNames:));

//
// void cv::dnn::Net::forward(vector_vector_Mat& outputBlobs, vector_String outBlobNames)
//
/**
 * Runs forward pass to compute outputs of layers listed in @p outBlobNames.
 * @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames.
 * @param outBlobNames names of the layers whose outputs are needed.
 */
- (void)forwardAndRetrieve:(NSMutableArray<NSMutableArray<Mat*>*>*)outputBlobs outBlobNames:(NSArray<NSString*>*)outBlobNames NS_SWIFT_NAME(forwardAndRetrieve(outputBlobs:outBlobNames:));
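//
// A sketch of fetching several outputs in one pass; the names are typically taken from
// getUnconnectedOutLayersNames (assumption: an input blob was already set with setInput:):
//
//   NSArray<NSString*>* outNames = [net getUnconnectedOutLayersNames];
//   NSMutableArray<Mat*>* outs = [NSMutableArray array];
//   [net forwardOutputBlobs:outs outBlobNames:outNames];
//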

//
// Net cv::dnn::Net::quantize(vector_Mat calibData, int inputsDtype, int outputsDtype)
//
/**
 * Returns a quantized Net from a floating-point Net.
 * @param calibData Calibration data to compute the quantization parameters.
 * @param inputsDtype Datatype of quantized net's inputs. Can be CV_32F or CV_8S.
 * @param outputsDtype Datatype of quantized net's outputs. Can be CV_32F or CV_8S.
 */
- (Net*)quantize:(NSArray<Mat*>*)calibData inputsDtype:(int)inputsDtype outputsDtype:(int)outputsDtype NS_SWIFT_NAME(quantize(calibData:inputsDtype:outputsDtype:));
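//
// A rough post-training quantization sketch (assumptions: "calibBlobs" is an NSArray<Mat*>
// of representative input blobs, and the CV_32F / CV_8S depth constants are visible,
// e.g. via OpenCV's core headers):
//
//   Net* int8Net = [net quantize:calibBlobs inputsDtype:CV_32F outputsDtype:CV_32F];
//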

//
// void cv::dnn::Net::getInputDetails(vector_float& scales, vector_int& zeropoints)
//
/**
 * Returns input scale and zeropoint for a quantized Net.
 * @param scales output parameter for returning input scales.
 * @param zeropoints output parameter for returning input zeropoints.
 */
- (void)getInputDetails:(FloatVector*)scales zeropoints:(IntVector*)zeropoints NS_SWIFT_NAME(getInputDetails(scales:zeropoints:));

//
// void cv::dnn::Net::getOutputDetails(vector_float& scales, vector_int& zeropoints)
//
/**
 * Returns output scale and zeropoint for a quantized Net.
 * @param scales output parameter for returning output scales.
 * @param zeropoints output parameter for returning output zeropoints.
 */
- (void)getOutputDetails:(FloatVector*)scales zeropoints:(IntVector*)zeropoints NS_SWIFT_NAME(getOutputDetails(scales:zeropoints:));

//
// void cv::dnn::Net::setHalideScheduler(String scheduler)
//
/**
 * Compile Halide layers.
 * @param scheduler Path to YAML file with scheduling directives.
 * @see `-setPreferableBackend:`
 *
 * Schedules layers that support the Halide backend, then compiles them for a
 * specific target. For layers that are not represented in the scheduling file,
 * or if no manual scheduling is used at all, automatic scheduling will be applied.
 */
- (void)setHalideScheduler:(NSString*)scheduler NS_SWIFT_NAME(setHalideScheduler(scheduler:));

//
// void cv::dnn::Net::setPreferableBackend(int backendId)
//
/**
 * Ask network to use specific computation backend where it is supported.
 * @param backendId backend identifier.
 * @see `Backend`
 *
 * If OpenCV is compiled with Intel's Inference Engine library, DNN_BACKEND_DEFAULT
 * means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it equals to DNN_BACKEND_OPENCV.
 */
- (void)setPreferableBackend:(int)backendId NS_SWIFT_NAME(setPreferableBackend(backendId:));

//
// void cv::dnn::Net::setPreferableTarget(int targetId)
//
/**
 * Ask network to make computations on specific target device.
 * @param targetId target identifier.
 * @see `Target`
 *
 * List of supported combinations backend / target:
 * |                        | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE | DNN_BACKEND_CUDA |
 * |------------------------|--------------------|------------------------------|--------------------|------------------|
 * | DNN_TARGET_CPU         |         +          |              +               |         +          |                  |
 * | DNN_TARGET_OPENCL      |         +          |              +               |         +          |                  |
 * | DNN_TARGET_OPENCL_FP16 |         +          |              +               |                    |                  |
 * | DNN_TARGET_MYRIAD      |                    |              +               |                    |                  |
 * | DNN_TARGET_FPGA        |                    |              +               |                    |                  |
 * | DNN_TARGET_CUDA        |                    |                              |                    |        +         |
 * | DNN_TARGET_CUDA_FP16   |                    |                              |                    |        +         |
 * | DNN_TARGET_HDDL        |                    |              +               |                    |                  |
 */
- (void)setPreferableTarget:(int)targetId NS_SWIFT_NAME(setPreferableTarget(targetId:));
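//
// A minimal sketch of picking a backend/target pair from the table above; the
// DNN_BACKEND_* / DNN_TARGET_* constants are assumed to be exposed by the Dnn module of
// these bindings:
//
//   [net setPreferableBackend:DNN_BACKEND_OPENCV];
//   [net setPreferableTarget:DNN_TARGET_CPU];
//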

//
// void cv::dnn::Net::setInput(Mat blob, String name = "", double scalefactor = 1.0, Scalar mean = Scalar())
//
/**
 * Sets the new input value for the network.
 * @param blob A new blob. Should have CV_32F or CV_8U depth.
 * @param name A name of input layer.
 * @param scalefactor An optional normalization scale.
 * @param mean Optional mean subtraction values.
 * @see `connect(String, String)` to know the format of the descriptor.
 *
 * If scale or mean values are specified, a final input blob is computed
 * as:
 * `$$input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)$$`
 */
- (void)setInput:(Mat*)blob name:(NSString*)name scalefactor:(double)scalefactor mean:(Scalar*)mean NS_SWIFT_NAME(setInput(blob:name:scalefactor:mean:));

/**
 * Sets the new input value for the network.
 * @param blob A new blob. Should have CV_32F or CV_8U depth.
 * @param name A name of input layer.
 * @param scalefactor An optional normalization scale.
 * @see `connect(String, String)` to know the format of the descriptor.
 *
 * If scale or mean values are specified, a final input blob is computed
 * as:
 * `$$input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)$$`
 */
- (void)setInput:(Mat*)blob name:(NSString*)name scalefactor:(double)scalefactor NS_SWIFT_NAME(setInput(blob:name:scalefactor:));

/**
 * Sets the new input value for the network.
 * @param blob A new blob. Should have CV_32F or CV_8U depth.
 * @param name A name of input layer.
 * @see `connect(String, String)` to know the format of the descriptor.
 *
 * If scale or mean values are specified, a final input blob is computed
 * as:
 * `$$input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)$$`
 */
- (void)setInput:(Mat*)blob name:(NSString*)name NS_SWIFT_NAME(setInput(blob:name:));

/**
 * Sets the new input value for the network.
 * @param blob A new blob. Should have CV_32F or CV_8U depth.
 * @see `connect(String, String)` to know the format of the descriptor.
 *
 * If scale or mean values are specified, a final input blob is computed
 * as:
 * `$$input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)$$`
 */
- (void)setInput:(Mat*)blob NS_SWIFT_NAME(setInput(blob:));
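//
// A hedged sketch of the preprocessing formula above, using only the overloads declared
// in this header (the 1/255 scale is illustrative, not a recommendation):
//
//   [net setInput:blob];                                  // raw blob, no preprocessing
//   [net setInput:blob name:@"" scalefactor:1.0/255.0];   // same blob, rescaled to [0,1]
//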

//
// void cv::dnn::Net::setParam(int layer, int numParam, Mat blob)
//
/**
 * Sets the new value for the learned param of the layer.
 * @param layer name or id of the layer.
 * @param numParam index of the layer parameter in the Layer::blobs array.
 * @param blob the new value.
 * @see `Layer::blobs`
 * NOTE: If shape of the new blob differs from the previous shape,
 * then the following forward pass may fail.
 */
- (void)setParam:(int)layer numParam:(int)numParam blob:(Mat*)blob NS_SWIFT_NAME(setParam(layer:numParam:blob:));

//
// void cv::dnn::Net::setParam(String layerName, int numParam, Mat blob)
//
- (void)setParamByName:(NSString*)layerName numParam:(int)numParam blob:(Mat*)blob NS_SWIFT_NAME(setParam(layerName:numParam:blob:));

//
// Mat cv::dnn::Net::getParam(int layer, int numParam = 0)
//
/**
 * Returns parameter blob of the layer.
 * @param layer name or id of the layer.
 * @param numParam index of the layer parameter in the Layer::blobs array.
 * @see `Layer::blobs`
 */
- (Mat*)getParam:(int)layer numParam:(int)numParam NS_SWIFT_NAME(getParam(layer:numParam:));

/**
 * Returns parameter blob of the layer.
 * @param layer name or id of the layer.
 * @see `Layer::blobs`
 */
- (Mat*)getParam:(int)layer NS_SWIFT_NAME(getParam(layer:));

//
// Mat cv::dnn::Net::getParam(String layerName, int numParam = 0)
//
- (Mat*)getParamByName:(NSString*)layerName numParam:(int)numParam NS_SWIFT_NAME(getParam(layerName:numParam:));
- (Mat*)getParamByName:(NSString*)layerName NS_SWIFT_NAME(getParam(layerName:));

//
// vector_int cv::dnn::Net::getUnconnectedOutLayers()
//
/**
 * Returns indexes of layers with unconnected outputs.
 *
 * FIXIT: Rework API to registerOutput() approach, deprecate this call
 */
- (IntVector*)getUnconnectedOutLayers NS_SWIFT_NAME(getUnconnectedOutLayers());

//
// vector_String cv::dnn::Net::getUnconnectedOutLayersNames()
//
/**
 * Returns names of layers with unconnected outputs.
 *
 * FIXIT: Rework API to registerOutput() approach, deprecate this call
 */
- (NSArray<NSString*>*)getUnconnectedOutLayersNames NS_SWIFT_NAME(getUnconnectedOutLayersNames());

//
// void cv::dnn::Net::getLayersShapes(vector_MatShape netInputShapes, vector_int& layersIds, vector_vector_MatShape& inLayersShapes, vector_vector_MatShape& outLayersShapes)
//
/**
 * Returns input and output shapes for all layers in loaded model;
 * preliminary inferencing isn't necessary.
 * @param netInputShapes shapes for all input blobs in net input layer.
 * @param layersIds output parameter for layer IDs.
 * @param inLayersShapes output parameter for input layers shapes;
 * order is the same as in layersIds
 * @param outLayersShapes output parameter for output layers shapes;
 * order is the same as in layersIds
 */
- (void)getLayersShapesWithNetInputShapes:(NSArray<IntVector*>*)netInputShapes layersIds:(IntVector*)layersIds inLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)inLayersShapes outLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)outLayersShapes NS_SWIFT_NAME(getLayersShapes(netInputShapes:layersIds:inLayersShapes:outLayersShapes:));

//
// void cv::dnn::Net::getLayersShapes(MatShape netInputShape, vector_int& layersIds, vector_vector_MatShape& inLayersShapes, vector_vector_MatShape& outLayersShapes)
//
- (void)getLayersShapesWithNetInputShape:(IntVector*)netInputShape layersIds:(IntVector*)layersIds inLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)inLayersShapes outLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)outLayersShapes NS_SWIFT_NAME(getLayersShapes(netInputShape:layersIds:inLayersShapes:outLayersShapes:));

//
// int64 cv::dnn::Net::getFLOPS(vector_MatShape netInputShapes)
//
/**
 * Computes FLOP for the whole loaded model with specified input shapes.
 * @param netInputShapes vector of shapes for all net inputs.
 * @return computed FLOP.
 */
- (long)getFLOPSWithNetInputShapes:(NSArray<IntVector*>*)netInputShapes NS_SWIFT_NAME(getFLOPS(netInputShapes:));

//
// int64 cv::dnn::Net::getFLOPS(MatShape netInputShape)
//
- (long)getFLOPSWithNetInputShape:(IntVector*)netInputShape NS_SWIFT_NAME(getFLOPS(netInputShape:));

//
// int64 cv::dnn::Net::getFLOPS(int layerId, vector_MatShape netInputShapes)
//
- (long)getFLOPSWithLayerId:(int)layerId netInputShapes:(NSArray<IntVector*>*)netInputShapes NS_SWIFT_NAME(getFLOPS(layerId:netInputShapes:));

//
// int64 cv::dnn::Net::getFLOPS(int layerId, MatShape netInputShape)
//
- (long)getFLOPSWithLayerId:(int)layerId netInputShape:(IntVector*)netInputShape NS_SWIFT_NAME(getFLOPS(layerId:netInputShape:));

//
// void cv::dnn::Net::getLayerTypes(vector_String& layersTypes)
//
/**
 * Returns list of types for layers used in the model.
 * @param layersTypes output parameter for returning types.
 */
- (void)getLayerTypes:(NSMutableArray<NSString*>*)layersTypes NS_SWIFT_NAME(getLayerTypes(layersTypes:));

//
// int cv::dnn::Net::getLayersCount(String layerType)
//
/**
 * Returns count of layers of specified type.
 * @param layerType type.
 * @return count of layers
 */
- (int)getLayersCount:(NSString*)layerType NS_SWIFT_NAME(getLayersCount(layerType:));

//
// void cv::dnn::Net::getMemoryConsumption(MatShape netInputShape, size_t& weights, size_t& blobs)
//
- (void)getMemoryConsumption:(IntVector*)netInputShape weights:(size_t)weights blobs:(size_t)blobs NS_SWIFT_NAME(getMemoryConsumption(netInputShape:weights:blobs:));

//
// void cv::dnn::Net::getMemoryConsumption(int layerId, vector_MatShape netInputShapes, size_t& weights, size_t& blobs)
//
- (void)getMemoryConsumption:(int)layerId netInputShapes:(NSArray<IntVector*>*)netInputShapes weights:(size_t)weights blobs:(size_t)blobs NS_SWIFT_NAME(getMemoryConsumption(layerId:netInputShapes:weights:blobs:));

//
// void cv::dnn::Net::getMemoryConsumption(int layerId, MatShape netInputShape, size_t& weights, size_t& blobs)
//
- (void)getMemoryConsumption:(int)layerId netInputShape:(IntVector*)netInputShape weights:(size_t)weights blobs:(size_t)blobs NS_SWIFT_NAME(getMemoryConsumption(layerId:netInputShape:weights:blobs:));

//
// void cv::dnn::Net::enableFusion(bool fusion)
//
/**
 * Enables or disables layer fusion in the network.
 * @param fusion true to enable the fusion, false to disable. The fusion is enabled by default.
 */
- (void)enableFusion:(BOOL)fusion NS_SWIFT_NAME(enableFusion(fusion:));

//
// int64 cv::dnn::Net::getPerfProfile(vector_double& timings)
//
/**
 * Returns overall time for inference and timings (in ticks) for layers.
 *
 * Indexes in the returned vector correspond to layer ids. Some layers can be fused with others;
 * in this case a zero ticks count will be returned for those skipped layers. Supported by DNN_BACKEND_OPENCV on DNN_TARGET_CPU only.
 *
 * @param timings vector for tick timings for all layers.
 * @return overall ticks for model inference.
 */
- (long)getPerfProfile:(DoubleVector*)timings NS_SWIFT_NAME(getPerfProfile(timings:));
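//
// A hedged profiling sketch (assumptions: a forward pass has already been run, since the
// timings are collected during inference, and DoubleVector can be created empty):
//
//   DoubleVector* timings = [DoubleVector new];
//   long totalTicks = [net getPerfProfile:timings];
//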

@end

NS_ASSUME_NONNULL_END