//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class BackgroundSubtractorKNN;
@class BackgroundSubtractorMOG2;
@class Mat;
@class Rect2i;
@class RotatedRect;
@class Size2i;
@class TermCriteria;
NS_ASSUME_NONNULL_BEGIN
// C++: class Video
/**
* The Video module
*
* Member classes: `KalmanFilter`, `DenseOpticalFlow`, `SparseOpticalFlow`, `FarnebackOpticalFlow`, `VariationalRefinement`, `DISOpticalFlow`, `SparsePyrLKOpticalFlow`, `Tracker`, `TrackerMIL`, `TrackerMILParams`, `TrackerGOTURN`, `TrackerGOTURNParams`, `TrackerDaSiamRPN`, `TrackerDaSiamRPNParams`, `BackgroundSubtractor`, `BackgroundSubtractorMOG2`, `BackgroundSubtractorKNN`
*
*/
CV_EXPORTS @interface Video : NSObject
#pragma mark - Class Constants
@property (class, readonly) int OPTFLOW_USE_INITIAL_FLOW NS_SWIFT_NAME(OPTFLOW_USE_INITIAL_FLOW);
@property (class, readonly) int OPTFLOW_LK_GET_MIN_EIGENVALS NS_SWIFT_NAME(OPTFLOW_LK_GET_MIN_EIGENVALS);
@property (class, readonly) int OPTFLOW_FARNEBACK_GAUSSIAN NS_SWIFT_NAME(OPTFLOW_FARNEBACK_GAUSSIAN);
@property (class, readonly) int MOTION_TRANSLATION NS_SWIFT_NAME(MOTION_TRANSLATION);
@property (class, readonly) int MOTION_EUCLIDEAN NS_SWIFT_NAME(MOTION_EUCLIDEAN);
@property (class, readonly) int MOTION_AFFINE NS_SWIFT_NAME(MOTION_AFFINE);
@property (class, readonly) int MOTION_HOMOGRAPHY NS_SWIFT_NAME(MOTION_HOMOGRAPHY);
#pragma mark - Methods
//
// RotatedRect cv::CamShift(Mat probImage, Rect& window, TermCriteria criteria)
//
/**
* Finds an object center, size, and orientation.
*
* @param probImage Back projection of the object histogram. See calcBackProject.
* @param window Initial search window.
* @param criteria Stop criteria for the underlying meanShift.
* @return (in old interfaces) the number of iterations CAMSHIFT took to converge.
*
* The function implements the CAMSHIFT object tracking algorithm CITE: Bradski98 . First, it finds an
* object center using meanShift and then adjusts the window size and finds the optimal rotation. The
* function returns the rotated rectangle structure that includes the object position, size, and
* orientation. The next position of the search window can be obtained with RotatedRect::boundingRect().
*
* See the OpenCV sample camshiftdemo.c that tracks colored objects.
*
* NOTE:
* - (Python) A sample explaining the camshift tracking algorithm can be found at
* opencv_source_code/samples/python/camshift.py
*/
+ (RotatedRect*)CamShift:(Mat*)probImage window:(Rect2i*)window criteria:(TermCriteria*)criteria NS_SWIFT_NAME(CamShift(probImage:window:criteria:));
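// A minimal usage sketch (illustrative, not part of the generated API): `backProj` is assumed to
// come from the imgproc calcBackProject wrapper, and the initializers below
// (`initWithX:y:width:height:`, `initWithType:maxCount:epsilon:`) are assumed from the standard
// opencv2 Objective-C core wrappers.
//
//   Rect2i *window = [[Rect2i alloc] initWithX:50 y:50 width:120 height:90];
//   TermCriteria *criteria = [[TermCriteria alloc] initWithType:3 /* COUNT+EPS */
//                                                      maxCount:10
//                                                       epsilon:1.0];
//   RotatedRect *box = [Video CamShift:backProj window:window criteria:criteria];
//   // `window` is updated in place and can seed the search in the next frame.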
//
// int cv::meanShift(Mat probImage, Rect& window, TermCriteria criteria)
//
/**
* Finds an object on a back projection image.
*
* @param probImage Back projection of the object histogram. See calcBackProject for details.
* @param window Initial search window.
* @param criteria Stop criteria for the iterative search algorithm.
* @return the number of iterations the search took to converge.
*
* The function implements the iterative object search algorithm. It takes the input back projection of
* an object and the initial position. The mass center in window of the back projection image is
* computed and the search window center shifts to the mass center. The procedure is repeated until the
* specified number of iterations criteria.maxCount is reached or until the window center shifts by less
* than criteria.epsilon. The algorithm is used inside CamShift but, unlike CamShift, the search
* window size and orientation do not change during the search. You can simply pass the output of
* calcBackProject to this function, but better results can be obtained if you pre-filter the back
* projection and remove the noise. For example, you can do this by retrieving connected components
* with findContours, throwing away contours with small area (contourArea), and rendering the
* remaining contours with drawContours.
*/
+ (int)meanShift:(Mat*)probImage window:(Rect2i*)window criteria:(TermCriteria*)criteria NS_SWIFT_NAME(meanShift(probImage:window:criteria:));
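// Sketch of the non-rotating variant, under the same assumptions as the CamShift sketch above:
//
//   int iters = [Video meanShift:backProj window:window criteria:criteria];
//   // `window` now holds the shifted search window; `iters` is the iteration count.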
//
// int cv::buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true)
//
/**
* Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
*
* @param img 8-bit input image.
* @param pyramid output pyramid.
* @param winSize window size of the optical flow algorithm. Must be not less than the winSize argument of
* calcOpticalFlowPyrLK. It is needed to calculate the required padding for pyramid levels.
* @param maxLevel 0-based maximal pyramid level number.
* @param withDerivatives set to precompute gradients for every pyramid level. If the pyramid is
* constructed without the gradients, then calcOpticalFlowPyrLK will calculate them internally.
* @param pyrBorder the border mode for pyramid layers.
* @param derivBorder the border mode for gradients.
* @param tryReuseInputImage put the ROI of the input image into the pyramid if possible. You can pass false
* to force data copying.
* @return number of levels in the constructed pyramid. Can be less than maxLevel.
*/
+ (int)buildOpticalFlowPyramid:(Mat*)img pyramid:(NSMutableArray<Mat*>*)pyramid winSize:(Size2i*)winSize maxLevel:(int)maxLevel withDerivatives:(BOOL)withDerivatives pyrBorder:(int)pyrBorder derivBorder:(int)derivBorder tryReuseInputImage:(BOOL)tryReuseInputImage NS_SWIFT_NAME(buildOpticalFlowPyramid(img:pyramid:winSize:maxLevel:withDerivatives:pyrBorder:derivBorder:tryReuseInputImage:));
/**
* Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK. Same as the full
* overload above, with tryReuseInputImage defaulting to true.
*
* @return number of levels in the constructed pyramid. Can be less than maxLevel.
*/
+ (int)buildOpticalFlowPyramid:(Mat*)img pyramid:(NSMutableArray<Mat*>*)pyramid winSize:(Size2i*)winSize maxLevel:(int)maxLevel withDerivatives:(BOOL)withDerivatives pyrBorder:(int)pyrBorder derivBorder:(int)derivBorder NS_SWIFT_NAME(buildOpticalFlowPyramid(img:pyramid:winSize:maxLevel:withDerivatives:pyrBorder:derivBorder:));
/**
* Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK. Same as the full
* overload above, with derivBorder defaulting to BORDER_CONSTANT and tryReuseInputImage to true.
*
* @return number of levels in the constructed pyramid. Can be less than maxLevel.
*/
+ (int)buildOpticalFlowPyramid:(Mat*)img pyramid:(NSMutableArray<Mat*>*)pyramid winSize:(Size2i*)winSize maxLevel:(int)maxLevel withDerivatives:(BOOL)withDerivatives pyrBorder:(int)pyrBorder NS_SWIFT_NAME(buildOpticalFlowPyramid(img:pyramid:winSize:maxLevel:withDerivatives:pyrBorder:));
/**
* Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK. Same as the full
* overload above, with pyrBorder defaulting to BORDER_REFLECT_101, derivBorder to BORDER_CONSTANT,
* and tryReuseInputImage to true.
*
* @return number of levels in the constructed pyramid. Can be less than maxLevel.
*/
+ (int)buildOpticalFlowPyramid:(Mat*)img pyramid:(NSMutableArray<Mat*>*)pyramid winSize:(Size2i*)winSize maxLevel:(int)maxLevel withDerivatives:(BOOL)withDerivatives NS_SWIFT_NAME(buildOpticalFlowPyramid(img:pyramid:winSize:maxLevel:withDerivatives:));
/**
* Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK. Same as the full
* overload above, with withDerivatives defaulting to true, pyrBorder to BORDER_REFLECT_101,
* derivBorder to BORDER_CONSTANT, and tryReuseInputImage to true.
*
* @return number of levels in the constructed pyramid. Can be less than maxLevel.
*/
+ (int)buildOpticalFlowPyramid:(Mat*)img pyramid:(NSMutableArray<Mat*>*)pyramid winSize:(Size2i*)winSize maxLevel:(int)maxLevel NS_SWIFT_NAME(buildOpticalFlowPyramid(img:pyramid:winSize:maxLevel:));
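// Sketch: precompute a pyramid once per frame (the Size2i initializer `initWithWidth:height:` is
// assumed from the core wrappers; `gray` is an 8-bit Mat).
//
//   NSMutableArray<Mat*> *pyramid = [NSMutableArray array];
//   Size2i *win = [[Size2i alloc] initWithWidth:21 height:21]; // must be >= the LK winSize
//   int levels = [Video buildOpticalFlowPyramid:gray pyramid:pyramid winSize:win maxLevel:3];
//   // `levels` may be less than maxLevel. (In the C++ API the pyramid vector can be passed
//   // directly as prevImg/nextImg of calcOpticalFlowPyrLK.)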
//
// void cv::calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat& nextPts, Mat& status, Mat& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4)
//
/**
* Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids.
*
* @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
* @param nextImg second input image or pyramid of the same size and the same type as prevImg.
* @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
* single-precision floating-point numbers.
* @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
* containing the calculated new positions of input features in the second image; when the
* OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as the input.
* @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
* the flow for the corresponding feature has been found; otherwise, it is set to 0.
* @param err output vector of errors; each element of the vector is set to an error for the
* corresponding feature; the type of the error measure can be set in the flags parameter; if the flow wasn't
* found then the error is not defined (use the status parameter to find such cases).
* @param winSize size of the search window at each pyramid level.
* @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
* level); if set to 1, two levels are used, and so on; if pyramids are passed as input then the
* algorithm will use as many levels as the pyramids have, but no more than maxLevel.
* @param criteria parameter specifying the termination criteria of the iterative search algorithm
* (after the specified maximum number of iterations criteria.maxCount or when the search window
* moves by less than criteria.epsilon).
* @param flags operation flags:
* - **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations stored in nextPts; if the flag is
* not set, then prevPts is copied to nextPts and is considered the initial estimate.
* - **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigenvalues as an error measure (see
* the minEigThreshold description); if the flag is not set, then the L1 distance between patches
* around the original and a moved point, divided by the number of pixels in a window, is used as the
* error measure.
* @param minEigThreshold the algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of
* optical flow equations (this matrix is called a spatial gradient matrix in CITE: Bouguet00), divided
* by the number of pixels in a window; if this value is less than minEigThreshold, then the corresponding
* feature is filtered out and its flow is not processed, which makes it possible to remove bad points
* and get a performance boost.
*
* The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
* CITE: Bouguet00 . The function is parallelized with the TBB library.
*
* NOTE:
*
* - An example using the Lucas-Kanade optical flow algorithm can be found at
* opencv_source_code/samples/cpp/lkdemo.cpp
* - (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
* opencv_source_code/samples/python/lk_track.py
* - (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
* opencv_source_code/samples/python/lk_homography.py
*/
+ (void)calcOpticalFlowPyrLK:(Mat*)prevImg nextImg:(Mat*)nextImg prevPts:(Mat*)prevPts nextPts:(Mat*)nextPts status:(Mat*)status err:(Mat*)err winSize:(Size2i*)winSize maxLevel:(int)maxLevel criteria:(TermCriteria*)criteria flags:(int)flags minEigThreshold:(double)minEigThreshold NS_SWIFT_NAME(calcOpticalFlowPyrLK(prevImg:nextImg:prevPts:nextPts:status:err:winSize:maxLevel:criteria:flags:minEigThreshold:));
/**
* Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids. Same as the full overload above, with minEigThreshold defaulting to 1e-4.
*/
+ (void)calcOpticalFlowPyrLK:(Mat*)prevImg nextImg:(Mat*)nextImg prevPts:(Mat*)prevPts nextPts:(Mat*)nextPts status:(Mat*)status err:(Mat*)err winSize:(Size2i*)winSize maxLevel:(int)maxLevel criteria:(TermCriteria*)criteria flags:(int)flags NS_SWIFT_NAME(calcOpticalFlowPyrLK(prevImg:nextImg:prevPts:nextPts:status:err:winSize:maxLevel:criteria:flags:));
/**
* Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids. Same as the full overload above, with flags defaulting to 0 and minEigThreshold to 1e-4.
*/
+ (void)calcOpticalFlowPyrLK:(Mat*)prevImg nextImg:(Mat*)nextImg prevPts:(Mat*)prevPts nextPts:(Mat*)nextPts status:(Mat*)status err:(Mat*)err winSize:(Size2i*)winSize maxLevel:(int)maxLevel criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calcOpticalFlowPyrLK(prevImg:nextImg:prevPts:nextPts:status:err:winSize:maxLevel:criteria:));
/**
* Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids. Same as the full overload above, with criteria defaulting to
* TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), flags to 0, and minEigThreshold to 1e-4.
*/
+ (void)calcOpticalFlowPyrLK:(Mat*)prevImg nextImg:(Mat*)nextImg prevPts:(Mat*)prevPts nextPts:(Mat*)nextPts status:(Mat*)status err:(Mat*)err winSize:(Size2i*)winSize maxLevel:(int)maxLevel NS_SWIFT_NAME(calcOpticalFlowPyrLK(prevImg:nextImg:prevPts:nextPts:status:err:winSize:maxLevel:));
/**
* Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids. Same as the full overload above, with maxLevel defaulting to 3, criteria to
* TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), flags to 0, and minEigThreshold to 1e-4.
*/
+ (void)calcOpticalFlowPyrLK:(Mat*)prevImg nextImg:(Mat*)nextImg prevPts:(Mat*)prevPts nextPts:(Mat*)nextPts status:(Mat*)status err:(Mat*)err winSize:(Size2i*)winSize NS_SWIFT_NAME(calcOpticalFlowPyrLK(prevImg:nextImg:prevPts:nextPts:status:err:winSize:));
/**
* Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids. Same as the full overload above, with winSize defaulting to Size(21,21), maxLevel to 3,
* criteria to TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), flags to 0, and
* minEigThreshold to 1e-4.
*/
+ (void)calcOpticalFlowPyrLK:(Mat*)prevImg nextImg:(Mat*)nextImg prevPts:(Mat*)prevPts nextPts:(Mat*)nextPts status:(Mat*)status err:(Mat*)err NS_SWIFT_NAME(calcOpticalFlowPyrLK(prevImg:nextImg:prevPts:nextPts:status:err:));
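// Sketch of per-frame sparse tracking (illustrative): `prevPts` is assumed to hold CV_32FC2 points,
// e.g. from the imgproc goodFeaturesToTrack wrapper; the output Mats start empty and are filled by
// the call.
//
//   Mat *nextPts = [[Mat alloc] init];
//   Mat *status  = [[Mat alloc] init];
//   Mat *err     = [[Mat alloc] init];
//   [Video calcOpticalFlowPyrLK:prevGray nextImg:nextGray prevPts:prevPts
//                        nextPts:nextPts status:status err:err];
//   // Keep only points whose status byte is 1, then swap buffers before the next frame.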
//
// void cv::calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags)
//
/**
* Computes a dense optical flow using Gunnar Farneback's algorithm.
*
* @param prev first 8-bit single-channel input image.
* @param next second input image of the same size and the same type as prev.
* @param flow computed flow image that has the same size as prev and type CV_32FC2.
* @param pyr_scale parameter specifying the image scale (\<1) to build pyramids for each image;
* pyr_scale=0.5 means a classical pyramid, where each subsequent layer is half the resolution of
* the previous one.
* @param levels number of pyramid layers including the initial image; levels=1 means that no extra
* layers are created and only the original images are used.
* @param winsize averaging window size; larger values increase the algorithm's robustness to image
* noise and improve the chances of detecting fast motion, but yield a more blurred motion field.
* @param iterations number of iterations the algorithm does at each pyramid level.
* @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
* larger values mean that the image will be approximated with smoother surfaces, yielding a more
* robust algorithm and a more blurred motion field; typically poly_n = 5 or 7.
* @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
* basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1; for poly_n=7, a
* good value would be poly_sigma=1.5.
* @param flags operation flags that can be a combination of the following:
* - **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation.
* - **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian `$$\texttt{winsize}\times\texttt{winsize}$$`
* filter instead of a box filter of the same size for optical flow estimation; usually, this
* option gives a more accurate flow than with a box filter, at the cost of lower speed;
* normally, winsize for a Gaussian window should be set to a larger value to achieve the same
* level of robustness.
*
* The function finds an optical flow for each prev pixel using the CITE: Farneback2003 algorithm so that
*
* `$$\texttt{prev} (y,x) \sim \texttt{next} ( y + \texttt{flow} (y,x)[1], x + \texttt{flow} (y,x)[0])$$`
*
* NOTE:
*
* - An example using the optical flow algorithm described by Gunnar Farneback can be found at
* opencv_source_code/samples/cpp/fback.cpp
* - (Python) An example using the optical flow algorithm described by Gunnar Farneback can be
* found at opencv_source_code/samples/python/opt_flow.py
*/
+ (void)calcOpticalFlowFarneback:(Mat*)prev next:(Mat*)next flow:(Mat*)flow pyr_scale:(double)pyr_scale levels:(int)levels winsize:(int)winsize iterations:(int)iterations poly_n:(int)poly_n poly_sigma:(double)poly_sigma flags:(int)flags NS_SWIFT_NAME(calcOpticalFlowFarneback(prev:next:flow:pyr_scale:levels:winsize:iterations:poly_n:poly_sigma:flags:));
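// Sketch using the parameter values suggested in the documentation above (poly_n = 5 with
// poly_sigma = 1.1); `prevGray`/`nextGray` are assumed to be 8-bit single-channel Mats.
//
//   Mat *flow = [[Mat alloc] init];
//   [Video calcOpticalFlowFarneback:prevGray next:nextGray flow:flow
//                         pyr_scale:0.5 levels:3 winsize:15 iterations:3
//                            poly_n:5 poly_sigma:1.1 flags:0];
//   // flow is CV_32FC2: channel 0 is the horizontal (u), channel 1 the vertical (v) displacement.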
//
// double cv::computeECC(Mat templateImage, Mat inputImage, Mat inputMask = Mat())
//
/**
* Computes the Enhanced Correlation Coefficient value between two images CITE: EP08 .
*
* @param templateImage single-channel template image; CV_8U or CV_32F array.
* @param inputImage single-channel input image to be warped to provide an image similar to
* templateImage, same type as templateImage.
* @param inputMask An optional mask to indicate valid values of inputImage.
*
* @sa
* findTransformECC
*/
+ (double)computeECC:(Mat*)templateImage inputImage:(Mat*)inputImage inputMask:(Mat*)inputMask NS_SWIFT_NAME(computeECC(templateImage:inputImage:inputMask:));
/**
* Computes the Enhanced Correlation Coefficient value between two images CITE: EP08 . Same as above,
* with inputMask defaulting to an empty Mat (the whole image is valid).
*
* @sa
* findTransformECC
*/
+ (double)computeECC:(Mat*)templateImage inputImage:(Mat*)inputImage NS_SWIFT_NAME(computeECC(templateImage:inputImage:));
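// Sketch: score how well two single-channel images align, e.g. before and after warping:
//
//   double ecc = [Video computeECC:templateGray inputImage:inputGray];
//   // ecc is a correlation coefficient in [-1, 1]; values near 1 indicate a good match.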
//
// double cv::findTransformECC(Mat templateImage, Mat inputImage, Mat& warpMatrix, int motionType, TermCriteria criteria, Mat inputMask, int gaussFiltSize)
//
/**
* Finds the geometric transform (warp) between two images in terms of the ECC criterion CITE: EP08 .
*
* @param templateImage single-channel template image; CV_8U or CV_32F array.
* @param inputImage single-channel input image which should be warped with the final warpMatrix in
* order to provide an image similar to templateImage, same type as templateImage.
* @param warpMatrix floating-point `$$2\times 3$$` or `$$3\times 3$$` mapping matrix (warp).
* @param motionType parameter specifying the type of motion:
* - **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is `$$2\times 3$$` with
* the first `$$2\times 2$$` part being the identity matrix and the other two parameters being
* estimated.
* - **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as the motion model; three
* parameters are estimated; warpMatrix is `$$2\times 3$$`.
* - **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated;
* warpMatrix is `$$2\times 3$$`.
* - **MOTION_HOMOGRAPHY** sets a homography as the motion model; eight parameters are
* estimated; warpMatrix is `$$3\times 3$$`.
* @param criteria parameter specifying the termination criteria of the ECC algorithm;
* criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
* iterations (a negative criteria.epsilon makes criteria.maxCount the only termination criterion).
* Default values are shown in the overload declarations below.
* @param inputMask An optional mask to indicate valid values of inputImage.
* @param gaussFiltSize An optional value indicating the size of the Gaussian blur filter (DEFAULT: 5).
*
* The function estimates the optimum transformation (warpMatrix) with respect to the ECC criterion
* (CITE: EP08), that is
*
* `$$\texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))$$`
*
* where
*
* `$$\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}$$`
*
* (the equation holds with homogeneous coordinates for homography). It returns the final enhanced
* correlation coefficient, that is, the correlation coefficient between the template image and the
* final warped input image. When a `$$3\times 3$$` matrix is given with motionType = 0, 1, or 2, the third
* row is ignored.
*
* Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
* area-based alignment that builds on intensity similarities. In essence, the function updates the
* initial transformation that roughly aligns the images. If this information is missing, the identity
* warp is used as an initialization. Note that if the images undergo strong
* displacements/rotations, an initial transformation that roughly aligns the images is necessary
* (e.g., a simple Euclidean/similarity transform so that the images show approximately the same
* content). Use inverse warping in the second image to take an image close to the first
* one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
* sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
* an exception if the algorithm does not converge.
*
* @sa
* computeECC, estimateAffine2D, estimateAffinePartial2D, findHomography
*/
+ (double)findTransformECC:(Mat*)templateImage inputImage:(Mat*)inputImage warpMatrix:(Mat*)warpMatrix motionType:(int)motionType criteria:(TermCriteria*)criteria inputMask:(Mat*)inputMask gaussFiltSize:(int)gaussFiltSize NS_SWIFT_NAME(findTransformECC(templateImage:inputImage:warpMatrix:motionType:criteria:inputMask:gaussFiltSize:));
//
// double cv::findTransformECC(Mat templateImage, Mat inputImage, Mat& warpMatrix, int motionType = MOTION_AFFINE, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001), Mat inputMask = Mat())
//
/**
* Same as the full overload above, with gaussFiltSize defaulting to 5.
*/
+ (double)findTransformECC:(Mat*)templateImage inputImage:(Mat*)inputImage warpMatrix:(Mat*)warpMatrix motionType:(int)motionType criteria:(TermCriteria*)criteria inputMask:(Mat*)inputMask NS_SWIFT_NAME(findTransformECC(templateImage:inputImage:warpMatrix:motionType:criteria:inputMask:));
/**
* Same as above, additionally with inputMask defaulting to an empty Mat (the whole image is valid).
*/
+ (double)findTransformECC:(Mat*)templateImage inputImage:(Mat*)inputImage warpMatrix:(Mat*)warpMatrix motionType:(int)motionType criteria:(TermCriteria*)criteria NS_SWIFT_NAME(findTransformECC(templateImage:inputImage:warpMatrix:motionType:criteria:));
/**
* Same as above, additionally with criteria defaulting to TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001).
*/
+ (double)findTransformECC:(Mat*)templateImage inputImage:(Mat*)inputImage warpMatrix:(Mat*)warpMatrix motionType:(int)motionType NS_SWIFT_NAME(findTransformECC(templateImage:inputImage:warpMatrix:motionType:));
/**
* Same as above, additionally with motionType defaulting to MOTION_AFFINE.
*/
+ (double)findTransformECC:(Mat*)templateImage inputImage:(Mat*)inputImage warpMatrix:(Mat*)warpMatrix NS_SWIFT_NAME(findTransformECC(templateImage:inputImage:warpMatrix:));
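// Sketch: estimate a Euclidean warp, noting the in-place warpMatrix update. The Mat factory
// `eye:cols:type:` and the CvType constant are assumed from the core wrappers; the MOTION_* value
// comes from the class constants declared above.
//
//   Mat *warp = [Mat eye:2 cols:3 type:CvType.CV_32F]; // identity initialization
//   double cc = [Video findTransformECC:templateGray inputImage:inputGray warpMatrix:warp
//                            motionType:Video.MOTION_EUCLIDEAN];
//   // `warp` now maps inputImage coordinates onto templateImage; apply it with warpAffine and
//   // the WARP_INVERSE_MAP flag, as the documentation above recommends.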
//
// Mat cv::readOpticalFlow(String path)
//
/**
* Read a .flo file
*
* @param path Path to the file to be loaded
*
* The function readOpticalFlow loads a flow field from a file and returns it as a single matrix.
* The resulting Mat has type CV_32FC2 (floating-point, 2-channel). The first channel corresponds to
* the flow in the horizontal direction (u), the second to the vertical direction (v).
*/
+ (Mat*)readOpticalFlow:(NSString*)path NS_SWIFT_NAME(readOpticalFlow(path:));
//
// bool cv::writeOpticalFlow(String path, Mat flow)
//
/**
* Write a .flo file to disk
*
* @param path Path to the file to be written
* @param flow Flow field to be stored
*
* The function stores a flow field in a file, returning true on success and false otherwise.
* The flow field must be a 2-channel, floating-point matrix (CV_32FC2). The first channel corresponds
* to the flow in the horizontal direction (u), the second to the vertical direction (v).
*/
+ (BOOL)writeOpticalFlow:(NSString*)path flow:(Mat*)flow NS_SWIFT_NAME(writeOpticalFlow(path:flow:));
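// Round-trip sketch (illustrative; the path is arbitrary and `flow` is a CV_32FC2 Mat such as the
// output of calcOpticalFlowFarneback above):
//
//   if ([Video writeOpticalFlow:@"/tmp/field.flo" flow:flow]) {
//       Mat *loaded = [Video readOpticalFlow:@"/tmp/field.flo"]; // CV_32FC2, same u/v layout
//   }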
//
// Ptr_BackgroundSubtractorMOG2 cv::createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16, bool detectShadows = true)
//
/**
* Creates a MOG2 Background Subtractor.
*
* @param history Length of the history.
* @param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
* to decide whether a pixel is well described by the background model. This parameter does not
* affect the background update.
* @param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
* speed a bit, so if you do not need this feature, set the parameter to false.
*/
+ (BackgroundSubtractorMOG2*)createBackgroundSubtractorMOG2:(int)history varThreshold:(double)varThreshold detectShadows:(BOOL)detectShadows NS_SWIFT_NAME(createBackgroundSubtractorMOG2(history:varThreshold:detectShadows:));
/**
* Creates a MOG2 Background Subtractor. Same as above, with detectShadows defaulting to true.
*/
+ (BackgroundSubtractorMOG2*)createBackgroundSubtractorMOG2:(int)history varThreshold:(double)varThreshold NS_SWIFT_NAME(createBackgroundSubtractorMOG2(history:varThreshold:));
/**
* Creates a MOG2 Background Subtractor. Same as above, with varThreshold defaulting to 16 and
* detectShadows to true.
*/
+ (BackgroundSubtractorMOG2*)createBackgroundSubtractorMOG2:(int)history NS_SWIFT_NAME(createBackgroundSubtractorMOG2(history:));
/**
* Creates a MOG2 Background Subtractor. Same as above, with history defaulting to 500, varThreshold
* to 16, and detectShadows to true.
*/
+ (BackgroundSubtractorMOG2*)createBackgroundSubtractorMOG2 NS_SWIFT_NAME(createBackgroundSubtractorMOG2());
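// Sketch of a typical per-frame loop; `apply:fgmask:` is assumed from the generated
// BackgroundSubtractor wrapper (mirroring C++ BackgroundSubtractor::apply).
//
//   BackgroundSubtractorMOG2 *mog2 = [Video createBackgroundSubtractorMOG2];
//   Mat *fgMask = [[Mat alloc] init];
//   [mog2 apply:frame fgmask:fgMask];
//   // With detectShadows left at its default, shadow pixels are marked in gray (value 127).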
//
// Ptr_BackgroundSubtractorKNN cv::createBackgroundSubtractorKNN(int history = 500, double dist2Threshold = 400.0, bool detectShadows = true)
//
/**
* Creates a KNN Background Subtractor.
*
* @param history Length of the history.
* @param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide
* whether a pixel is close to that sample. This parameter does not affect the background update.
* @param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
* speed a bit, so if you do not need this feature, set the parameter to false.
*/
+ (BackgroundSubtractorKNN*)createBackgroundSubtractorKNN:(int)history dist2Threshold:(double)dist2Threshold detectShadows:(BOOL)detectShadows NS_SWIFT_NAME(createBackgroundSubtractorKNN(history:dist2Threshold:detectShadows:));
/**
* Creates a KNN Background Subtractor. Same as above, with detectShadows defaulting to true.
*/
+ (BackgroundSubtractorKNN*)createBackgroundSubtractorKNN:(int)history dist2Threshold:(double)dist2Threshold NS_SWIFT_NAME(createBackgroundSubtractorKNN(history:dist2Threshold:));
/**
* Creates a KNN Background Subtractor. Same as above, with dist2Threshold defaulting to 400.0 and
* detectShadows to true.
*/
+ (BackgroundSubtractorKNN*)createBackgroundSubtractorKNN:(int)history NS_SWIFT_NAME(createBackgroundSubtractorKNN(history:));
/**
* Creates a KNN Background Subtractor. Same as above, with history defaulting to 500, dist2Threshold
* to 400.0, and detectShadows to true.
*/
+ (BackgroundSubtractorKNN*)createBackgroundSubtractorKNN NS_SWIFT_NAME(createBackgroundSubtractorKNN());
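// Sketch mirroring the MOG2 example above, here with shadow detection disabled for speed (the
// `apply:fgmask:` selector is again assumed from the generated BackgroundSubtractor wrapper):
//
//   BackgroundSubtractorKNN *knn = [Video createBackgroundSubtractorKNN:500
//                                                        dist2Threshold:400.0
//                                                         detectShadows:NO];
//   Mat *fgMask = [[Mat alloc] init];
//   [knn apply:frame fgmask:fgMask];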
@end
NS_ASSUME_NONNULL_END