//
// This file is auto-generated. Please don't modify it!
//
#pragma once

#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ml.hpp"
#else
#define CV_EXPORTS
#endif

#import <Foundation/Foundation.h>
#import "StatModel.h"

@class Double2;
@class Mat;
@class TermCriteria;

// C++: enum EMTypes (cv.ml.EM.Types)
typedef NS_ENUM(int, EMTypes) {
    COV_MAT_SPHERICAL = 0,
    COV_MAT_DIAGONAL = 1,
    COV_MAT_GENERIC = 2,
    COV_MAT_DEFAULT = COV_MAT_DIAGONAL
};

NS_ASSUME_NONNULL_BEGIN

// C++: class EM
/**
 * The class implements the Expectation Maximization algorithm.
 *
 * @see REF: ml_intro_em
 *
 * Member of `Ml`
 */
CV_EXPORTS @interface EM : StatModel

#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ml::EM> nativePtrEM;
#endif

#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ml::EM>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ml::EM>)nativePtr;
#endif

#pragma mark - Class Constants

@property (class, readonly) int DEFAULT_NCLUSTERS NS_SWIFT_NAME(DEFAULT_NCLUSTERS);
@property (class, readonly) int DEFAULT_MAX_ITERS NS_SWIFT_NAME(DEFAULT_MAX_ITERS);
@property (class, readonly) int START_E_STEP NS_SWIFT_NAME(START_E_STEP);
@property (class, readonly) int START_M_STEP NS_SWIFT_NAME(START_M_STEP);
@property (class, readonly) int START_AUTO_STEP NS_SWIFT_NAME(START_AUTO_STEP);

#pragma mark - Methods

//
// int cv::ml::EM::getClustersNumber()
//
/**
 * @see `-setClustersNumber:`
 */
- (int)getClustersNumber NS_SWIFT_NAME(getClustersNumber());

//
// void cv::ml::EM::setClustersNumber(int val)
//
/**
 * @see `-getClustersNumber`
 */
- (void)setClustersNumber:(int)val NS_SWIFT_NAME(setClustersNumber(val:));

//
// int cv::ml::EM::getCovarianceMatrixType()
//
/**
 * @see `-setCovarianceMatrixType:`
 */
- (int)getCovarianceMatrixType NS_SWIFT_NAME(getCovarianceMatrixType());

//
// void cv::ml::EM::setCovarianceMatrixType(int val)
//
/**
 * @see `-getCovarianceMatrixType`
 */
- (void)setCovarianceMatrixType:(int)val NS_SWIFT_NAME(setCovarianceMatrixType(val:));

//
// TermCriteria cv::ml::EM::getTermCriteria()
//
/**
 * @see `-setTermCriteria:`
 */
- (TermCriteria*)getTermCriteria NS_SWIFT_NAME(getTermCriteria());

//
// void cv::ml::EM::setTermCriteria(TermCriteria val)
//
/**
 * @see `-getTermCriteria`
 */
- (void)setTermCriteria:(TermCriteria*)val NS_SWIFT_NAME(setTermCriteria(val:));

//
// Mat cv::ml::EM::getWeights()
//
/**
 * Returns weights of the mixtures
 *
 * Returns a vector with the number of elements equal to the number of mixtures.
 */
- (Mat*)getWeights NS_SWIFT_NAME(getWeights());

//
// Mat cv::ml::EM::getMeans()
//
/**
 * Returns the cluster centers (means of the Gaussian mixture)
 *
 * Returns a matrix with the number of rows equal to the number of mixtures and the number of
 * columns equal to the space dimensionality.
 */
- (Mat*)getMeans NS_SWIFT_NAME(getMeans());

//
// void cv::ml::EM::getCovs(vector_Mat& covs)
//
/**
 * Returns covariance matrices
 *
 * Returns a vector of covariance matrices. The number of matrices is the number of Gaussian
 * mixture components; each matrix is a square floating-point matrix NxN, where N is the space
 * dimensionality.
 */
- (void)getCovs:(NSMutableArray<Mat*>*)covs NS_SWIFT_NAME(getCovs(covs:));
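
//
// Illustrative usage (a sketch, not part of the generated API): reading the learned
// mixture back from a trained model. `model` is a hypothetical trained EM instance.
//
//     Mat *weights = [model getWeights];                 // 1 x nclusters mixture weights
//     Mat *means = [model getMeans];                     // nclusters x dims cluster centers
//     NSMutableArray<Mat*> *covs = [NSMutableArray array];
//     [model getCovs:covs];                              // nclusters matrices, each dims x dims
//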

//
// float cv::ml::EM::predict(Mat samples, Mat& results = Mat(), int flags = 0)
//
/**
 * Returns posterior probabilities for the provided samples
 *
 * @param samples The input samples, floating-point matrix
 * @param results The optional output `$$nSamples \times nClusters$$` matrix of results. It contains
 * posterior probabilities for each sample from the input
 * @param flags This parameter will be ignored
 */
- (float)predict:(Mat*)samples results:(Mat*)results flags:(int)flags NS_SWIFT_NAME(predict(samples:results:flags:));

/**
 * Returns posterior probabilities for the provided samples
 *
 * @param samples The input samples, floating-point matrix
 * @param results The optional output `$$nSamples \times nClusters$$` matrix of results. It contains
 * posterior probabilities for each sample from the input
 */
- (float)predict:(Mat*)samples results:(Mat*)results NS_SWIFT_NAME(predict(samples:results:));

/**
 * Returns posterior probabilities for the provided samples
 *
 * @param samples The input samples, floating-point matrix
 */
- (float)predict:(Mat*)samples NS_SWIFT_NAME(predict(samples:));

//
// Vec2d cv::ml::EM::predict2(Mat sample, Mat& probs)
//
/**
 * Returns a likelihood logarithm value and an index of the most probable mixture component
 * for the given sample.
 *
 * @param sample A sample for classification. It should be a one-channel matrix of
 * `$$1 \times dims$$` or `$$dims \times 1$$` size.
 * @param probs Optional output matrix that contains posterior probabilities of each component
 * given the sample. It has `$$1 \times nclusters$$` size and CV_64FC1 type.
 *
 * The method returns a two-element double vector. The zero element is a likelihood logarithm
 * value for the sample; the first element is an index of the most probable mixture component
 * for the given sample.
 */
- (Double2*)predict2:(Mat*)sample probs:(Mat*)probs NS_SWIFT_NAME(predict2(sample:probs:));
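
//
// Illustrative usage (a sketch under assumed names): scoring one 1 x dims sample
// with a trained model. `model` and `sample` (a CV_64F Mat) are hypothetical.
//
//     Mat *probs = [Mat new];
//     Double2 *result = [model predict2:sample probs:probs];
//     // result holds (log-likelihood, index of the most probable component);
//     // probs is filled with the 1 x nclusters posterior probabilities.
//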

//
// bool cv::ml::EM::trainEM(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
//
/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. Initial values of the model parameters will be
 * estimated by the k-means algorithm.
 *
 * Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
 * responses (class labels or function values) as input. Instead, it computes the *Maximum
 * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
 * parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means, `$$S_k$$` in
 * covs[k], `$$\pi_k$$` in weights, and optionally computes the output "class label" for each
 * sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
 * probable mixture component for each sample).
 *
 * The trained model can be used further for prediction, just like any other classifier. The
 * trained model is similar to the NormalBayesClassifier.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 * @param labels The optional output "class label" for each sample:
 * `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
 * mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
 * @param probs The optional output matrix that contains posterior probabilities of each Gaussian
 * mixture component given each sample. It has `$$nsamples \times nclusters$$` size and
 * CV_64FC1 type.
 */
- (BOOL)trainEM:(Mat*)samples logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels probs:(Mat*)probs NS_SWIFT_NAME(trainEM(samples:logLikelihoods:labels:probs:));
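
//
// Illustrative usage (a sketch with hypothetical data): fitting a 2-component
// mixture to an nsamples x dims CV_64F matrix `samples` built elsewhere.
//
//     EM *em = [EM create];
//     [em setClustersNumber:2];
//     Mat *logLikelihoods = [Mat new];
//     Mat *labels = [Mat new];
//     Mat *probs = [Mat new];
//     BOOL ok = [em trainEM:samples logLikelihoods:logLikelihoods labels:labels probs:probs];
//     // On success, labels holds the most probable component index per sample.
//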

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. Initial values of the model parameters will be
 * estimated by the k-means algorithm.
 *
 * Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
 * responses (class labels or function values) as input. Instead, it computes the *Maximum
 * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
 * parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means, `$$S_k$$` in
 * covs[k], `$$\pi_k$$` in weights, and optionally computes the output "class label" for each
 * sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
 * probable mixture component for each sample).
 *
 * The trained model can be used further for prediction, just like any other classifier. The
 * trained model is similar to the NormalBayesClassifier.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 * @param labels The optional output "class label" for each sample:
 * `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
 * mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
 */
- (BOOL)trainEM:(Mat*)samples logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels NS_SWIFT_NAME(trainEM(samples:logLikelihoods:labels:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. Initial values of the model parameters will be
 * estimated by the k-means algorithm.
 *
 * Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
 * responses (class labels or function values) as input. Instead, it computes the *Maximum
 * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
 * parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means, `$$S_k$$` in
 * covs[k], `$$\pi_k$$` in weights, and optionally computes the output "class label" for each
 * sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
 * probable mixture component for each sample).
 *
 * The trained model can be used further for prediction, just like any other classifier. The
 * trained model is similar to the NormalBayesClassifier.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 */
- (BOOL)trainEM:(Mat*)samples logLikelihoods:(Mat*)logLikelihoods NS_SWIFT_NAME(trainEM(samples:logLikelihoods:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. Initial values of the model parameters will be
 * estimated by the k-means algorithm.
 *
 * Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
 * responses (class labels or function values) as input. Instead, it computes the *Maximum
 * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
 * parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means, `$$S_k$$` in
 * covs[k], `$$\pi_k$$` in weights, and optionally computes the output "class label" for each
 * sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
 * probable mixture component for each sample).
 *
 * The trained model can be used further for prediction, just like any other classifier. The
 * trained model is similar to the NormalBayesClassifier.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 */
- (BOOL)trainEM:(Mat*)samples NS_SWIFT_NAME(trainEM(samples:));

//
// bool cv::ml::EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
//
/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
 * mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
 * `$$S_k$$` of mixture components.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
 * `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
 * converted to the inner matrix of such type for further computing.
 * @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
 * the covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
 * do not have CV_64F type they will be converted to the inner matrices of such type for
 * further computing.
 * @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
 * floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 * @param labels The optional output "class label" for each sample:
 * `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
 * mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
 * @param probs The optional output matrix that contains posterior probabilities of each Gaussian
 * mixture component given each sample. It has `$$nsamples \times nclusters$$` size and
 * CV_64FC1 type.
 */
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels probs:(Mat*)probs NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:logLikelihoods:labels:probs:));
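
//
// Illustrative usage (a sketch under assumed names): seeding the E-step with
// user-supplied initial centers. `samples` is a hypothetical nsamples x dims
// CV_64F Mat and `means0` a hypothetical nclusters x dims CV_64F Mat.
//
//     EM *em = [EM create];
//     [em setClustersNumber:3];
//     BOOL ok = [em trainE:samples means0:means0];
//     // covs0 and weights0 are optional; when omitted they are estimated internally.
//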

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
 * mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
 * `$$S_k$$` of mixture components.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
 * `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
 * converted to the inner matrix of such type for further computing.
 * @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
 * the covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
 * do not have CV_64F type they will be converted to the inner matrices of such type for
 * further computing.
 * @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
 * floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 * @param labels The optional output "class label" for each sample:
 * `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
 * mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
 */
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:logLikelihoods:labels:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
 * mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
 * `$$S_k$$` of mixture components.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
 * `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
 * converted to the inner matrix of such type for further computing.
 * @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
 * the covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
 * do not have CV_64F type they will be converted to the inner matrices of such type for
 * further computing.
 * @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
 * floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 */
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 logLikelihoods:(Mat*)logLikelihoods NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:logLikelihoods:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
 * mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
 * `$$S_k$$` of mixture components.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
 * `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
 * converted to the inner matrix of such type for further computing.
 * @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
 * the covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
 * do not have CV_64F type they will be converted to the inner matrices of such type for
 * further computing.
 * @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
 * floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
 */
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
 * mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
 * `$$S_k$$` of mixture components.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
 * `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
 * converted to the inner matrix of such type for further computing.
 * @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
 * the covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
 * do not have CV_64F type they will be converted to the inner matrices of such type for
 * further computing.
 */
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 NS_SWIFT_NAME(trainE(samples:means0:covs0:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
 * mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
 * `$$S_k$$` of mixture components.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
 * `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
 * converted to the inner matrix of such type for further computing.
 */
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 NS_SWIFT_NAME(trainE(samples:means0:));

//
// bool cv::ml::EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
//
/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Maximization step. You need to provide initial probabilities
 * `$$p_{i,k}$$` to use this option.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param probs0 The initial probabilities `$$p_{i,k}$$`; a one-channel floating-point matrix of
 * `$$nsamples \times nclusters$$` size.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 * @param labels The optional output "class label" for each sample:
 * `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
 * mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
 * @param probs The optional output matrix that contains posterior probabilities of each Gaussian
 * mixture component given each sample. It has `$$nsamples \times nclusters$$` size and
 * CV_64FC1 type.
 */
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels probs:(Mat*)probs NS_SWIFT_NAME(trainM(samples:probs0:logLikelihoods:labels:probs:));
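
//
// Illustrative usage (a sketch under assumed names): starting from the M-step
// with hypothetical initial responsibilities `probs0` (nsamples x nclusters,
// each row summing to 1) for the hypothetical `samples` matrix.
//
//     EM *em = [EM create];
//     [em setClustersNumber:probs0.cols];
//     BOOL ok = [em trainM:samples probs0:probs0];
//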

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Maximization step. You need to provide initial probabilities
 * `$$p_{i,k}$$` to use this option.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param probs0 The initial probabilities `$$p_{i,k}$$`; a one-channel floating-point matrix of
 * `$$nsamples \times nclusters$$` size.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 * @param labels The optional output "class label" for each sample:
 * `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
 * mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
 */
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels NS_SWIFT_NAME(trainM(samples:probs0:logLikelihoods:labels:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Maximization step. You need to provide initial probabilities
 * `$$p_{i,k}$$` to use this option.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param probs0 The initial probabilities `$$p_{i,k}$$`; a one-channel floating-point matrix of
 * `$$nsamples \times nclusters$$` size.
 * @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
 * each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
 */
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 logLikelihoods:(Mat*)logLikelihoods NS_SWIFT_NAME(trainM(samples:probs0:logLikelihoods:));

/**
 * Estimate the Gaussian mixture parameters from a samples set.
 *
 * This variation starts with Maximization step. You need to provide initial probabilities
 * `$$p_{i,k}$$` to use this option.
 *
 * @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
 * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
 * it will be converted to the inner matrix of such type for further computing.
 * @param probs0 The initial probabilities `$$p_{i,k}$$`; a one-channel floating-point matrix of
 * `$$nsamples \times nclusters$$` size.
 */
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 NS_SWIFT_NAME(trainM(samples:probs0:));

//
// static Ptr_EM cv::ml::EM::create()
//
/**
 * Creates empty %EM model.
 * The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
 * can use one of the EM::train\* methods or load it from file using Algorithm::load\<EM\>(filename).
 */
+ (EM*)create NS_SWIFT_NAME(create());
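
//
// Illustrative usage (a sketch): creating and configuring a model before training.
// The TermCriteria initializer shown is assumed from the same OpenCV bindings;
// type 3 corresponds to COUNT + EPS.
//
//     EM *em = [EM create];
//     [em setClustersNumber:EM.DEFAULT_NCLUSTERS];
//     [em setCovarianceMatrixType:COV_MAT_DIAGONAL];
//     [em setTermCriteria:[[TermCriteria alloc] initWithType:3
//                                                   maxCount:EM.DEFAULT_MAX_ITERS
//                                                    epsilon:1e-6]];
//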

//
// static Ptr_EM cv::ml::EM::load(String filepath, String nodeName = String())
//
/**
 * Loads and creates a serialized EM from a file
 *
 * Use EM::save to serialize and store an EM to disk.
 * Load the EM from this file again by calling this function with the path to the file.
 * Optionally specify the node for the file containing the classifier.
 *
 * @param filepath path to serialized EM
 * @param nodeName name of node containing the classifier
 */
+ (EM*)load:(NSString*)filepath nodeName:(NSString*)nodeName NS_SWIFT_NAME(load(filepath:nodeName:));

/**
 * Loads and creates a serialized EM from a file
 *
 * Use EM::save to serialize and store an EM to disk.
 * Load the EM from this file again by calling this function with the path to the file.
 * Optionally specify the node for the file containing the classifier.
 *
 * @param filepath path to serialized EM
 */
+ (EM*)load:(NSString*)filepath NS_SWIFT_NAME(load(filepath:));
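
//
// Illustrative usage (a sketch with a hypothetical path): persisting and restoring
// a trained model. `save:` is assumed to be inherited from the Algorithm/StatModel
// wrappers in these bindings.
//
//     [em save:@"em_model.xml"];
//     EM *restored = [EM load:@"em_model.xml"];
//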

@end

NS_ASSUME_NONNULL_END