
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_ML_HPP
#define OPENCV_ML_HPP

#ifdef __cplusplus
# include "opencv2/core.hpp"
#endif

#ifdef __cplusplus

#include <float.h>
#include <map>
#include <iostream>

/**
@defgroup ml Machine Learning

The Machine Learning Library (MLL) is a set of classes and functions for statistical
classification, regression, and clustering of data.

Most of the classification and regression algorithms are implemented as C++ classes. As the
algorithms have different sets of features (like an ability to handle missing measurements or
categorical input variables), there is little common ground between the classes. This common
ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from.

See detailed overview here: @ref ml_intro.
*/

namespace cv
{

namespace ml
{

//! @addtogroup ml
//! @{

/** @brief Variable types */
enum VariableTypes
{
    VAR_NUMERICAL   = 0, //!< same as VAR_ORDERED
    VAR_ORDERED     = 0, //!< ordered variables
    VAR_CATEGORICAL = 1  //!< categorical variables
};

/** @brief %Error types */
enum ErrorTypes
{
    TEST_ERROR  = 0,
    TRAIN_ERROR = 1
};

/** @brief Sample types */
enum SampleTypes
{
    ROW_SAMPLE = 0, //!< each training sample is a row of samples
    COL_SAMPLE = 1  //!< each training sample occupies a column of samples
};

/** @brief The structure represents the logarithmic grid range of statmodel parameters.

It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate
being computed by cross-validation.
*/
class CV_EXPORTS_W ParamGrid
{
public:
    /** @brief Default constructor */
    ParamGrid();
    /** @brief Constructor with parameters */
    ParamGrid(double _minVal, double _maxVal, double _logStep);

    CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
    CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.

    /** @brief Logarithmic step for iterating the statmodel parameter.

    The grid determines the following iteration sequence of the statmodel parameter values:
    \f[(minVal, minVal*logStep, minVal*{logStep}^2, \dots, minVal*{logStep}^n),\f]
    where \f$n\f$ is the maximal index satisfying
    \f[\texttt{minVal} * \texttt{logStep}^n < \texttt{maxVal}\f]
    The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
    */
    CV_PROP_RW double logStep;

    /** @brief Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method

    @param minVal minimum value of the parameter grid
    @param maxVal maximum value of the parameter grid
    @param logstep Logarithmic step for iterating the statmodel parameter
    */
    CV_WRAP static Ptr<ParamGrid> create(double minVal=0., double maxVal=0., double logstep=1.);
};
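
// Example (illustrative sketch, not part of the original header): how a grid is
// walked by SVM::trainAuto. Assumes only <opencv2/ml.hpp> and the members
// documented above.
//
//     cv::Ptr<cv::ml::ParamGrid> grid = cv::ml::ParamGrid::create(0.1, 100., 10.);
//     for (double v = grid->minVal; v < grid->maxVal; v *= grid->logStep)
//         std::cout << v << std::endl;  // prints 0.1, 1, 10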

/** @brief Class encapsulating training data.

Please note that the class only specifies the interface of training data, but not implementation.
All the statistical model classes in the _ml_ module accept Ptr\<TrainData\> as a parameter. In other
words, you can create your own class derived from TrainData and pass a smart pointer to an instance
of this class into StatModel::train.

@sa @ref ml_intro_data
*/
class CV_EXPORTS_W TrainData
{
public:
    static inline float missingValue() { return FLT_MAX; }
    virtual ~TrainData();

    CV_WRAP virtual int getLayout() const = 0;
    CV_WRAP virtual int getNTrainSamples() const = 0;
    CV_WRAP virtual int getNTestSamples() const = 0;
    CV_WRAP virtual int getNSamples() const = 0;
    CV_WRAP virtual int getNVars() const = 0;
    CV_WRAP virtual int getNAllVars() const = 0;

    CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
    CV_WRAP virtual Mat getSamples() const = 0;
    CV_WRAP virtual Mat getMissing() const = 0;

    /** @brief Returns matrix of train samples

    @param layout The requested layout. If it's different from the initial one, the matrix is
    transposed. See ml::SampleTypes.
    @param compressSamples if true, the function returns only the training samples (specified by
    sampleIdx)
    @param compressVars if true, the function returns the shorter training samples, containing only
    the active variables.
    In the current implementation the function tries to avoid physical data copying and returns the
    matrix stored inside TrainData (unless the transposition or compression is needed).
    */
    CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
                                        bool compressSamples=true,
                                        bool compressVars=true) const = 0;

    /** @brief Returns the vector of responses
    The function returns the ordered or the original categorical responses. Usually it's used in
    regression algorithms.
    */
    CV_WRAP virtual Mat getTrainResponses() const = 0;

    /** @brief Returns the vector of normalized categorical responses
    The function returns the vector of responses. Each response is an integer from `0` to `<number of
    classes>-1`. The actual label value can be retrieved then from the class label vector, see
    TrainData::getClassLabels.
    */
    CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;
    CV_WRAP virtual Mat getTestResponses() const = 0;
    CV_WRAP virtual Mat getTestNormCatResponses() const = 0;
    CV_WRAP virtual Mat getResponses() const = 0;
    CV_WRAP virtual Mat getNormCatResponses() const = 0;
    CV_WRAP virtual Mat getSampleWeights() const = 0;
    CV_WRAP virtual Mat getTrainSampleWeights() const = 0;
    CV_WRAP virtual Mat getTestSampleWeights() const = 0;
    CV_WRAP virtual Mat getVarIdx() const = 0;
    CV_WRAP virtual Mat getVarType() const = 0;
    CV_WRAP virtual Mat getVarSymbolFlags() const = 0;
    CV_WRAP virtual int getResponseType() const = 0;
    CV_WRAP virtual Mat getTrainSampleIdx() const = 0;
    CV_WRAP virtual Mat getTestSampleIdx() const = 0;
    CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
    virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;
    CV_WRAP virtual Mat getDefaultSubstValues() const = 0;

    CV_WRAP virtual int getCatCount(int vi) const = 0;

    /** @brief Returns the vector of class labels
    The function returns the vector of unique labels that occurred in the responses.
    */
    CV_WRAP virtual Mat getClassLabels() const = 0;

    CV_WRAP virtual Mat getCatOfs() const = 0;
    CV_WRAP virtual Mat getCatMap() const = 0;

    /** @brief Splits the training data into the training and test parts
    @sa TrainData::setTrainTestSplitRatio
    */
    CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;

    /** @brief Splits the training data into the training and test parts

    The function selects a subset of specified relative size and then returns it as the training
    set. If the function is not called, all the data is used for training. Please note that for
    each of TrainData::getTrain\* there is a corresponding TrainData::getTest\*, so that the test
    subset can be retrieved and processed as well.
    @sa TrainData::setTrainTestSplit
    */
    CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
    CV_WRAP virtual void shuffleTrainTest() = 0;

    /** @brief Returns matrix of test samples */
    CV_WRAP virtual Mat getTestSamples() const = 0;

    /** @brief Returns vector of symbolic names captured in loadFromCSV() */
    CV_WRAP virtual void getNames(std::vector<String>& names) const = 0;

    /** @brief Extract from 1D vector elements specified by passed indexes.
    @param vec input vector (supported types: CV_32S, CV_32F, CV_64F)
    @param idx 1D index vector
    */
    static CV_WRAP Mat getSubVector(const Mat& vec, const Mat& idx);

    /** @brief Extract from matrix rows/cols specified by passed indexes.
    @param matrix input matrix (supported types: CV_32S, CV_32F, CV_64F)
    @param idx 1D index vector
    @param layout specifies to extract rows (cv::ml::ROW_SAMPLE) or to extract columns (cv::ml::COL_SAMPLE)
    */
    static CV_WRAP Mat getSubMatrix(const Mat& matrix, const Mat& idx, int layout);

    /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.

    @param filename The input file name
    @param headerLineCount The number of lines in the beginning to skip; besides the header, the
    function also skips empty lines and lines starting with `#`
    @param responseStartIdx Index of the first output variable. If -1, the function considers the
    last variable as the response
    @param responseEndIdx Index of the last output variable + 1. If -1, then there is a single
    response variable at responseStartIdx.
    @param varTypeSpec The optional text string that specifies the variables' types. It has the
    format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2`
    (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are
    considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]`
    should cover all the variables. If varTypeSpec is not specified, then the algorithm uses the
    following rules:
    - all input variables are considered ordered by default. If some column contains non-
      numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding
      variable is considered categorical.
    - if there are several output variables, they are all considered as ordered. Error is
      reported when non-numerical values are used.
    - if there is a single output variable, then if its values are non-numerical or are all
      integers, then it's considered categorical. Otherwise, it's considered ordered.
    @param delimiter The character used to separate values in each line.
    @param missch The character used to specify missing measurements. It should not be a digit.
    Although it's a non-numerical value, it surely does not affect the decision of whether the
    variable is ordered or categorical.
    @note If the dataset only contains input variables and no responses, use responseStartIdx = -2
    and responseEndIdx = 0. The output variables vector will just contain zeros.
    */
    static Ptr<TrainData> loadFromCSV(const String& filename,
                                      int headerLineCount,
                                      int responseStartIdx=-1,
                                      int responseEndIdx=-1,
                                      const String& varTypeSpec=String(),
                                      char delimiter=',',
                                      char missch='?');

    /** @brief Creates training data from in-memory arrays.

    @param samples matrix of samples. It should have CV_32F type.
    @param layout see ml::SampleTypes.
    @param responses matrix of responses. If the responses are scalar, they should be stored as a
    single row or as a single column. The matrix should have type CV_32F or CV_32S (in the
    former case the responses are considered as ordered by default; in the latter case - as
    categorical)
    @param varIdx vector specifying which variables to use for training. It can be an integer vector
    (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of
    active variables.
    @param sampleIdx vector specifying which samples to use for training. It can be an integer
    vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask
    of training samples.
    @param sampleWeights optional vector with weights for each sample. It should have CV_32F type.
    @param varType optional vector of type CV_8U and size `<number_of_variables_in_samples> +
    <number_of_variables_in_responses>`, containing types of each input and output variable. See
    ml::VariableTypes.
    */
    CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
                                         InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
                                         InputArray sampleWeights=noArray(), InputArray varType=noArray());
};
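
// Example (illustrative sketch, not part of the original header): building
// TrainData from in-memory arrays and splitting off a test subset. The sample
// and label values here are placeholders.
//
//     cv::Mat samples(100, 4, CV_32F);  // 100 samples, 4 ordered features
//     cv::Mat labels(100, 1, CV_32S);   // CV_32S responses -> treated as categorical
//     cv::randu(samples, 0.f, 1.f);
//     cv::randu(labels, 0, 2);          // two classes: 0 and 1
//     cv::Ptr<cv::ml::TrainData> data =
//         cv::ml::TrainData::create(samples, cv::ml::ROW_SAMPLE, labels);
//     data->setTrainTestSplitRatio(0.8, /*shuffle=*/true);  // 80% train, 20% test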

/** @brief Base class for statistical models in OpenCV ML.
*/
class CV_EXPORTS_W StatModel : public Algorithm
{
public:
    /** Predict options */
    enum Flags {
        UPDATE_MODEL = 1,
        RAW_OUTPUT = 1, //!< makes the method return the raw results (the sum), not the class label
        COMPRESSED_INPUT = 2,
        PREPROCESSED_INPUT = 4
    };

    /** @brief Returns the number of variables in training samples */
    CV_WRAP virtual int getVarCount() const = 0;

    CV_WRAP virtual bool empty() const CV_OVERRIDE;

    /** @brief Returns true if the model is trained */
    CV_WRAP virtual bool isTrained() const = 0;
    /** @brief Returns true if the model is a classifier */
    CV_WRAP virtual bool isClassifier() const = 0;

    /** @brief Trains the statistical model

    @param trainData training data that can be loaded from file using TrainData::loadFromCSV or
    created with TrainData::create.
    @param flags optional flags, depending on the model. Some of the models can be updated with the
    new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
    */
    CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );

    /** @brief Trains the statistical model

    @param samples training samples
    @param layout See ml::SampleTypes.
    @param responses vector of responses associated with the training samples.
    */
    CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );

    /** @brief Computes error on the training or test dataset

    @param data the training data
    @param test if true, the error is computed over the test subset of the data, otherwise it's
    computed over the training subset of the data. Please note that if you loaded a completely
    different dataset to evaluate an already trained classifier, you will probably want not to set
    the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so
    that the error is computed for the whole new set. Yes, this sounds a bit confusing.
    @param resp the optional output responses.

    The method uses StatModel::predict to compute the error. For regression models the error is
    computed as RMS, for classifiers - as a percent of misclassified samples (0%-100%).
    */
    CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;

    /** @brief Predicts response(s) for the provided sample(s)

    @param samples The input samples, floating-point matrix
    @param results The optional output matrix of results.
    @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.
    */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;

    /** @brief Create and train model with default parameters

    The class must implement static `create()` method with no parameters or with all default parameter values
    */
    template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0)
    {
        Ptr<_Tp> model = _Tp::create();
        return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();
    }
};
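
// Example (illustrative sketch, not part of the original header): the generic
// StatModel::train<_Tp> helper plus calcError, reusing the TrainData object
// sketched above; RTrees is declared further down in this header.
//
//     cv::Ptr<cv::ml::RTrees> model = cv::ml::StatModel::train<cv::ml::RTrees>(data);
//     float testError = model->calcError(data, /*test=*/true, cv::noArray());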

/****************************************************************************************\
*                                Normal Bayes Classifier                                 *
\****************************************************************************************/

/** @brief Bayes classifier for normally distributed data.

@sa @ref ml_intro_bayes
*/
class CV_EXPORTS_W NormalBayesClassifier : public StatModel
{
public:
    /** @brief Predicts the response for sample(s).

    The method estimates the most probable classes for input vectors. Input vectors (one or more)
    are stored as rows of the matrix inputs. In case of multiple input vectors, the vector outputs
    holds one predicted class per input vector. The predicted class for a single input vector is
    returned by the method.
    The vector outputProbs contains the output probabilities corresponding to each element of
    result.
    */
    CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,
                                       OutputArray outputProbs, int flags=0 ) const = 0;

    /** Creates empty model
    Use StatModel::train to train the model after creation. */
    CV_WRAP static Ptr<NormalBayesClassifier> create();

    /** @brief Loads and creates a serialized NormalBayesClassifier from a file
    *
    * Use NormalBayesClassifier::save to serialize and store a NormalBayesClassifier to disk.
    * Load the NormalBayesClassifier from this file again, by calling this function with the path to the file.
    * Optionally specify the node for the file containing the classifier
    *
    * @param filepath path to serialized NormalBayesClassifier
    * @param nodeName name of node containing the classifier
    */
    CV_WRAP static Ptr<NormalBayesClassifier> load(const String& filepath, const String& nodeName = String());
};
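
// Example (illustrative sketch, not part of the original header): training the
// classifier and querying per-class probabilities; samples/labels are assumed
// to be CV_32F rows and CV_32S responses as in the TrainData sketch above.
//
//     cv::Ptr<cv::ml::NormalBayesClassifier> nb = cv::ml::NormalBayesClassifier::create();
//     nb->train(samples, cv::ml::ROW_SAMPLE, labels);
//     cv::Mat predictedClasses, classProbs;
//     nb->predictProb(samples, predictedClasses, classProbs);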

/****************************************************************************************\
*                           K-Nearest Neighbour Classifier                              *
\****************************************************************************************/

/** @brief The class implements K-Nearest Neighbors model

@sa @ref ml_intro_knn
*/
class CV_EXPORTS_W KNearest : public StatModel
{
public:
    /** Default number of neighbors to use in predict method. */
    /** @see setDefaultK */
    CV_WRAP virtual int getDefaultK() const = 0;
    /** @copybrief getDefaultK @see getDefaultK */
    CV_WRAP virtual void setDefaultK(int val) = 0;

    /** Whether classification or regression model should be trained. */
    /** @see setIsClassifier */
    CV_WRAP virtual bool getIsClassifier() const = 0;
    /** @copybrief getIsClassifier @see getIsClassifier */
    CV_WRAP virtual void setIsClassifier(bool val) = 0;

    /** Parameter for KDTree implementation. */
    /** @see setEmax */
    CV_WRAP virtual int getEmax() const = 0;
    /** @copybrief getEmax @see getEmax */
    CV_WRAP virtual void setEmax(int val) = 0;

    /** %Algorithm type, one of KNearest::Types. */
    /** @see setAlgorithmType */
    CV_WRAP virtual int getAlgorithmType() const = 0;
    /** @copybrief getAlgorithmType @see getAlgorithmType */
    CV_WRAP virtual void setAlgorithmType(int val) = 0;

    /** @brief Finds the neighbors and predicts responses for input vectors.

    @param samples Input samples stored by rows. It is a single-precision floating-point matrix of
    `<number_of_samples> * <number_of_features>` size.
    @param k Number of used nearest neighbors. Should be greater than 1.
    @param results Vector with results of prediction (regression or classification) for each input
    sample. It is a single-precision floating-point vector with `<number_of_samples>` elements.
    @param neighborResponses Optional output values for corresponding neighbors. It is a single-
    precision floating-point matrix of `<number_of_samples> * k` size.
    @param dist Optional output distances from the input vectors to the corresponding neighbors. It
    is a single-precision floating-point matrix of `<number_of_samples> * k` size.

    For each input vector (a row of the matrix samples), the method finds the k nearest neighbors.

    In case of regression, the predicted result is a mean value of the particular vector's neighbor
    responses. In case of classification, the class is determined by voting.

    For each input vector, the neighbors are sorted by their distances to the vector.

    In case of C++ interface you can use output pointers to empty matrices and the function will
    allocate memory itself.

    If only a single input vector is passed, all output matrices are optional and the predicted
    value is returned by the method.

    The function is parallelized with the TBB library.
    */
    CV_WRAP virtual float findNearest( InputArray samples, int k,
                                       OutputArray results,
                                       OutputArray neighborResponses=noArray(),
                                       OutputArray dist=noArray() ) const = 0;

    /** @brief Implementations of KNearest algorithm
    */
    enum Types
    {
        BRUTE_FORCE = 1,
        KDTREE = 2
    };

    /** @brief Creates the empty model

    The static method creates empty %KNearest classifier. It should then be trained using StatModel::train method.
    */
    CV_WRAP static Ptr<KNearest> create();

    /** @brief Loads and creates a serialized knearest from a file
    *
    * Use KNearest::save to serialize and store a KNearest to disk.
    * Load the KNearest from this file again, by calling this function with the path to the file.
    *
    * @param filepath path to serialized KNearest
    */
    CV_WRAP static Ptr<KNearest> load(const String& filepath);
};
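
// Example (illustrative sketch, not part of the original header): classifying
// query vectors with findNearest and inspecting the neighbor responses and
// distances; querySamples is a hypothetical CV_32F matrix of query rows.
//
//     cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create();
//     knn->train(samples, cv::ml::ROW_SAMPLE, labels);
//     cv::Mat results, neighborResponses, dists;
//     knn->findNearest(querySamples, /*k=*/3, results, neighborResponses, dists);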

/****************************************************************************************\
*                                Support Vector Machines                                *
\****************************************************************************************/

/** @brief Support Vector Machines.

@sa @ref ml_intro_svm
*/
class CV_EXPORTS_W SVM : public StatModel
{
public:
    class CV_EXPORTS Kernel : public Algorithm
    {
    public:
        virtual int getType() const = 0;
        virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0;
    };

    /** Type of a %SVM formulation.
    See SVM::Types. Default value is SVM::C_SVC. */
    /** @see setType */
    CV_WRAP virtual int getType() const = 0;
    /** @copybrief getType @see getType */
    CV_WRAP virtual void setType(int val) = 0;

    /** Parameter \f$\gamma\f$ of a kernel function.
    For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */
    /** @see setGamma */
    CV_WRAP virtual double getGamma() const = 0;
    /** @copybrief getGamma @see getGamma */
    CV_WRAP virtual void setGamma(double val) = 0;

    /** Parameter _coef0_ of a kernel function.
    For SVM::POLY or SVM::SIGMOID. Default value is 0. */
    /** @see setCoef0 */
    CV_WRAP virtual double getCoef0() const = 0;
    /** @copybrief getCoef0 @see getCoef0 */
    CV_WRAP virtual void setCoef0(double val) = 0;

    /** Parameter _degree_ of a kernel function.
    For SVM::POLY. Default value is 0. */
    /** @see setDegree */
    CV_WRAP virtual double getDegree() const = 0;
    /** @copybrief getDegree @see getDegree */
    CV_WRAP virtual void setDegree(double val) = 0;

    /** Parameter _C_ of a %SVM optimization problem.
    For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */
    /** @see setC */
    CV_WRAP virtual double getC() const = 0;
    /** @copybrief getC @see getC */
    CV_WRAP virtual void setC(double val) = 0;

    /** Parameter \f$\nu\f$ of a %SVM optimization problem.
    For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */
    /** @see setNu */
    CV_WRAP virtual double getNu() const = 0;
    /** @copybrief getNu @see getNu */
    CV_WRAP virtual void setNu(double val) = 0;

    /** Parameter \f$\epsilon\f$ of a %SVM optimization problem.
    For SVM::EPS_SVR. Default value is 0. */
    /** @see setP */
    CV_WRAP virtual double getP() const = 0;
    /** @copybrief getP @see getP */
    CV_WRAP virtual void setP(double val) = 0;

    /** Optional weights in the SVM::C_SVC problem, assigned to particular classes.
    They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus
    these weights affect the misclassification penalty for different classes. The larger the weight,
    the larger the penalty on misclassification of data from the corresponding class. Default value is
    empty Mat. */
    /** @see setClassWeights */
    CV_WRAP virtual cv::Mat getClassWeights() const = 0;
    /** @copybrief getClassWeights @see getClassWeights */
    CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0;

    /** Termination criteria of the iterative %SVM training procedure which solves a partial
    case of constrained quadratic optimization problem.
    You can specify tolerance and/or the maximum number of iterations. Default value is
    `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */
    /** @see setTermCriteria */
    CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;

    /** Type of a %SVM kernel.
    See SVM::KernelTypes. Default value is SVM::RBF. */
    CV_WRAP virtual int getKernelType() const = 0;

    /** Initialize with one of predefined kernels.
    See SVM::KernelTypes. */
    CV_WRAP virtual void setKernel(int kernelType) = 0;

    /** Initialize with custom kernel.
    See SVM::Kernel class for implementation details */
    virtual void setCustomKernel(const Ptr<Kernel> &_kernel) = 0;

    //! %SVM type
    enum Types {
        /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows
        imperfect separation of classes with penalty multiplier C for outliers. */
        C_SVC=100,
        /** \f$\nu\f$-Support Vector Classification. n-class classification with possible
        imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother
        the decision boundary) is used instead of C. */
        NU_SVC=101,
        /** Distribution Estimation (One-class %SVM). All the training data are from
        the same class, %SVM builds a boundary that separates the class from the rest of the feature
        space. */
        ONE_CLASS=102,
        /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors
        from the training set and the fitting hyper-plane must be less than p. For outliers the
        penalty multiplier C is used. */
        EPS_SVR=103,
        /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p.
        See @cite LibSVM for details. */
        NU_SVR=104
    };

    /** @brief %SVM kernel type

    A comparison of different kernels on the following 2D test case with four classes. Four
    SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three
    different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score.
    Bright means max-score \> 0, dark means max-score \< 0.
    ![image](pics/SVM_Comparison.png)
    */
    enum KernelTypes {
        /** Returned by SVM::getKernelType in case when custom kernel has been set */
        CUSTOM=-1,
        /** Linear kernel. No mapping is done, linear discrimination (or regression) is
        done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */
        LINEAR=0,
        /** Polynomial kernel:
        \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */
        POLY=1,
        /** Radial basis function (RBF), a good choice in most cases.
        \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */
        RBF=2,
        /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */
        SIGMOID=3,
        /** Exponential Chi2 kernel, similar to the RBF kernel:
        \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */
        CHI2=4,
        /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */
        INTER=5
    };

    //! %SVM params type
    enum ParamTypes {
        C=0,
        GAMMA=1,
        P=2,
        NU=3,
        COEF=4,
        DEGREE=5
    };

    /** @brief Trains an %SVM with optimal parameters.

    @param data the training data that can be constructed using TrainData::create or
    TrainData::loadFromCSV.
    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One
    subset is used to test the model, the others form the train set. So, the %SVM algorithm is
    executed kFold times.
    @param Cgrid grid for C
    @param gammaGrid grid for gamma
    @param pGrid grid for p
    @param nuGrid grid for nu
    @param coeffGrid grid for coeff
    @param degreeGrid grid for degree
    @param balanced If true and the problem is 2-class classification then the method creates more
    balanced cross-validation subsets, that is, proportions between classes in subsets are close
    to such proportion in the whole train dataset.

    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
    nu, coef0, degree. Parameters are considered optimal when the cross-validation
    estimate of the test set error is minimal.

    If there is no need to optimize a parameter, the corresponding grid step should be set to any
    value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.logStep
    = 0`, `gammaGrid.minVal`, `gammaGrid.maxVal` as arbitrary numbers. In this case, the value
    `Gamma` is taken for gamma.
    And, finally, if the optimization in a parameter is required but the corresponding grid is
    unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for
    gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`.

    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
    the usual %SVM with parameters specified in params is executed.
    */
    virtual bool trainAuto( const Ptr<TrainData>& data, int kFold = 10,
                            ParamGrid Cgrid = getDefaultGrid(C),
                            ParamGrid gammaGrid = getDefaultGrid(GAMMA),
                            ParamGrid pGrid = getDefaultGrid(P),
                            ParamGrid nuGrid = getDefaultGrid(NU),
                            ParamGrid coeffGrid = getDefaultGrid(COEF),
                            ParamGrid degreeGrid = getDefaultGrid(DEGREE),
                            bool balanced=false) = 0;

    /** @brief Trains an %SVM with optimal parameters

    @param samples training samples
    @param layout See ml::SampleTypes.
    @param responses vector of responses associated with the training samples.
    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One
    subset is used to test the model, the others form the train set. So, the %SVM algorithm is
    executed kFold times.
    @param Cgrid grid for C
    @param gammaGrid grid for gamma
    @param pGrid grid for p
    @param nuGrid grid for nu
    @param coeffGrid grid for coeff
    @param degreeGrid grid for degree
    @param balanced If true and the problem is 2-class classification then the method creates more
    balanced cross-validation subsets, that is, proportions between classes in subsets are close
    to such proportion in the whole train dataset.

    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
    nu, coef0, degree. Parameters are considered optimal when the cross-validation
    estimate of the test set error is minimal.

    This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only
    offers rudimentary parameter options.

    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
    the usual %SVM with parameters specified in params is executed.
    */
    CV_WRAP virtual bool trainAuto(InputArray samples,
                                   int layout,
                                   InputArray responses,
                                   int kFold = 10,
                                   Ptr<ParamGrid> Cgrid = SVM::getDefaultGridPtr(SVM::C),
                                   Ptr<ParamGrid> gammaGrid = SVM::getDefaultGridPtr(SVM::GAMMA),
                                   Ptr<ParamGrid> pGrid = SVM::getDefaultGridPtr(SVM::P),
                                   Ptr<ParamGrid> nuGrid = SVM::getDefaultGridPtr(SVM::NU),
                                   Ptr<ParamGrid> coeffGrid = SVM::getDefaultGridPtr(SVM::COEF),
                                   Ptr<ParamGrid> degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE),
                                   bool balanced=false) = 0;

    /** @brief Retrieves all the support vectors

    The method returns all the support vectors as a floating-point matrix, where support vectors are
    stored as matrix rows.
    */
    CV_WRAP virtual Mat getSupportVectors() const = 0;

    /** @brief Retrieves all the uncompressed support vectors of a linear %SVM

    The method returns all the uncompressed support vectors of a linear %SVM that the compressed
    support vector, used for prediction, was derived from. They are returned in a floating-point
    matrix, where the support vectors are stored as matrix rows.
    */
    CV_WRAP virtual Mat getUncompressedSupportVectors() const = 0;

    /** @brief Retrieves the decision function

    @param i the index of the decision function. If the problem solved is regression, 1-class or
    2-class classification, then there will be just one decision function and the index should
    always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$
    decision functions.
    @param alpha the optional output vector for weights, corresponding to different support vectors.
    In the case of linear %SVM all the alpha's will be 1's.
    @param svidx the optional output vector of indices of support vectors within the matrix of
    support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear
    %SVM each decision function consists of a single "compressed" support vector.

    The method returns the rho parameter of the decision function, a scalar subtracted from the weighted
    sum of kernel responses.
    */
    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;

    /** @brief Generates a grid for %SVM parameters.

    @param param_id %SVM parameter ID that must be one of the SVM::ParamTypes. The grid is
    generated for the parameter with this ID.

    The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be
    passed to the function SVM::trainAuto.
    */
    static ParamGrid getDefaultGrid( int param_id );

    /** @brief Generates a grid pointer for %SVM parameters.

    @param param_id %SVM parameter ID that must be one of the SVM::ParamTypes. The grid is
    generated for the parameter with this ID.

    The function generates a grid pointer for the specified parameter of the %SVM algorithm.
    The grid may be passed to the function SVM::trainAuto.
    */
    CV_WRAP static Ptr<ParamGrid> getDefaultGridPtr( int param_id );

    /** Creates empty model.
    Use StatModel::train to train the model. Since %SVM has several parameters, you may want to
    find the best parameters for your problem; it can be done with SVM::trainAuto. */
    CV_WRAP static Ptr<SVM> create();

    /** @brief Loads and creates a serialized svm from a file
    *
    * Use SVM::save to serialize and store an SVM to disk.
    * Load the SVM from this file again, by calling this function with the path to the file.
    *
    * @param filepath path to serialized svm
    */
    CV_WRAP static Ptr<SVM> load(const String& filepath);
};
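
// Example (illustrative sketch, not part of the original header): a C-SVC with
// an RBF kernel, configured manually; trainAuto could replace the explicit
// parameter choices. querySample is a hypothetical single CV_32F row.
//
//     cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
//     svm->setType(cv::ml::SVM::C_SVC);
//     svm->setKernel(cv::ml::SVM::RBF);
//     svm->setC(1.0);
//     svm->setGamma(0.5);
//     svm->train(samples, cv::ml::ROW_SAMPLE, labels);
//     float predictedClass = svm->predict(querySample);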

/****************************************************************************************\
*                              Expectation - Maximization                               *
\****************************************************************************************/

/** @brief The class implements the Expectation Maximization algorithm.

@sa @ref ml_intro_em
*/
class CV_EXPORTS_W EM : public StatModel
{
public:
    //! Type of covariation matrices
    enum Types {
        /** A scaled identity matrix \f$\mu_k * I\f$. There is the only
        parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases,
        when the constraint is relevant, or as a first step in the optimization (for example in case
        when the data is preprocessed with PCA). The results of such preliminary estimation may be
        passed again to the optimization procedure, this time with
        covMatType=EM::COV_MAT_DIAGONAL. */
        COV_MAT_SPHERICAL=0,
        /** A diagonal matrix with positive diagonal elements. The number of
        free parameters is d for each matrix. This is the most commonly used option, yielding good
        estimation results. */
        COV_MAT_DIAGONAL=1,
        /** A symmetric positive-definite matrix. The number of free
        parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless
        there is pretty accurate initial estimation of the parameters and/or a huge number of
        training samples. */
        COV_MAT_GENERIC=2,
        COV_MAT_DEFAULT=COV_MAT_DIAGONAL
    };

    //! Default parameters
    enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};

    //! The initial step
    enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};

    /** The number of mixture components in the Gaussian mixture model.
    Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could
    determine the optimal number of mixtures within a specified value range, but that is not the
    case in ML yet. */
    /** @see setClustersNumber */
    CV_WRAP virtual int getClustersNumber() const = 0;
    /** @copybrief getClustersNumber @see getClustersNumber */
    CV_WRAP virtual void setClustersNumber(int val) = 0;

    /** Constraint on covariance matrices which defines type of matrices.
    See EM::Types. */
    /** @see setCovarianceMatrixType */
    CV_WRAP virtual int getCovarianceMatrixType() const = 0;
    /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */
    CV_WRAP virtual void setCovarianceMatrixType(int val) = 0;

    /** The termination criteria of the %EM algorithm.
    The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of
    M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default
    maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

    /** @brief Returns weights of the mixtures

    Returns vector with the number of elements equal to the number of mixtures.
    */
    CV_WRAP virtual Mat getWeights() const = 0;

    /** @brief Returns the cluster centers (means of the Gaussian mixture)

    Returns matrix with the number of rows equal to the number of mixtures and number of columns
    equal to the space dimensionality.
    */
    CV_WRAP virtual Mat getMeans() const = 0;

    /** @brief Returns covariation matrices

    Returns vector of covariation matrices. Number of matrices is the number of Gaussian mixtures,
    each matrix is a square floating-point matrix NxN, where N is the space dimensionality.
    */
    CV_WRAP virtual void getCovs(CV_OUT std::vector<Mat>& covs) const = 0;

    /** @brief Returns posterior probabilities for the provided samples

    @param samples The input samples, floating-point matrix
    @param results The optional output \f$ nSamples \times nClusters\f$ matrix of results. It contains
    posterior probabilities for each sample from the input
    @param flags This parameter will be ignored
    */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0;

    /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component
    for the given sample.

    @param sample A sample for classification. It should be a one-channel matrix of
    \f$1 \times dims\f$ or \f$dims \times 1\f$ size.
    @param probs Optional output matrix that contains posterior probabilities of each component
    given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type.

    The method returns a two-element double vector. The zero element is a likelihood logarithm value for
    the sample. The first element is an index of the most probable mixture component for the given
    sample.
    */
    CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.
    This variation starts with the Expectation step. Initial values of the model parameters will be
    estimated by the k-means algorithm.

    Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
    responses (class labels or function values) as input. Instead, it computes the *Maximum
    Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
    parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means, \f$S_k\f$ in
    covs[k], \f$\pi_k\f$ in weights, and optionally computes the output "class label" for each
    sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most
    probable mixture component for each sample).

    The trained model can be used further for prediction, just like any other classifier. The
    trained model is similar to the NormalBayesClassifier.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
    one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
    it will be converted to the inner matrix of such type for the further computing.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
    each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
    \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
    mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
    mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
    CV_64FC1 type.
    */
    CV_WRAP virtual bool trainEM(InputArray samples,
                                 OutputArray logLikelihoods=noArray(),
                                 OutputArray labels=noArray(),
                                 OutputArray probs=noArray()) = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.
    This variation starts with the Expectation step. You need to provide initial means \f$a_k\f$ of
    mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices
    \f$S_k\f$ of mixture components.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
    one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
    it will be converted to the inner matrix of such type for the further computing.
    @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of
    \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be
    converted to the inner matrix of such type for the further computing.
    @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of
    covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices
    do not have CV_64F type they will be converted to the inner matrices of such type for the
    further computing.
    @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel
    floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
    each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
    \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
    mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
    mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
    CV_64FC1 type.
    */
    CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,
                                InputArray covs0=noArray(),
                                InputArray weights0=noArray(),
                                OutputArray logLikelihoods=noArray(),
                                OutputArray labels=noArray(),
                                OutputArray probs=noArray()) = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.
    This variation starts with the Maximization step. You need to provide initial probabilities
    \f$p_{i,k}\f$ to use this option.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
    one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
    it will be converted to the inner matrix of such type for the further computing.
    @param probs0 Initial probabilities \f$p_{i,k}\f$ of each sample's membership in each mixture component
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
    each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
    \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
    mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
    mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
  856. CV_64FC1 type.
    */
    CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,
                                OutputArray logLikelihoods=noArray(),
                                OutputArray labels=noArray(),
                                OutputArray probs=noArray()) = 0;
    /** Creates an empty %EM model.
    The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
    can use one of the EM::train\* methods or load it from a file using Algorithm::load\<EM\>(filename).
    */
    CV_WRAP static Ptr<EM> create();

    /** @brief Loads and creates a serialized EM from a file
     *
     * Use EM::save to serialize and store an EM to disk.
     * Load the EM from this file again, by calling this function with the path to the file.
     * Optionally specify the name of the file node containing the classifier.
     *
     * @param filepath path to the serialized EM
     * @param nodeName name of the node containing the classifier
     */
    CV_WRAP static Ptr<EM> load(const String& filepath, const String& nodeName = String());
};
/****************************************************************************************\
*                                     Decision Tree                                      *
\****************************************************************************************/

/** @brief The class represents a single decision tree or a collection of decision trees.

The current public interface of the class allows the user to train only a single decision tree; however,
the class is capable of storing multiple decision trees and using them for prediction (by summing
responses or using a voting scheme), and the classes derived from DTrees (such as RTrees and Boost)
use this capability to implement decision tree ensembles.
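
A hedged configuration sketch (illustrative; assumes CV_32F `data` and CV_32S `labels`):
@code
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(10);
dtree->setMinSampleCount(2);
dtree->setCVFolds(0); // disable the built-in cross-validation pruning
dtree->train(data, ROW_SAMPLE, labels);
@endcode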
@sa @ref ml_intro_trees
*/
class CV_EXPORTS_W DTrees : public StatModel
{
public:
    /** Predict options */
    enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) };

    /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to
    find a suboptimal split.
    If a discrete variable, on which the training procedure tries to make a split, takes more than
    maxCategories values, the precise best subset estimation may take a very long time because the
    algorithm is exponential. Instead, many decision tree engines (including our implementation)
    try to find a sub-optimal split in this case by clustering all the samples into maxCategories
    clusters, that is, some categories are merged together. The clustering is applied only in n \>
    2-class classification problems for categorical variables with N \> maxCategories possible
    values. In case of regression and 2-class classification the optimal split can be found
    efficiently without employing clustering, thus the parameter is not used in these cases.
    Default value is 10.*/
    /** @see setMaxCategories */
    CV_WRAP virtual int getMaxCategories() const = 0;
    /** @copybrief getMaxCategories @see getMaxCategories */
    CV_WRAP virtual void setMaxCategories(int val) = 0;

    /** The maximum possible depth of the tree.
    That is, the training algorithm attempts to split a node while its depth is less than maxDepth.
    The root node has zero depth. The actual depth may be smaller if the other termination criteria
    are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the
    tree is pruned. Default value is INT_MAX.*/
    /** @see setMaxDepth */
    CV_WRAP virtual int getMaxDepth() const = 0;
    /** @copybrief getMaxDepth @see getMaxDepth */
    CV_WRAP virtual void setMaxDepth(int val) = 0;

    /** If the number of samples in a node is less than this parameter then the node will not be split.
    Default value is 10.*/
    /** @see setMinSampleCount */
    CV_WRAP virtual int getMinSampleCount() const = 0;
    /** @copybrief getMinSampleCount @see getMinSampleCount */
    CV_WRAP virtual void setMinSampleCount(int val) = 0;

    /** If CVFolds \> 1 then the algorithm prunes the built decision tree using a K-fold
    cross-validation procedure where K is equal to CVFolds.
    Default value is 10.*/
    /** @see setCVFolds */
    CV_WRAP virtual int getCVFolds() const = 0;
    /** @copybrief getCVFolds @see getCVFolds */
    CV_WRAP virtual void setCVFolds(int val) = 0;

    /** If true then surrogate splits will be built.
    These splits allow working with missing data and computing variable importance correctly.
    Default value is false.
    @note currently it's not implemented.*/
    /** @see setUseSurrogates */
    CV_WRAP virtual bool getUseSurrogates() const = 0;
    /** @copybrief getUseSurrogates @see getUseSurrogates */
    CV_WRAP virtual void setUseSurrogates(bool val) = 0;

    /** If true then the pruning will be harsher.
    This will make a tree more compact and more resistant to the training data noise but a bit less
    accurate. Default value is true.*/
    /** @see setUse1SERule */
    CV_WRAP virtual bool getUse1SERule() const = 0;
    /** @copybrief getUse1SERule @see getUse1SERule */
    CV_WRAP virtual void setUse1SERule(bool val) = 0;

    /** If true then pruned branches are physically removed from the tree.
    Otherwise they are retained and it is possible to get results from the original unpruned (or
    less aggressively pruned) tree. Default value is true.*/
    /** @see setTruncatePrunedTree */
    CV_WRAP virtual bool getTruncatePrunedTree() const = 0;
    /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */
    CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0;

    /** Termination criteria for regression trees.
    If all absolute differences between an estimated value in a node and the values of the train samples
    in this node are less than this parameter then the node will not be split further. Default
    value is 0.01f.*/
    /** @see setRegressionAccuracy */
    CV_WRAP virtual float getRegressionAccuracy() const = 0;
    /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */
    CV_WRAP virtual void setRegressionAccuracy(float val) = 0;
    /** @brief The array of a priori class probabilities, sorted by the class label value.

    The parameter can be used to tune the decision tree preferences toward a certain class. For
    example, if you want to detect some rare anomaly occurrence, the training base will likely
    contain many more normal cases than anomalies, so a very good classification performance
    will be achieved just by considering every case as normal. To avoid this, the priors can be
    specified, where the anomaly probability is artificially increased (up to 0.5 or even
    greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is
    adjusted properly.

    You can also think about this parameter as weights of prediction categories which determine
    the relative weights that you give to misclassification. That is, if the weight of the first
    category is 1 and the weight of the second category is 10, then each mistake in predicting
    the second category is equivalent to making 10 mistakes in predicting the first category.
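
    A hedged sketch of boosting the weight of a rare second class (hypothetical two-class labels
    0 and 1):
    @code
    Ptr<DTrees> dtree = DTrees::create();
    Mat priors = (Mat_<float>(1, 2) << 1.f, 10.f);
    dtree->setPriors(priors);
    @endcode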
    Default value is empty Mat.*/
    /** @see setPriors */
    CV_WRAP virtual cv::Mat getPriors() const = 0;
    /** @copybrief getPriors @see getPriors */
    CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;

    /** @brief The class represents a decision tree node.
    */
    class CV_EXPORTS Node
    {
    public:
        Node();
        double value; //!< Value at the node: a class label in case of classification or an estimated
                      //!< function value in case of regression.
        int classIdx; //!< Class index normalized to the 0..class_count-1 range and assigned to the
                      //!< node. It is used internally in classification trees and tree ensembles.
        int parent; //!< Index of the parent node
        int left; //!< Index of the left child node
        int right; //!< Index of the right child node
        int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the
                        //!< case of missing values.
        int split; //!< Index of the first split
    };

    /** @brief The class represents a split in a decision tree.
    */
    class CV_EXPORTS Split
    {
    public:
        Split();
        int varIdx; //!< Index of the variable on which the split is created.
        bool inversed; //!< If true, then the inverse split rule is used (i.e. the left and right
                       //!< branches are exchanged in the rule expressions below).
        float quality; //!< The split quality, a positive number. It is used to choose the best split.
        int next; //!< Index of the next split in the list of splits for the node
        float c; /**< The threshold value in the case of a split on an ordered variable.
                      The rule is:
                      @code{.none}
                      if var_value < c
                        then next_node <- left
                        else next_node <- right
                      @endcode */
        int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable.
                            The rule is:
                            @code{.none}
                            if bitset[var_value] == 1
                                then next_node <- left
                                else next_node <- right
                            @endcode */
    };

    /** @brief Returns indices of root nodes
    */
    virtual const std::vector<int>& getRoots() const = 0;

    /** @brief Returns all the nodes
    All the node indices are indices in the returned vector.
    */
    virtual const std::vector<Node>& getNodes() const = 0;

    /** @brief Returns all the splits
    All the split indices are indices in the returned vector.
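
    A hedged traversal sketch (illustrative; assumes a trained model `dtree`, a CV_32F row vector
    `sample`, ordered variables only, and no surrogate splits):
    @code
    const std::vector<DTrees::Node>& nodes = dtree->getNodes();
    const std::vector<DTrees::Split>& splits = dtree->getSplits();
    int nidx = dtree->getRoots()[0];
    while (nodes[nidx].split >= 0) // inner nodes have a split; leaves do not
    {
        const DTrees::Split& split = splits[nodes[nidx].split];
        bool goLeft = sample.at<float>(split.varIdx) < split.c;
        if (split.inversed) goLeft = !goLeft;
        nidx = goLeft ? nodes[nidx].left : nodes[nidx].right;
    }
    double prediction = nodes[nidx].value;
    @endcode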
    */
    virtual const std::vector<Split>& getSplits() const = 0;

    /** @brief Returns all the bitsets for categorical splits
    Split::subsetOfs is an offset in the returned vector.
    */
    virtual const std::vector<int>& getSubsets() const = 0;

    /** @brief Creates the empty model

    The static method creates an empty decision tree with the specified parameters. It should then be
    trained using the train method (see StatModel::train). Alternatively, you can load the model from
    a file using Algorithm::load\<DTrees\>(filename).
    */
    CV_WRAP static Ptr<DTrees> create();

    /** @brief Loads and creates a serialized DTrees from a file
     *
     * Use DTrees::save to serialize and store a DTrees model to disk.
     * Load the DTrees model from this file again, by calling this function with the path to the file.
     * Optionally specify the name of the file node containing the classifier.
     *
     * @param filepath path to the serialized DTrees model
     * @param nodeName name of the node containing the classifier
     */
    CV_WRAP static Ptr<DTrees> load(const String& filepath, const String& nodeName = String());
};

/****************************************************************************************\
*                               Random Trees Classifier                                  *
\****************************************************************************************/

/** @brief The class implements the random forest predictor.
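
A hedged usage sketch (illustrative; assumes CV_32F `data` and CV_32S `labels`, plus a CV_32F row
vector `sample`):
@code
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 0.01));
rtrees->train(data, ROW_SAMPLE, labels);
float prediction = rtrees->predict(sample);
@endcode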
@sa @ref ml_intro_rtrees
*/
class CV_EXPORTS_W RTrees : public DTrees
{
public:
    /** If true then variable importance will be calculated and can then be retrieved by RTrees::getVarImportance.
    Default value is false.*/
    /** @see setCalculateVarImportance */
    CV_WRAP virtual bool getCalculateVarImportance() const = 0;
    /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */
    CV_WRAP virtual void setCalculateVarImportance(bool val) = 0;

    /** The size of the randomly selected subset of features at each tree node that is used
    to find the best split(s).
    If you set it to 0 then the size will be set to the square root of the total number of
    features. Default value is 0.*/
    /** @see setActiveVarCount */
    CV_WRAP virtual int getActiveVarCount() const = 0;
    /** @copybrief getActiveVarCount @see getActiveVarCount */
    CV_WRAP virtual void setActiveVarCount(int val) = 0;

    /** The termination criteria that specify when the training algorithm stops.
    Either when the specified number of trees is trained and added to the ensemble or when
    sufficient accuracy (measured as the OOB error) is achieved. Typically the more trees you have, the
    better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes
    past a certain number of trees. Keep in mind also that the number of trees increases the
    prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
    TermCriteria::EPS, 50, 0.1).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

    /** Returns the variable importance array.
    The method returns the variable importance vector, computed at the training stage when
    CalculateVarImportance is set to true. If this flag was set to false, an empty matrix is
    returned.
    */
    CV_WRAP virtual Mat getVarImportance() const = 0;

    /** Returns the result of each individual tree in the forest.
    If the model solves a regression problem, the method will return each of the trees'
    results for each of the sample cases. If the model is a classifier, it will return
    a Mat with samples + 1 rows, where the first row gives the class labels and the
    following rows return the votes each class had for each sample.
    @param samples Array containing the samples for which votes will be calculated.
    @param results Array where the result of the calculation will be written.
    @param flags Flags for defining the type of RTrees.
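
    A hedged sketch (assumes a trained classifier `rtrees` and CV_32F `samples`; 0 selects the
    default prediction flags):
    @code
    Mat votes;
    rtrees->getVotes(samples, votes, 0);
    // votes.row(0) holds the class labels; row i+1 holds the per-class votes for sample i.
    @endcode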
    */
    CV_WRAP virtual void getVotes(InputArray samples, OutputArray results, int flags) const = 0;

    /** Returns the OOB error value, computed at the training stage when calcOOBError is set to true.
     * If this flag was set to false, 0 is returned. The OOB error is also scaled by sample weighting.
     */
#if CV_VERSION_MAJOR == 4
    CV_WRAP virtual double getOOBError() const { return 0; }
#else
    /*CV_WRAP*/ virtual double getOOBError() const = 0;
#endif

    /** Creates the empty model.
    Use StatModel::train to train the model, or Algorithm::load\<RTrees\>(filename) to load a
    pre-trained model.
    */
    CV_WRAP static Ptr<RTrees> create();

    /** @brief Loads and creates a serialized RTree from a file
     *
     * Use RTree::save to serialize and store an RTree to disk.
     * Load the RTree from this file again, by calling this function with the path to the file.
     * Optionally specify the name of the file node containing the classifier.
     *
     * @param filepath path to the serialized RTree
     * @param nodeName name of the node containing the classifier
     */
    CV_WRAP static Ptr<RTrees> load(const String& filepath, const String& nodeName = String());
};

/****************************************************************************************\
*                                Boosted tree classifier                                 *
\****************************************************************************************/

/** @brief Boosted tree classifier derived from DTrees
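
A hedged configuration sketch (illustrative; assumes CV_32F `data` and CV_32S `labels`):
@code
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::GENTLE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->train(data, ROW_SAMPLE, labels);
@endcode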
@sa @ref ml_intro_boost
*/
class CV_EXPORTS_W Boost : public DTrees
{
public:
    /** Type of the boosting algorithm.
    See Boost::Types. Default value is Boost::REAL. */
    /** @see setBoostType */
    CV_WRAP virtual int getBoostType() const = 0;
    /** @copybrief getBoostType @see getBoostType */
    CV_WRAP virtual void setBoostType(int val) = 0;

    /** The number of weak classifiers.
    Default value is 100. */
    /** @see setWeakCount */
    CV_WRAP virtual int getWeakCount() const = 0;
    /** @copybrief getWeakCount @see getWeakCount */
    CV_WRAP virtual void setWeakCount(int val) = 0;

    /** A threshold between 0 and 1 used to save computational time.
    Samples with a summary weight \f$\leq 1 - \texttt{weight\_trim\_rate}\f$ do not participate in the *next*
    iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
    /** @see setWeightTrimRate */
    CV_WRAP virtual double getWeightTrimRate() const = 0;
    /** @copybrief getWeightTrimRate @see getWeightTrimRate */
    CV_WRAP virtual void setWeightTrimRate(double val) = 0;

    /** Boosting type.
    Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
    enum Types {
        DISCRETE=0, //!< Discrete AdaBoost.
        REAL=1,     //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
                    //!< and works well with categorical data.
        LOGIT=2,    //!< LogitBoost. It can produce good regression fits.
        GENTLE=3    //!< Gentle AdaBoost. It puts less weight on outlier data points and for that
                    //!< reason is often good with regression data.
    };

    /** Creates the empty model.
    Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model. */
    CV_WRAP static Ptr<Boost> create();

    /** @brief Loads and creates a serialized Boost from a file
     *
     * Use Boost::save to serialize and store a Boost model to disk.
     * Load the Boost model from this file again, by calling this function with the path to the file.
     * Optionally specify the name of the file node containing the classifier.
     *
     * @param filepath path to the serialized Boost model
     * @param nodeName name of the node containing the classifier
     */
    CV_WRAP static Ptr<Boost> load(const String& filepath, const String& nodeName = String());
};

/****************************************************************************************\
*                                Gradient Boosted Trees                                  *
\****************************************************************************************/

/*class CV_EXPORTS_W GBTrees : public DTrees
{
public:
    struct CV_EXPORTS_W_MAP Params : public DTrees::Params
    {
        CV_PROP_RW int weakCount;
        CV_PROP_RW int lossFunctionType;
        CV_PROP_RW float subsamplePortion;
        CV_PROP_RW float shrinkage;

        Params();
        Params( int lossFunctionType, int weakCount, float shrinkage,
                float subsamplePortion, int maxDepth, bool useSurrogates );
    };

    enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};

    virtual void setK(int k) = 0;

    virtual float predictSerial( InputArray samples,
                                 OutputArray weakResponses, int flags) const = 0;

    static Ptr<GBTrees> create(const Params& p);
};*/

/****************************************************************************************\
*                           Artificial Neural Networks (ANN)                             *
\****************************************************************************************/

/////////////////////////////////// Multi-Layer Perceptrons //////////////////////////////

/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.

Unlike many other models in ML that are constructed and trained at once, in the MLP model these
steps are separated. First, a network with the specified topology is created using the non-default
constructor or the method ANN_MLP::create. All the weights are set to zero. Then, the network is
trained using a set of input and output vectors. The training procedure can be repeated more than
once, that is, the weights can be adjusted based on the new training data.

Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
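
A minimal construction and training sketch (illustrative; assumes a CV_32F matrix `trainIn` with
two input features per row and a CV_32F matrix `trainOut` with one output per row):
@code
Ptr<ANN_MLP> mlp = ANN_MLP::create();
Mat layers = (Mat_<int>(1, 3) << 2, 8, 1); // 2 inputs, 8 hidden neurons, 1 output
mlp->setLayerSizes(layers);
mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1);
mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.001, 0.1);
mlp->train(trainIn, ROW_SAMPLE, trainOut);
@endcode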
@sa @ref ml_intro_ann
*/
class CV_EXPORTS_W ANN_MLP : public StatModel
{
public:
    /** Available training methods */
    enum TrainingMethods {
        BACKPROP=0, //!< The back-propagation algorithm.
        RPROP = 1,  //!< The RPROP algorithm. See @cite RPROP93 for details.
        ANNEAL = 2  //!< The simulated annealing algorithm. See @cite Kirkpatrick83 for details.
    };

    /** Sets training method and common parameters.
    @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
    @param param1 passed to setRpropDW0 for ANN_MLP::RPROP, to setBackpropWeightScale for ANN_MLP::BACKPROP, and to initialT for ANN_MLP::ANNEAL.
    @param param2 passed to setRpropDWMin for ANN_MLP::RPROP, to setBackpropMomentumScale for ANN_MLP::BACKPROP, and to finalT for ANN_MLP::ANNEAL.
    */
    CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;

    /** Returns the current training method */
    CV_WRAP virtual int getTrainMethod() const = 0;

    /** Initialize the activation function for each neuron.
    Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
    @param type The type of the activation function. See ANN_MLP::ActivationFunctions.
    @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0.
    @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0.
    */
    CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;

    /** Integer vector specifying the number of neurons in each layer including the input and output layers.
    The very first element specifies the number of elements in the input layer.
    The last element specifies the number of elements in the output layer. Default value is empty Mat.
    @sa getLayerSizes */
    CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0;

    /** Integer vector specifying the number of neurons in each layer including the input and output layers.
    The very first element specifies the number of elements in the input layer.
    The last element specifies the number of elements in the output layer.
    @sa setLayerSizes */
    CV_WRAP virtual cv::Mat getLayerSizes() const = 0;

    /** Termination criteria of the training algorithm.
    You can specify the maximum number of iterations (maxCount) and/or how much the error could
    change between the iterations to make the algorithm continue (epsilon). Default value is
    TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;
    /** BPROP: Strength of the weight gradient term.
    The recommended value is about 0.1. Default value is 0.1.*/
    /** @see setBackpropWeightScale */
    CV_WRAP virtual double getBackpropWeightScale() const = 0;
    /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */
    CV_WRAP virtual void setBackpropWeightScale(double val) = 0;

    /** BPROP: Strength of the momentum term (the difference between the weights on the 2 previous iterations).
    This parameter provides some inertia to smooth the random fluctuations of the weights. It can
    vary from 0 (the feature is disabled) to 1 and beyond. A value of about 0.1 is good enough.
    Default value is 0.1.*/
    /** @see setBackpropMomentumScale */
    CV_WRAP virtual double getBackpropMomentumScale() const = 0;
    /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */
    CV_WRAP virtual void setBackpropMomentumScale(double val) = 0;

    /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
    Default value is 0.1.*/
    /** @see setRpropDW0 */
    CV_WRAP virtual double getRpropDW0() const = 0;
    /** @copybrief getRpropDW0 @see getRpropDW0 */
    CV_WRAP virtual void setRpropDW0(double val) = 0;

    /** RPROP: Increase factor \f$\eta^+\f$.
    It must be \>1. Default value is 1.2.*/
    /** @see setRpropDWPlus */
    CV_WRAP virtual double getRpropDWPlus() const = 0;
    /** @copybrief getRpropDWPlus @see getRpropDWPlus */
    CV_WRAP virtual void setRpropDWPlus(double val) = 0;

    /** RPROP: Decrease factor \f$\eta^-\f$.
    It must be \<1. Default value is 0.5.*/
    /** @see setRpropDWMinus */
    CV_WRAP virtual double getRpropDWMinus() const = 0;
    /** @copybrief getRpropDWMinus @see getRpropDWMinus */
    CV_WRAP virtual void setRpropDWMinus(double val) = 0;

    /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
    It must be positive. Default value is FLT_EPSILON.*/
    /** @see setRpropDWMin */
    CV_WRAP virtual double getRpropDWMin() const = 0;
    /** @copybrief getRpropDWMin @see getRpropDWMin */
    CV_WRAP virtual void setRpropDWMin(double val) = 0;

    /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
    It must be \>1. Default value is 50.*/
    /** @see setRpropDWMax */
    CV_WRAP virtual double getRpropDWMax() const = 0;
    /** @copybrief getRpropDWMax @see getRpropDWMax */
    CV_WRAP virtual void setRpropDWMax(double val) = 0;

    /** ANNEAL: Initial temperature.
    It must be \>=0. Default value is 10.*/
    /** @see setAnnealInitialT */
    CV_WRAP virtual double getAnnealInitialT() const = 0;
    /** @copybrief getAnnealInitialT @see getAnnealInitialT */
    CV_WRAP virtual void setAnnealInitialT(double val) = 0;

    /** ANNEAL: Final temperature.
    It must be \>=0 and less than initialT. Default value is 0.1.*/
    /** @see setAnnealFinalT */
    CV_WRAP virtual double getAnnealFinalT() const = 0;
    /** @copybrief getAnnealFinalT @see getAnnealFinalT */
    CV_WRAP virtual void setAnnealFinalT(double val) = 0;

    /** ANNEAL: Cooling ratio.
    It must be \>0 and less than 1. Default value is 0.95.*/
    /** @see setAnnealCoolingRatio */
    CV_WRAP virtual double getAnnealCoolingRatio() const = 0;
    /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */
    CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0;

    /** ANNEAL: Iterations per temperature step.
    It must be \>0. Default value is 10.*/
    /** @see setAnnealItePerStep */
    CV_WRAP virtual int getAnnealItePerStep() const = 0;
    /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */
    CV_WRAP virtual void setAnnealItePerStep(int val) = 0;

    /** @brief Set/initialize the anneal RNG */
    virtual void setAnnealEnergyRNG(const RNG& rng) = 0;
    /** possible activation functions */
    enum ActivationFunctions {
        /** Identity function: \f$f(x)=x\f$ */
        IDENTITY = 0,
        /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$
        @note
        If you are using the default sigmoid activation function with the default parameter values
        fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output
        will range from [-1.7159, 1.7159], instead of [0,1].*/
        SIGMOID_SYM = 1,
        /** Gaussian function: \f$f(x)=\beta e^{-\alpha x^2}\f$ */
        GAUSSIAN = 2,
        /** ReLU function: \f$f(x)=\max(0,x)\f$ */
        RELU = 3,
        /** Leaky ReLU function: \f$f(x)=x\f$ for \f$x>0\f$ and \f$f(x)=\alpha x\f$ for \f$x \leq 0\f$ */
        LEAKYRELU = 4
    };
    /** Train options */
    enum TrainFlags {
        /** Update the network weights, rather than compute them from scratch. In the latter case
        the weights are initialized using the Nguyen-Widrow algorithm. */
        UPDATE_WEIGHTS = 1,
        /** Do not normalize the input vectors. If this flag is not set, the training algorithm
        normalizes each input feature independently, shifting its mean value to 0 and making the
        standard deviation equal to 1. If the network is assumed to be updated frequently, the new
        training data could be much different from the original one. In this case, you should take
        care of proper normalization. */
        NO_INPUT_SCALE = 2,
        /** Do not normalize the output vectors. If the flag is not set, the training algorithm
        normalizes each output feature independently, by transforming it to a certain range
        depending on the used activation function. */
        NO_OUTPUT_SCALE = 4
    };

    CV_WRAP virtual Mat getWeights(int layerIdx) const = 0;

    /** @brief Creates empty model

    Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model.
    Note that the train method has optional flags: ANN_MLP::TrainFlags.
    */
    CV_WRAP static Ptr<ANN_MLP> create();

    /** @brief Loads and creates a serialized ANN from a file
     *
     * Use ANN::save to serialize and store an ANN to disk.
     * Load the ANN from this file again, by calling this function with the path to the file.
     *
     * @param filepath path to the serialized ANN
     */
    CV_WRAP static Ptr<ANN_MLP> load(const String& filepath);
};
#ifndef DISABLE_OPENCV_3_COMPATIBILITY
typedef ANN_MLP ANN_MLP_ANNEAL;
#endif

/****************************************************************************************\
*                                  Logistic Regression                                   *
\****************************************************************************************/

/** @brief Implements the Logistic Regression classifier.
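
A hedged training sketch (illustrative; assumes a CV_32F feature matrix `data` and a CV_32F
column matrix `labels`):
@code
Ptr<LogisticRegression> lr = LogisticRegression::create();
lr->setLearningRate(0.001);
lr->setIterations(100);
lr->setRegularization(LogisticRegression::REG_L2);
lr->setTrainMethod(LogisticRegression::BATCH);
lr->train(data, ROW_SAMPLE, labels);
Mat responses;
lr->predict(data, responses);
@endcode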
@sa @ref ml_intro_lr
*/
class CV_EXPORTS_W LogisticRegression : public StatModel
{
public:
    /** Learning rate. */
    /** @see setLearningRate */
    CV_WRAP virtual double getLearningRate() const = 0;
    /** @copybrief getLearningRate @see getLearningRate */
    CV_WRAP virtual void setLearningRate(double val) = 0;

    /** Number of iterations. */
    /** @see setIterations */
    CV_WRAP virtual int getIterations() const = 0;
    /** @copybrief getIterations @see getIterations */
    CV_WRAP virtual void setIterations(int val) = 0;

    /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
    /** @see setRegularization */
    CV_WRAP virtual int getRegularization() const = 0;
    /** @copybrief getRegularization @see getRegularization */
    CV_WRAP virtual void setRegularization(int val) = 0;

    /** Kind of training method used. See LogisticRegression::Methods. */
    /** @see setTrainMethod */
    CV_WRAP virtual int getTrainMethod() const = 0;
    /** @copybrief getTrainMethod @see getTrainMethod */
    CV_WRAP virtual void setTrainMethod(int val) = 0;

    /** Specifies the number of training samples taken in each step of Mini-Batch Gradient
    Descent. It is used only with the LogisticRegression::MINI_BATCH training algorithm. It
    has to take values less than the total number of training samples. */
    /** @see setMiniBatchSize */
    CV_WRAP virtual int getMiniBatchSize() const = 0;
    /** @copybrief getMiniBatchSize @see getMiniBatchSize */
    CV_WRAP virtual void setMiniBatchSize(int val) = 0;

    /** Termination criteria of the algorithm. */
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

    //! Regularization kinds
    enum RegKinds {
        REG_DISABLE = -1, //!< Regularization disabled
        REG_L1 = 0, //!< %L1 norm
        REG_L2 = 1 //!< %L2 norm
    };

    //! Training methods
    enum Methods {
        BATCH = 0,
        MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method.
    };

    /** @brief Predicts responses for input samples and returns a float type.

    @param samples The input data for the prediction algorithm. Matrix [m x n], where each row
        contains variables (features) of one object being classified. Should have data type CV_32F.
    @param results Predicted labels as a column matrix of type CV_32S.
    @param flags Not used.
    */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0;

    /** @brief This function returns the trained parameters arranged across rows.

    For a two-class classification problem, it returns a row matrix. It returns the learnt parameters of
    the Logistic Regression as a matrix of type CV_32F.
    */
    CV_WRAP virtual Mat get_learnt_thetas() const = 0;

    /** @brief Creates empty model.

    Creates a Logistic Regression model with the given parameters.
    */
    CV_WRAP static Ptr<LogisticRegression> create();

    /** @brief Loads and creates a serialized LogisticRegression from a file
     *
     * Use LogisticRegression::save to serialize and store a LogisticRegression model to disk.
     * Load the LogisticRegression model from this file again, by calling this function with the path to the file.
     * Optionally specify the name of the file node containing the classifier.
     *
     * @param filepath path to the serialized LogisticRegression model
     * @param nodeName name of the node containing the classifier
     */
    CV_WRAP static Ptr<LogisticRegression> load(const String& filepath, const String& nodeName = String());
};
/****************************************************************************************\
*                       Stochastic Gradient Descent SVM Classifier                       *
\****************************************************************************************/

/*!
@brief Stochastic Gradient Descent SVM classifier

SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach,
as presented in @cite bottou2010large.

The classifier has the following parameters:
- model type,
- margin type,
- margin regularization (\f$\lambda\f$),
- initial step size (\f$\gamma_0\f$),
- step decreasing power (\f$c\f$),
- and termination criteria.

The model type may have one of the following values: \ref SGD and \ref ASGD.

- \ref SGD is the classic version of the SVMSGD classifier: every next step is calculated by the formula
  \f[w_{t+1} = w_t - \gamma(t) \frac{dQ_i}{dw} |_{w = w_t}\f]
  where
  - \f$w_t\f$ is the weights vector for the decision function at step \f$t\f$,
  - \f$\gamma(t)\f$ is the step size of model parameters at iteration \f$t\f$; it is decreased on each step by the formula
    \f$\gamma(t) = \gamma_0 (1 + \lambda \gamma_0 t) ^ {-c}\f$,
  - \f$Q_i\f$ is the target functional from the SVM task for the sample with number \f$i\f$; this sample is chosen stochastically on each step of the algorithm.

- \ref ASGD is the Average Stochastic Gradient Descent SVM classifier. The ASGD classifier averages the weights vector on each step of the algorithm by the formula
  \f$\widehat{w}_{t+1} = \frac{t}{1+t}\widehat{w}_{t} + \frac{1}{1+t}w_{t+1}\f$

The recommended model type is ASGD (following @cite bottou2010large).

The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN.

- You should use \ref HARD_MARGIN type if you have linearly separable sets.
- You should use \ref SOFT_MARGIN type if you have non-linearly separable sets or sets with outliers.
- In the general case (if you know nothing about the linear separability of your sets), use SOFT_MARGIN.

The other parameters may be described as follows:
- The margin regularization parameter is responsible for decreasing the weights at each step and for the strength of the restrictions on outliers
  (the smaller the parameter, the smaller the probability that an outlier will be ignored).
  The recommended value for the SGD model is 0.0001, for the ASGD model 0.00001.

- The initial step size parameter is the initial value for the step size \f$\gamma(t)\f$.
  You will have to find the best initial step size for your problem.

- The step decreasing power is the power parameter for the \f$\gamma(t)\f$ decreasing formula mentioned above.
  The recommended value for the SGD model is 1, for the ASGD model 0.75.

- The termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS.
  You will have to find the best termination criteria for your problem.

Note that the parameters margin regularization, initial step size, and step decreasing power should be positive.

To use the SVMSGD algorithm do as follows:

- first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via the functions setSvmsgdType(),
  setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower().

- then the SVM model can be trained using the train features and the corresponding labels by the method train().

- after that, the label of a new feature vector can be predicted using the method predict().

@code
// Create empty object
cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();

// Train the Stochastic Gradient Descent SVM
svmsgd->train(trainData);

// Predict labels for the new samples
svmsgd->predict(samples, responses);
@endcode
*/
class CV_EXPORTS_W SVMSGD : public cv::ml::StatModel
{
public:
    /** SVMSGD type.
    ASGD is often the preferable choice. */
    enum SvmsgdType
    {
        SGD, //!< Stochastic Gradient Descent
        ASGD //!< Average Stochastic Gradient Descent
    };

    /** Margin type.*/
    enum MarginType
    {
        SOFT_MARGIN, //!< General case, suits the case of non-linearly separable sets, allows outliers.
        HARD_MARGIN  //!< More accurate for the case of linearly separable sets.
    };

    /**
     * @return the weights of the trained model (decision function f(x) = weights * x + shift).
     */
    CV_WRAP virtual Mat getWeights() = 0;

    /**
     * @return the shift of the trained model (decision function f(x) = weights * x + shift).
     */
    CV_WRAP virtual float getShift() = 0;

    /** @brief Creates empty model.
     * Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to
     * find the best parameters for your problem or use setOptimalParameters() to set some default parameters.
     */
    CV_WRAP static Ptr<SVMSGD> create();

    /** @brief Loads and creates a serialized SVMSGD from a file
     *
     * Use SVMSGD::save to serialize and store an SVMSGD to disk.
     * Load the SVMSGD from this file again, by calling this function with the path to the file.
     * Optionally specify the name of the file node containing the classifier.
     *
     * @param filepath path to the serialized SVMSGD
     * @param nodeName name of the node containing the classifier
     */
    CV_WRAP static Ptr<SVMSGD> load(const String& filepath, const String& nodeName = String());

    /** @brief Function sets optimal parameter values for the chosen SVM SGD model.
     * @param svmsgdType is the type of SVMSGD classifier.
     * @param marginType is the type of margin constraint.
     */
    CV_WRAP virtual void setOptimalParameters(int svmsgdType = SVMSGD::ASGD, int marginType = SVMSGD::SOFT_MARGIN) = 0;

    /** @brief %Algorithm type, one of SVMSGD::SvmsgdType. */
    /** @see setSvmsgdType */
    CV_WRAP virtual int getSvmsgdType() const = 0;
    /** @copybrief getSvmsgdType @see getSvmsgdType */
    CV_WRAP virtual void setSvmsgdType(int svmsgdType) = 0;

    /** @brief %Margin type, one of SVMSGD::MarginType. */
    /** @see setMarginType */
    CV_WRAP virtual int getMarginType() const = 0;
    /** @copybrief getMarginType @see getMarginType */
    CV_WRAP virtual void setMarginType(int marginType) = 0;

    /** @brief Parameter marginRegularization of a %SVMSGD optimization problem. */
    /** @see setMarginRegularization */
    CV_WRAP virtual float getMarginRegularization() const = 0;
    /** @copybrief getMarginRegularization @see getMarginRegularization */
    CV_WRAP virtual void setMarginRegularization(float marginRegularization) = 0;

    /** @brief Parameter initialStepSize of a %SVMSGD optimization problem. */
    /** @see setInitialStepSize */
    CV_WRAP virtual float getInitialStepSize() const = 0;
    /** @copybrief getInitialStepSize @see getInitialStepSize */
    CV_WRAP virtual void setInitialStepSize(float InitialStepSize) = 0;

    /** @brief Parameter stepDecreasingPower of a %SVMSGD optimization problem. */
    /** @see setStepDecreasingPower */
    CV_WRAP virtual float getStepDecreasingPower() const = 0;
    /** @copybrief getStepDecreasingPower @see getStepDecreasingPower */
    CV_WRAP virtual void setStepDecreasingPower(float stepDecreasingPower) = 0;

    /** @brief Termination criteria of the training algorithm.
    You can specify the maximum number of iterations (maxCount) and/or how much the error could
    change between the iterations to make the algorithm continue (epsilon).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;
};
/****************************************************************************************\
*                          Auxiliary functions declarations                              *
\****************************************************************************************/

/** @brief Generates _samples_ from a multivariate normal distribution

@param mean an average row vector
@param cov a symmetric covariance matrix
@param nsamples the number of samples to generate
@param samples the returned samples array
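
A hedged sketch (illustrative values): draw 100 samples from a 2-D standard normal distribution:
@code
Mat mean = (Mat_<float>(1, 2) << 0.f, 0.f);
Mat cov  = (Mat_<float>(2, 2) << 1.f, 0.f,
                                 0.f, 1.f);
Mat samples;
randMVNormal(mean, cov, 100, samples);
@endcode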
*/
CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples);

/** @brief Creates a test set */
CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,
                                                OutputArray samples, OutputArray responses);

/****************************************************************************************\
*                               Simulated annealing solver                               *
\****************************************************************************************/

#ifdef CV_DOXYGEN
/** @brief This class declares an example interface for the system state used in the simulated annealing optimization algorithm.

@note This class is not defined in C++ code and can't be used directly - you need your own implementation with the same methods.
*/
struct SimulatedAnnealingSolverSystem
{
    /** Gives the energy value for a state of the system.*/
    double energy() const;
    /** Function which changes the state of the system (random perturbation).*/
    void changeState();
    /** Function to revert to the previous state. Can be called only once after changeState(). */
    void reverseState();
};
#endif // CV_DOXYGEN

/** @brief Implements simulated annealing for optimization.

See @cite Kirkpatrick83 for details.

@param solverSystem optimization system (see SimulatedAnnealingSolverSystem)
@param initialTemperature initial temperature
@param finalTemperature final temperature
@param coolingRatio temperature step multiplier
@param iterationsPerStep number of iterations per temperature changing step
@param lastTemperature optional output for the last used temperature
@param rngEnergy specifies a custom random number generator (cv::theRNG() by default)
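
A hedged sketch of a user-defined system minimizing \f$f(x)=x^2\f$ (hypothetical; any type with
these three methods works):
@code
struct QuadraticSystem
{
    double x = 5.0, prevX = 5.0;
    double energy() const { return x * x; }
    void changeState() { prevX = x; x += cv::theRNG().uniform(-1.0, 1.0); }
    void reverseState() { x = prevX; }
};

QuadraticSystem system;
simulatedAnnealingSolver(system, 10.0, 0.1, 0.95, 100);
@endcode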
*/
template<class SimulatedAnnealingSolverSystem>
int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem,
     double initialTemperature, double finalTemperature, double coolingRatio,
     size_t iterationsPerStep,
     CV_OUT double* lastTemperature = NULL,
     cv::RNG& rngEnergy = cv::theRNG()
);

//! @} ml

}
}

#include <opencv2/ml/ml.inl.hpp>

#endif // __cplusplus
#endif // OPENCV_ML_HPP

/* End of file. */