  1. /***********************************************************************
  2. * Software License Agreement (BSD License)
  3. *
  4. * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
  5. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
  6. *
  7. * THE BSD LICENSE
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions
  11. * are met:
  12. *
  13. * 1. Redistributions of source code must retain the above copyright
  14. * notice, this list of conditions and the following disclaimer.
  15. * 2. Redistributions in binary form must reproduce the above copyright
  16. * notice, this list of conditions and the following disclaimer in the
  17. * documentation and/or other materials provided with the distribution.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  20. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  21. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  22. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  23. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  24. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  28. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *************************************************************************/
  30. #ifndef OPENCV_FLANN_DIST_H_
  31. #define OPENCV_FLANN_DIST_H_
  32. //! @cond IGNORED
  33. #include <cmath>
  34. #include <cstdlib>
  35. #include <string.h>
  36. #ifdef _MSC_VER
  37. typedef unsigned __int32 uint32_t;
  38. typedef unsigned __int64 uint64_t;
  39. #else
  40. #include <stdint.h>
  41. #endif
  42. #include "defines.h"
  43. #if defined _WIN32 && (defined(_M_ARM) || defined(_M_ARM64))
  44. # include <Intrin.h>
  45. #endif
  46. #if defined(__ARM_NEON__) && !defined(__CUDACC__)
  47. # include "arm_neon.h"
  48. #endif
  49. namespace cvflann
  50. {
  51. template<typename T>
  52. inline T abs(T x) { return (x<0) ? -x : x; }
  53. template<>
  54. inline int abs<int>(int x) { return ::abs(x); }
  55. template<>
  56. inline float abs<float>(float x) { return fabsf(x); }
  57. template<>
  58. inline double abs<double>(double x) { return fabs(x); }
/**
 * round<TargetType>(x): convert a float/double value to TargetType.
 *
 * The unspecialized template simply truncates (static_cast).  The
 * unsigned specializations implement round-half-up by adding 0.5 before
 * truncation (valid only for non-negative inputs -- TODO confirm callers
 * never pass negatives to the unsigned variants).  The signed int/short/
 * char specializations additionally subtract (x<0) so that negative
 * values round to the nearest integer instead of toward zero.
 *
 * NOTE(review): the long and long long specializations add 0.5 but lack
 * the (x<0) correction, so negative inputs truncate toward zero after
 * the offset (e.g. -2.7 -> -2, not -3) -- confirm whether intended.
 */
template<typename TargetType>
inline TargetType round(float x) { return static_cast<TargetType>(x); }
template<>
inline unsigned int round<unsigned int>(float x) { return static_cast<unsigned int>(x + 0.5f); }
template<>
inline unsigned short round<unsigned short>(float x) { return static_cast<unsigned short>(x + 0.5f); }
template<>
inline unsigned char round<unsigned char>(float x) { return static_cast<unsigned char>(x + 0.5f); }
template<>
inline long long round<long long>(float x) { return static_cast<long long>(x + 0.5f); }
template<>
inline long round<long>(float x) { return static_cast<long>(x + 0.5f); }
template<>
inline int round<int>(float x) { return static_cast<int>(x + 0.5f) - (x<0); }
template<>
inline short round<short>(float x) { return static_cast<short>(x + 0.5f) - (x<0); }
template<>
inline char round<char>(float x) { return static_cast<char>(x + 0.5f) - (x<0); }
/** Double-precision overload set; same rounding rules as the float set above. */
template<typename TargetType>
inline TargetType round(double x) { return static_cast<TargetType>(x); }
template<>
inline unsigned int round<unsigned int>(double x) { return static_cast<unsigned int>(x + 0.5); }
template<>
inline unsigned short round<unsigned short>(double x) { return static_cast<unsigned short>(x + 0.5); }
template<>
inline unsigned char round<unsigned char>(double x) { return static_cast<unsigned char>(x + 0.5); }
template<>
inline long long round<long long>(double x) { return static_cast<long long>(x + 0.5); }
template<>
inline long round<long>(double x) { return static_cast<long>(x + 0.5); }
template<>
inline int round<int>(double x) { return static_cast<int>(x + 0.5) - (x<0); }
template<>
inline short round<short>(double x) { return static_cast<short>(x + 0.5) - (x<0); }
template<>
inline char round<char>(double x) { return static_cast<char>(x + 0.5) - (x<0); }
/**
 * Trait mapping an element type to the type used to accumulate distance
 * results over that element type.  The listed integral types accumulate
 * into float (presumably to avoid overflow and keep fractional
 * precision when averaging -- verify against callers); any unlisted
 * type accumulates into itself.
 */
template<typename T>
struct Accumulator { typedef T Type; };
template<>
struct Accumulator<unsigned char> { typedef float Type; };
template<>
struct Accumulator<unsigned short> { typedef float Type; };
template<>
struct Accumulator<unsigned int> { typedef float Type; };
template<>
struct Accumulator<char> { typedef float Type; };
template<>
struct Accumulator<short> { typedef float Type; };
template<>
struct Accumulator<int> { typedef float Type; };
// Remove True/False first in case some platform header defines them as
// macros (presumably the reason for these #undefs), so the tag classes
// below can be declared.
#undef True
#undef False
/** Compile-time "true" tag type, used by the distance functors for
 * trait typedefs such as is_kdtree_distance / is_vector_space_distance. */
class True
{
public:
    static const bool val = true;
};
/** Compile-time "false" tag type, counterpart of True. */
class False
{
public:
    static const bool val = false;
};
  121. /*
  122. * This is a "zero iterator". It basically behaves like a zero filled
  123. * array to all algorithms that use arrays as iterators (STL style).
  124. * It's useful when there's a need to compute the distance between feature
  125. * and origin it and allows for better compiler optimisation than using a
  126. * zero-filled array.
  127. */
  128. template <typename T>
  129. struct ZeroIterator
  130. {
  131. T operator*()
  132. {
  133. return 0;
  134. }
  135. T operator[](int)
  136. {
  137. return 0;
  138. }
  139. const ZeroIterator<T>& operator ++()
  140. {
  141. return *this;
  142. }
  143. ZeroIterator<T> operator ++(int)
  144. {
  145. return *this;
  146. }
  147. ZeroIterator<T>& operator+=(int)
  148. {
  149. return *this;
  150. }
  151. };
  152. /**
  153. * Squared Euclidean distance functor.
  154. *
  155. * This is the simpler, unrolled version. This is preferable for
  156. * very low dimensionality data (eg 3D points)
  157. */
  158. template<class T>
  159. struct L2_Simple
  160. {
  161. typedef True is_kdtree_distance;
  162. typedef True is_vector_space_distance;
  163. typedef T ElementType;
  164. typedef typename Accumulator<T>::Type ResultType;
  165. typedef ResultType CentersType;
  166. template <typename Iterator1, typename Iterator2>
  167. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
  168. {
  169. ResultType result = ResultType();
  170. ResultType diff;
  171. for(size_t i = 0; i < size; ++i ) {
  172. diff = (ResultType)(*a++ - *b++);
  173. result += diff*diff;
  174. }
  175. return result;
  176. }
  177. template <typename U, typename V>
  178. inline ResultType accum_dist(const U& a, const V& b, int) const
  179. {
  180. return (a-b)*(a-b);
  181. }
  182. };
/**
 * Squared Euclidean distance functor, optimized version
 */
template<class T>
struct L2
{
    typedef True is_kdtree_distance;
    typedef True is_vector_space_distance;
    typedef T ElementType;
    typedef typename Accumulator<T>::Type ResultType;
    typedef ResultType CentersType;
    /**
     * Compute the squared Euclidean distance between two vectors.
     *
     * This is highly optimised, with loop unrolling, as it is one
     * of the most expensive inner loops.
     *
     * The computation of squared root at the end is omitted for
     * efficiency.
     *
     * When worst_dist > 0 the function may return early with a partial
     * (already too large) sum as soon as the accumulated distance
     * exceeds worst_dist; the check runs once per group of four.
     */
    template <typename Iterator1, typename Iterator2>
    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
    {
        ResultType result = ResultType();
        ResultType diff0, diff1, diff2, diff3;
        Iterator1 last = a + size;
        Iterator1 lastgroup = last - 3;
        /* Process 4 items with each loop for efficiency. */
        while (a < lastgroup) {
            diff0 = (ResultType)(a[0] - b[0]);
            diff1 = (ResultType)(a[1] - b[1]);
            diff2 = (ResultType)(a[2] - b[2]);
            diff3 = (ResultType)(a[3] - b[3]);
            /* Single combined sum keeps one fixed FP summation order. */
            result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
            a += 4;
            b += 4;
            if ((worst_dist>0)&&(result>worst_dist)) {
                return result;
            }
        }
        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
        while (a < last) {
            diff0 = (ResultType)(*a++ - *b++);
            result += diff0 * diff0;
        }
        return result;
    }
    /**
     * Partial euclidean distance, using just one dimension. This is used by the
     * kd-tree when computing partial distances while traversing the tree.
     *
     * Squared root is omitted for efficiency.
     */
    template <typename U, typename V>
    inline ResultType accum_dist(const U& a, const V& b, int) const
    {
        return (a-b)*(a-b);
    }
};
  242. /*
  243. * Manhattan distance functor, optimized version
  244. */
  245. template<class T>
  246. struct L1
  247. {
  248. typedef True is_kdtree_distance;
  249. typedef True is_vector_space_distance;
  250. typedef T ElementType;
  251. typedef typename Accumulator<T>::Type ResultType;
  252. typedef ResultType CentersType;
  253. /**
  254. * Compute the Manhattan (L_1) distance between two vectors.
  255. *
  256. * This is highly optimised, with loop unrolling, as it is one
  257. * of the most expensive inner loops.
  258. */
  259. template <typename Iterator1, typename Iterator2>
  260. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
  261. {
  262. ResultType result = ResultType();
  263. ResultType diff0, diff1, diff2, diff3;
  264. Iterator1 last = a + size;
  265. Iterator1 lastgroup = last - 3;
  266. /* Process 4 items with each loop for efficiency. */
  267. while (a < lastgroup) {
  268. diff0 = (ResultType)abs(a[0] - b[0]);
  269. diff1 = (ResultType)abs(a[1] - b[1]);
  270. diff2 = (ResultType)abs(a[2] - b[2]);
  271. diff3 = (ResultType)abs(a[3] - b[3]);
  272. result += diff0 + diff1 + diff2 + diff3;
  273. a += 4;
  274. b += 4;
  275. if ((worst_dist>0)&&(result>worst_dist)) {
  276. return result;
  277. }
  278. }
  279. /* Process last 0-3 pixels. Not needed for standard vector lengths. */
  280. while (a < last) {
  281. diff0 = (ResultType)abs(*a++ - *b++);
  282. result += diff0;
  283. }
  284. return result;
  285. }
  286. /**
  287. * Partial distance, used by the kd-tree.
  288. */
  289. template <typename U, typename V>
  290. inline ResultType accum_dist(const U& a, const V& b, int) const
  291. {
  292. return abs(a-b);
  293. }
  294. };
/**
 * Minkowski (L_p) distance functor.  The exponent p is the `order`
 * member supplied at construction; the result is the sum of
 * |a_i - b_i|^p over all dimensions (the p-th root is omitted, matching
 * the omitted square root of the L2 functor).
 */
template<class T>
struct MinkowskiDistance
{
    typedef True is_kdtree_distance;
    typedef True is_vector_space_distance;
    typedef T ElementType;
    typedef typename Accumulator<T>::Type ResultType;
    typedef ResultType CentersType;
    // Exponent p of the L_p norm.
    int order;
    MinkowskiDistance(int order_) : order(order_) {}
    /**
     * Compute the Minkowski (L_p) distance between two vectors.
     *
     * This is highly optimised, with loop unrolling, as it is one
     * of the most expensive inner loops.
     *
     * The computation of squared root at the end is omitted for
     * efficiency.  When worst_dist > 0 the loop may return early once
     * the partial sum exceeds it (checked once per group of four).
     */
    template <typename Iterator1, typename Iterator2>
    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
    {
        ResultType result = ResultType();
        ResultType diff0, diff1, diff2, diff3;
        Iterator1 last = a + size;
        Iterator1 lastgroup = last - 3;
        /* Process 4 items with each loop for efficiency. */
        while (a < lastgroup) {
            diff0 = (ResultType)abs(a[0] - b[0]);
            diff1 = (ResultType)abs(a[1] - b[1]);
            diff2 = (ResultType)abs(a[2] - b[2]);
            diff3 = (ResultType)abs(a[3] - b[3]);
            result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order);
            a += 4;
            b += 4;
            if ((worst_dist>0)&&(result>worst_dist)) {
                return result;
            }
        }
        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */
        while (a < last) {
            diff0 = (ResultType)abs(*a++ - *b++);
            result += pow(diff0,order);
        }
        return result;
    }
    /**
     * Partial distance, used by the kd-tree: |a-b|^order for a single
     * dimension.
     */
    template <typename U, typename V>
    inline ResultType accum_dist(const U& a, const V& b, int) const
    {
        return pow(static_cast<ResultType>(abs(a-b)),order);
    }
};
  350. template<class T>
  351. struct MaxDistance
  352. {
  353. typedef False is_kdtree_distance;
  354. typedef True is_vector_space_distance;
  355. typedef T ElementType;
  356. typedef typename Accumulator<T>::Type ResultType;
  357. typedef ResultType CentersType;
  358. /**
  359. * Compute the max distance (L_infinity) between two vectors.
  360. *
  361. * This distance is not a valid kdtree distance, it's not dimensionwise additive.
  362. */
  363. template <typename Iterator1, typename Iterator2>
  364. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
  365. {
  366. ResultType result = ResultType();
  367. ResultType diff0, diff1, diff2, diff3;
  368. Iterator1 last = a + size;
  369. Iterator1 lastgroup = last - 3;
  370. /* Process 4 items with each loop for efficiency. */
  371. while (a < lastgroup) {
  372. diff0 = abs(a[0] - b[0]);
  373. diff1 = abs(a[1] - b[1]);
  374. diff2 = abs(a[2] - b[2]);
  375. diff3 = abs(a[3] - b[3]);
  376. if (diff0>result) {result = diff0; }
  377. if (diff1>result) {result = diff1; }
  378. if (diff2>result) {result = diff2; }
  379. if (diff3>result) {result = diff3; }
  380. a += 4;
  381. b += 4;
  382. if ((worst_dist>0)&&(result>worst_dist)) {
  383. return result;
  384. }
  385. }
  386. /* Process last 0-3 pixels. Not needed for standard vector lengths. */
  387. while (a < last) {
  388. diff0 = abs(*a++ - *b++);
  389. result = (diff0>result) ? diff0 : result;
  390. }
  391. return result;
  392. }
  393. /* This distance functor is not dimension-wise additive, which
  394. * makes it an invalid kd-tree distance, not implementing the accum_dist method */
  395. };
  396. ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  397. /**
  398. * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
  399. * bit count of A exclusive XOR'ed with B
  400. */
  401. struct HammingLUT
  402. {
  403. typedef False is_kdtree_distance;
  404. typedef False is_vector_space_distance;
  405. typedef unsigned char ElementType;
  406. typedef int ResultType;
  407. typedef ElementType CentersType;
  408. /** this will count the bits in a ^ b
  409. */
  410. template<typename Iterator2>
  411. ResultType operator()(const unsigned char* a, const Iterator2 b, size_t size) const
  412. {
  413. static const uchar popCountTable[] =
  414. {
  415. 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
  416. 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
  417. 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
  418. 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
  419. 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
  420. 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
  421. 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
  422. 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  423. };
  424. ResultType result = 0;
  425. const unsigned char* b2 = reinterpret_cast<const unsigned char*> (b);
  426. for (size_t i = 0; i < size; i++) {
  427. result += popCountTable[a[i] ^ b2[i]];
  428. }
  429. return result;
  430. }
  431. ResultType operator()(const unsigned char* a, const ZeroIterator<unsigned char> b, size_t size) const
  432. {
  433. (void)b;
  434. static const uchar popCountTable[] =
  435. {
  436. 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
  437. 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
  438. 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
  439. 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
  440. 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
  441. 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
  442. 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
  443. 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  444. };
  445. ResultType result = 0;
  446. for (size_t i = 0; i < size; i++) {
  447. result += popCountTable[a[i]];
  448. }
  449. return result;
  450. }
  451. };
/**
 * Hamming distance functor (pop count between two binary vectors, i.e. xor them and count the number of bits set)
 * That code was taken from brief.cpp in OpenCV
 */
template<class T>
struct Hamming
{
    typedef False is_kdtree_distance;
    typedef False is_vector_space_distance;
    typedef T ElementType;
    typedef int ResultType;
    typedef ElementType CentersType;
    /**
     * Popcount of (a ^ b) over `size` bytes.  Three compile-time paths:
     *  - ARM NEON: 16 bytes per iteration; assumes `size` is a multiple
     *    of 16 (no tail handling here) -- TODO confirm callers guarantee
     *    this;
     *  - GCC-compatible: 8 bytes at a time via __builtin_popcountll,
     *    with a memcpy-masked tail when size is not divisible by 8;
     *  - fallback: byte-wise table lookup through HammingLUT.
     */
    template<typename Iterator1, typename Iterator2>
    ResultType operator()(const Iterator1 a, const Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
    {
        ResultType result = 0;
#if defined(__ARM_NEON__) && !defined(__CUDACC__)
        {
            const unsigned char* a2 = reinterpret_cast<const unsigned char*> (a);
            const unsigned char* b2 = reinterpret_cast<const unsigned char*> (b);
            uint32x4_t bits = vmovq_n_u32(0);
            for (size_t i = 0; i < size; i += 16) {
                uint8x16_t A_vec = vld1q_u8 (a2 + i);
                uint8x16_t B_vec = vld1q_u8 (b2 + i);
                uint8x16_t AxorB = veorq_u8 (A_vec, B_vec);
                uint8x16_t bitsSet = vcntq_u8 (AxorB);       // per-byte popcount
                uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);   // pairwise widening adds
                uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);
                bits = vaddq_u32(bits, bitSet4);
            }
            uint64x2_t bitSet2 = vpaddlq_u32 (bits);
            // Lanes 0 and 2 of the s32 view are the low 32-bit words of the
            // two 64-bit partial sums; adding them gives the total count.
            result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0);
            result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2);
        }
#elif defined(__GNUC__)
        {
            //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll)
            typedef unsigned long long pop_t;
            const size_t modulo = size % sizeof(pop_t);
            const pop_t* a2 = reinterpret_cast<const pop_t*> (a);
            const pop_t* b2 = reinterpret_cast<const pop_t*> (b);
            const pop_t* a2_end = a2 + (size / sizeof(pop_t));
            for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2));
            if (modulo) {
                //in the case where size is not dividable by sizeof(size_t)
                //need to mask off the bits at the end
                pop_t a_final = 0, b_final = 0;
                memcpy(&a_final, a2, modulo);
                memcpy(&b_final, b2, modulo);
                result += __builtin_popcountll(a_final ^ b_final);
            }
        }
#else // NO NEON and NOT GNUC
        HammingLUT lut;
        result = lut(reinterpret_cast<const unsigned char*> (a),
                     reinterpret_cast<const unsigned char*> (b), size);
#endif
        return result;
    }
    /**
     * Popcount of `a` alone (Hamming distance to the zero vector);
     * same three compile-time paths and the same NEON size-multiple-of-16
     * assumption as the two-argument overload above.
     */
    template<typename Iterator1>
    ResultType operator()(const Iterator1 a, ZeroIterator<unsigned char> b, size_t size, ResultType /*worst_dist*/ = -1) const
    {
        (void)b;
        ResultType result = 0;
#if defined(__ARM_NEON__) && !defined(__CUDACC__)
        {
            const unsigned char* a2 = reinterpret_cast<const unsigned char*> (a);
            uint32x4_t bits = vmovq_n_u32(0);
            for (size_t i = 0; i < size; i += 16) {
                uint8x16_t A_vec = vld1q_u8 (a2 + i);
                uint8x16_t bitsSet = vcntq_u8 (A_vec);
                uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);
                uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);
                bits = vaddq_u32(bits, bitSet4);
            }
            uint64x2_t bitSet2 = vpaddlq_u32 (bits);
            result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0);
            result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2);
        }
#elif defined(__GNUC__)
        {
            //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll)
            typedef unsigned long long pop_t;
            const size_t modulo = size % sizeof(pop_t);
            const pop_t* a2 = reinterpret_cast<const pop_t*> (a);
            const pop_t* a2_end = a2 + (size / sizeof(pop_t));
            for (; a2 != a2_end; ++a2) result += __builtin_popcountll(*a2);
            if (modulo) {
                //in the case where size is not dividable by sizeof(size_t)
                //need to mask off the bits at the end
                pop_t a_final = 0;
                memcpy(&a_final, a2, modulo);
                result += __builtin_popcountll(a_final);
            }
        }
#else // NO NEON and NOT GNUC
        HammingLUT lut;
        result = lut(reinterpret_cast<const unsigned char*> (a), b, size);
#endif
        return result;
    }
};
/**
 * Word-at-a-time Hamming distance functor using bit-parallel popcounts
 * (no lookup table, no compiler builtins).  `size` is in bytes and must
 * be a multiple of long_word_size_.
 */
template<typename T>
struct Hamming2
{
    typedef False is_kdtree_distance;
    typedef False is_vector_space_distance;
    typedef T ElementType;
    typedef int ResultType;
    typedef ElementType CentersType;
    /** Bit-parallel 32-bit popcount.  This is popcount_3() from:
     * http://en.wikipedia.org/wiki/Hamming_weight */
    unsigned int popcnt32(uint32_t n) const
    {
        n -= ((n >> 1) & 0x55555555);
        n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
        return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24;
    }
#ifdef FLANN_PLATFORM_64_BIT
    /** 64-bit variant of the same bit-parallel popcount. */
    unsigned int popcnt64(uint64_t n) const
    {
        n -= ((n >> 1) & 0x5555555555555555);
        n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);
        return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56;
    }
#endif
    /**
     * Popcount of (a ^ b), one machine word per iteration.
     * NOTE(review): the assert message says "i.e. 8", but on 32-bit
     * builds long_word_size_ is 4 bytes -- confirm the intended wording.
     */
    template <typename Iterator1, typename Iterator2>
    ResultType operator()(const Iterator1 a, const Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
    {
        CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)");
#ifdef FLANN_PLATFORM_64_BIT
        const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);
        const uint64_t* pb = reinterpret_cast<const uint64_t*>(b);
        ResultType result = 0;
        size /= long_word_size_;   // bytes -> words
        for(size_t i = 0; i < size; ++i ) {
            result += popcnt64(*pa ^ *pb);
            ++pa;
            ++pb;
        }
#else
        const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);
        const uint32_t* pb = reinterpret_cast<const uint32_t*>(b);
        ResultType result = 0;
        size /= long_word_size_;   // bytes -> words
        for(size_t i = 0; i < size; ++i ) {
            result += popcnt32(*pa ^ *pb);
            ++pa;
            ++pb;
        }
#endif
        return result;
    }
    /** Popcount of `a` alone (Hamming distance to the zero vector). */
    template <typename Iterator1>
    ResultType operator()(const Iterator1 a, ZeroIterator<unsigned char> b, size_t size, ResultType /*worst_dist*/ = -1) const
    {
        CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)");
        (void)b;
#ifdef FLANN_PLATFORM_64_BIT
        const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);
        ResultType result = 0;
        size /= long_word_size_;   // bytes -> words
        for(size_t i = 0; i < size; ++i ) {
            result += popcnt64(*pa);
            ++pa;
        }
#else
        const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);
        ResultType result = 0;
        size /= long_word_size_;   // bytes -> words
        for(size_t i = 0; i < size; ++i ) {
            result += popcnt32(*pa);
            ++pa;
        }
#endif
        return result;
    }
private:
    // Machine word size in bytes: 8 on 64-bit platforms, 4 otherwise.
#ifdef FLANN_PLATFORM_64_BIT
    static const size_t long_word_size_ = sizeof(uint64_t)/sizeof(unsigned char);
#else
    static const size_t long_word_size_ = sizeof(uint32_t)/sizeof(unsigned char);
#endif
};
  636. ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * LUT-based distance over byte strings interpreted as packed 2-bit
 * symbols (four per byte; "DNAmming" suggests DNA bases -- presumably
 * A/C/G/T, verify against the encoder).  The table maps a XOR of two
 * packed bytes to the number of 2-bit groups that differ: any non-zero
 * 2-bit group in the XOR counts as one mismatch (e.g. table[4] = 1,
 * table[5] = 2), so the result is the count of differing symbols, not
 * differing bits.
 */
struct DNAmmingLUT
{
    typedef False is_kdtree_distance;
    typedef False is_vector_space_distance;
    typedef unsigned char ElementType;
    typedef int ResultType;
    typedef ElementType CentersType;
    /** Count the differing 2-bit symbols between a and b over `size` bytes. */
    template<typename Iterator2>
    ResultType operator()(const unsigned char* a, const Iterator2 b, size_t size) const
    {
        static const uchar popCountTable[] =
        {
            0, 1, 1, 1, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4
        };
        ResultType result = 0;
        const unsigned char* b2 = reinterpret_cast<const unsigned char*> (b);
        for (size_t i = 0; i < size; i++) {
            result += popCountTable[a[i] ^ b2[i]];
        }
        return result;
    }
    /** Count the non-zero 2-bit symbols of `a` (distance to the all-zero string). */
    ResultType operator()(const unsigned char* a, const ZeroIterator<unsigned char> b, size_t size) const
    {
        (void)b;
        static const uchar popCountTable[] =
        {
            0, 1, 1, 1, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4,
            2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4
        };
        ResultType result = 0;
        for (size_t i = 0; i < size; i++) {
            result += popCountTable[a[i]];
        }
        return result;
    }
};
  688. template<typename T>
  689. struct DNAmming2
  690. {
  691. typedef False is_kdtree_distance;
  692. typedef False is_vector_space_distance;
  693. typedef T ElementType;
  694. typedef int ResultType;
  695. typedef ElementType CentersType;
  696. /** This is popcount_3() from:
  697. * http://en.wikipedia.org/wiki/Hamming_weight */
  698. unsigned int popcnt32(uint32_t n) const
  699. {
  700. n = ((n >> 1) | n) & 0x55555555;
  701. n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
  702. return (((n + (n >> 4))& 0x0F0F0F0F)* 0x01010101) >> 24;
  703. }
  704. #ifdef FLANN_PLATFORM_64_BIT
  705. unsigned int popcnt64(uint64_t n) const
  706. {
  707. n = ((n >> 1) | n) & 0x5555555555555555;
  708. n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);
  709. return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56;
  710. }
  711. #endif
  712. template <typename Iterator1, typename Iterator2>
  713. ResultType operator()(const Iterator1 a, const Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
  714. {
  715. CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)");
  716. #ifdef FLANN_PLATFORM_64_BIT
  717. const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);
  718. const uint64_t* pb = reinterpret_cast<const uint64_t*>(b);
  719. ResultType result = 0;
  720. size /= long_word_size_;
  721. for(size_t i = 0; i < size; ++i ) {
  722. result += popcnt64(*pa ^ *pb);
  723. ++pa;
  724. ++pb;
  725. }
  726. #else
  727. const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);
  728. const uint32_t* pb = reinterpret_cast<const uint32_t*>(b);
  729. ResultType result = 0;
  730. size /= long_word_size_;
  731. for(size_t i = 0; i < size; ++i ) {
  732. result += popcnt32(*pa ^ *pb);
  733. ++pa;
  734. ++pb;
  735. }
  736. #endif
  737. return result;
  738. }
  739. template <typename Iterator1>
  740. ResultType operator()(const Iterator1 a, ZeroIterator<unsigned char> b, size_t size, ResultType /*worst_dist*/ = -1) const
  741. {
  742. CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)");
  743. (void)b;
  744. #ifdef FLANN_PLATFORM_64_BIT
  745. const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);
  746. ResultType result = 0;
  747. size /= long_word_size_;
  748. for(size_t i = 0; i < size; ++i ) {
  749. result += popcnt64(*pa);
  750. ++pa;
  751. }
  752. #else
  753. const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);
  754. ResultType result = 0;
  755. size /= long_word_size_;
  756. for(size_t i = 0; i < size; ++i ) {
  757. result += popcnt32(*pa);
  758. ++pa;
  759. }
  760. #endif
  761. return result;
  762. }
  763. private:
  764. #ifdef FLANN_PLATFORM_64_BIT
  765. static const size_t long_word_size_= sizeof(uint64_t)/sizeof(unsigned char);
  766. #else
  767. static const size_t long_word_size_= sizeof(uint32_t)/sizeof(unsigned char);
  768. #endif
  769. };
  770. template<class T>
  771. struct HistIntersectionDistance
  772. {
  773. typedef True is_kdtree_distance;
  774. typedef True is_vector_space_distance;
  775. typedef T ElementType;
  776. typedef typename Accumulator<T>::Type ResultType;
  777. typedef ResultType CentersType;
  778. /**
  779. * Compute the histogram intersection distance
  780. */
  781. template <typename Iterator1, typename Iterator2>
  782. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
  783. {
  784. ResultType result = ResultType();
  785. ResultType min0, min1, min2, min3;
  786. Iterator1 last = a + size;
  787. Iterator1 lastgroup = last - 3;
  788. /* Process 4 items with each loop for efficiency. */
  789. while (a < lastgroup) {
  790. min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]);
  791. min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]);
  792. min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]);
  793. min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]);
  794. result += min0 + min1 + min2 + min3;
  795. a += 4;
  796. b += 4;
  797. if ((worst_dist>0)&&(result>worst_dist)) {
  798. return result;
  799. }
  800. }
  801. /* Process last 0-3 pixels. Not needed for standard vector lengths. */
  802. while (a < last) {
  803. min0 = (ResultType)(*a < *b ? *a : *b);
  804. result += min0;
  805. ++a;
  806. ++b;
  807. }
  808. return result;
  809. }
  810. /**
  811. * Partial distance, used by the kd-tree.
  812. */
  813. template <typename U, typename V>
  814. inline ResultType accum_dist(const U& a, const V& b, int) const
  815. {
  816. return a<b ? a : b;
  817. }
  818. };
  819. template<class T>
  820. struct HellingerDistance
  821. {
  822. typedef True is_kdtree_distance;
  823. typedef True is_vector_space_distance;
  824. typedef T ElementType;
  825. typedef typename Accumulator<T>::Type ResultType;
  826. typedef ResultType CentersType;
  827. /**
  828. * Compute the Hellinger distance
  829. */
  830. template <typename Iterator1, typename Iterator2>
  831. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
  832. {
  833. ResultType result = ResultType();
  834. ResultType diff0, diff1, diff2, diff3;
  835. Iterator1 last = a + size;
  836. Iterator1 lastgroup = last - 3;
  837. /* Process 4 items with each loop for efficiency. */
  838. while (a < lastgroup) {
  839. diff0 = sqrt(static_cast<ResultType>(a[0])) - sqrt(static_cast<ResultType>(b[0]));
  840. diff1 = sqrt(static_cast<ResultType>(a[1])) - sqrt(static_cast<ResultType>(b[1]));
  841. diff2 = sqrt(static_cast<ResultType>(a[2])) - sqrt(static_cast<ResultType>(b[2]));
  842. diff3 = sqrt(static_cast<ResultType>(a[3])) - sqrt(static_cast<ResultType>(b[3]));
  843. result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
  844. a += 4;
  845. b += 4;
  846. }
  847. while (a < last) {
  848. diff0 = sqrt(static_cast<ResultType>(*a++)) - sqrt(static_cast<ResultType>(*b++));
  849. result += diff0 * diff0;
  850. }
  851. return result;
  852. }
  853. /**
  854. * Partial distance, used by the kd-tree.
  855. */
  856. template <typename U, typename V>
  857. inline ResultType accum_dist(const U& a, const V& b, int) const
  858. {
  859. ResultType diff = sqrt(static_cast<ResultType>(a)) - sqrt(static_cast<ResultType>(b));
  860. return diff * diff;
  861. }
  862. };
  863. template<class T>
  864. struct ChiSquareDistance
  865. {
  866. typedef True is_kdtree_distance;
  867. typedef True is_vector_space_distance;
  868. typedef T ElementType;
  869. typedef typename Accumulator<T>::Type ResultType;
  870. typedef ResultType CentersType;
  871. /**
  872. * Compute the chi-square distance
  873. */
  874. template <typename Iterator1, typename Iterator2>
  875. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
  876. {
  877. ResultType result = ResultType();
  878. ResultType sum, diff;
  879. Iterator1 last = a + size;
  880. while (a < last) {
  881. sum = (ResultType)(*a + *b);
  882. if (sum>0) {
  883. diff = (ResultType)(*a - *b);
  884. result += diff*diff/sum;
  885. }
  886. ++a;
  887. ++b;
  888. if ((worst_dist>0)&&(result>worst_dist)) {
  889. return result;
  890. }
  891. }
  892. return result;
  893. }
  894. /**
  895. * Partial distance, used by the kd-tree.
  896. */
  897. template <typename U, typename V>
  898. inline ResultType accum_dist(const U& a, const V& b, int) const
  899. {
  900. ResultType result = ResultType();
  901. ResultType sum, diff;
  902. sum = (ResultType)(a+b);
  903. if (sum>0) {
  904. diff = (ResultType)(a-b);
  905. result = diff*diff/sum;
  906. }
  907. return result;
  908. }
  909. };
  910. template<class T>
  911. struct KL_Divergence
  912. {
  913. typedef True is_kdtree_distance;
  914. typedef True is_vector_space_distance;
  915. typedef T ElementType;
  916. typedef typename Accumulator<T>::Type ResultType;
  917. typedef ResultType CentersType;
  918. /**
  919. * Compute the Kullback-Leibler divergence
  920. */
  921. template <typename Iterator1, typename Iterator2>
  922. ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
  923. {
  924. ResultType result = ResultType();
  925. Iterator1 last = a + size;
  926. while (a < last) {
  927. if ( *a != 0 && *b != 0 ) {
  928. ResultType ratio = (ResultType)(*a / *b);
  929. if (ratio>0) {
  930. result += *a * log(ratio);
  931. }
  932. }
  933. ++a;
  934. ++b;
  935. if ((worst_dist>0)&&(result>worst_dist)) {
  936. return result;
  937. }
  938. }
  939. return result;
  940. }
  941. /**
  942. * Partial distance, used by the kd-tree.
  943. */
  944. template <typename U, typename V>
  945. inline ResultType accum_dist(const U& a, const V& b, int) const
  946. {
  947. ResultType result = ResultType();
  948. if( a != 0 && b != 0 ) {
  949. ResultType ratio = (ResultType)(a / b);
  950. if (ratio>0) {
  951. result = a * log(ratio);
  952. }
  953. }
  954. return result;
  955. }
  956. };
/*
 * Some of the distances above already return squared values (e.g. L2), while
 * others do not (e.g. Hamming). In KMeans++, for instance, we need to be sure
 * we are working with squared (^2) distances, hence the following templates
 * to ensure that.
 */
  962. template <typename Distance, typename ElementType>
  963. struct squareDistance
  964. {
  965. typedef typename Distance::ResultType ResultType;
  966. ResultType operator()( ResultType dist ) { return dist*dist; }
  967. };
  968. template <typename ElementType>
  969. struct squareDistance<L2_Simple<ElementType>, ElementType>
  970. {
  971. typedef typename L2_Simple<ElementType>::ResultType ResultType;
  972. ResultType operator()( ResultType dist ) { return dist; }
  973. };
  974. template <typename ElementType>
  975. struct squareDistance<L2<ElementType>, ElementType>
  976. {
  977. typedef typename L2<ElementType>::ResultType ResultType;
  978. ResultType operator()( ResultType dist ) { return dist; }
  979. };
  980. template <typename ElementType>
  981. struct squareDistance<MinkowskiDistance<ElementType>, ElementType>
  982. {
  983. typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;
  984. ResultType operator()( ResultType dist ) { return dist; }
  985. };
  986. template <typename ElementType>
  987. struct squareDistance<HellingerDistance<ElementType>, ElementType>
  988. {
  989. typedef typename HellingerDistance<ElementType>::ResultType ResultType;
  990. ResultType operator()( ResultType dist ) { return dist; }
  991. };
  992. template <typename ElementType>
  993. struct squareDistance<ChiSquareDistance<ElementType>, ElementType>
  994. {
  995. typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;
  996. ResultType operator()( ResultType dist ) { return dist; }
  997. };
  998. template <typename Distance>
  999. typename Distance::ResultType ensureSquareDistance( typename Distance::ResultType dist )
  1000. {
  1001. typedef typename Distance::ElementType ElementType;
  1002. squareDistance<Distance, ElementType> dummy;
  1003. return dummy( dist );
  1004. }
/*
 * ...a template to tell the user whether the distance they are working with
 * is actually squared
 */
  1008. template <typename Distance, typename ElementType>
  1009. struct isSquareDist
  1010. {
  1011. bool operator()() { return false; }
  1012. };
  1013. template <typename ElementType>
  1014. struct isSquareDist<L2_Simple<ElementType>, ElementType>
  1015. {
  1016. bool operator()() { return true; }
  1017. };
  1018. template <typename ElementType>
  1019. struct isSquareDist<L2<ElementType>, ElementType>
  1020. {
  1021. bool operator()() { return true; }
  1022. };
  1023. template <typename ElementType>
  1024. struct isSquareDist<MinkowskiDistance<ElementType>, ElementType>
  1025. {
  1026. bool operator()() { return true; }
  1027. };
  1028. template <typename ElementType>
  1029. struct isSquareDist<HellingerDistance<ElementType>, ElementType>
  1030. {
  1031. bool operator()() { return true; }
  1032. };
  1033. template <typename ElementType>
  1034. struct isSquareDist<ChiSquareDistance<ElementType>, ElementType>
  1035. {
  1036. bool operator()() { return true; }
  1037. };
  1038. template <typename Distance>
  1039. bool isSquareDistance()
  1040. {
  1041. typedef typename Distance::ElementType ElementType;
  1042. isSquareDist<Distance, ElementType> dummy;
  1043. return dummy();
  1044. }
/*
 * ...and a template to ensure the user gets the plain (non-squared) distance,
 * without wasting processing time on sqrt(ensureSquareDistance(dist)), which
 * would effectively compute sqrt(dist*dist) for the L1 distance, for instance.
 */
  1050. template <typename Distance, typename ElementType>
  1051. struct simpleDistance
  1052. {
  1053. typedef typename Distance::ResultType ResultType;
  1054. ResultType operator()( ResultType dist ) { return dist; }
  1055. };
  1056. template <typename ElementType>
  1057. struct simpleDistance<L2_Simple<ElementType>, ElementType>
  1058. {
  1059. typedef typename L2_Simple<ElementType>::ResultType ResultType;
  1060. ResultType operator()( ResultType dist ) { return sqrt(dist); }
  1061. };
  1062. template <typename ElementType>
  1063. struct simpleDistance<L2<ElementType>, ElementType>
  1064. {
  1065. typedef typename L2<ElementType>::ResultType ResultType;
  1066. ResultType operator()( ResultType dist ) { return sqrt(dist); }
  1067. };
  1068. template <typename ElementType>
  1069. struct simpleDistance<MinkowskiDistance<ElementType>, ElementType>
  1070. {
  1071. typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;
  1072. ResultType operator()( ResultType dist ) { return sqrt(dist); }
  1073. };
  1074. template <typename ElementType>
  1075. struct simpleDistance<HellingerDistance<ElementType>, ElementType>
  1076. {
  1077. typedef typename HellingerDistance<ElementType>::ResultType ResultType;
  1078. ResultType operator()( ResultType dist ) { return sqrt(dist); }
  1079. };
  1080. template <typename ElementType>
  1081. struct simpleDistance<ChiSquareDistance<ElementType>, ElementType>
  1082. {
  1083. typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;
  1084. ResultType operator()( ResultType dist ) { return sqrt(dist); }
  1085. };
  1086. template <typename Distance>
  1087. typename Distance::ResultType ensureSimpleDistance( typename Distance::ResultType dist )
  1088. {
  1089. typedef typename Distance::ElementType ElementType;
  1090. simpleDistance<Distance, ElementType> dummy;
  1091. return dummy( dist );
  1092. }
  1093. }
  1094. //! @endcond
  1095. #endif //OPENCV_FLANN_DIST_H_