Imgproc.h 446 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181818281838184818581868187818881898190819181928193819481958196819781988199820082018202820382048205820682078208820982108211821282138214821582168217
  1. //
  2. // This file is auto-generated. Please don't modify it!
  3. //
  4. #pragma once
  5. #ifdef __cplusplus
  6. //#import "opencv.hpp"
  7. #import "opencv2/imgproc.hpp"
  8. #import "imgproc/bindings.hpp"
  9. #else
  10. #define CV_EXPORTS
  11. #endif
  12. #import <Foundation/Foundation.h>
  13. #import "Core.h"
  14. @class CLAHE;
  15. @class FloatVector;
  16. @class GeneralizedHoughBallard;
  17. @class GeneralizedHoughGuil;
  18. @class Int4;
  19. @class IntVector;
  20. @class LineSegmentDetector;
  21. @class Mat;
  22. @class Moments;
  23. @class Point2d;
  24. @class Point2f;
  25. @class Point2i;
  26. @class Rect2i;
  27. @class RotatedRect;
  28. @class Scalar;
  29. @class Size2i;
  30. @class TermCriteria;
31. // C++: enum AdaptiveThresholdTypes (cv.AdaptiveThresholdTypes)
// Mirror of cv::AdaptiveThresholdTypes: selects how the per-pixel threshold
// is computed for adaptiveThreshold (mean vs. Gaussian-weighted neighborhood).
32. typedef NS_ENUM(int, AdaptiveThresholdTypes) {
33. ADAPTIVE_THRESH_MEAN_C = 0,
34. ADAPTIVE_THRESH_GAUSSIAN_C = 1
35. };
36. // C++: enum ColorConversionCodes (cv.ColorConversionCodes)
// Mirror of cv::ColorConversionCodes (opencv2/imgproc.hpp): conversion codes
// for cvtColor. Many names are aliases sharing a numeric value with an earlier
// code; the raw values are not contiguous in declaration order (see the Bayer
// section, which reuses the gaps 46..49, 62..65 and 86..89 left above).
37. typedef NS_ENUM(int, ColorConversionCodes) {
// -- RGB/BGR channel reordering and alpha add/remove --
38. COLOR_BGR2BGRA = 0,
39. COLOR_RGB2RGBA = COLOR_BGR2BGRA,
40. COLOR_BGRA2BGR = 1,
41. COLOR_RGBA2RGB = COLOR_BGRA2BGR,
42. COLOR_BGR2RGBA = 2,
43. COLOR_RGB2BGRA = COLOR_BGR2RGBA,
44. COLOR_RGBA2BGR = 3,
45. COLOR_BGRA2RGB = COLOR_RGBA2BGR,
46. COLOR_BGR2RGB = 4,
47. COLOR_RGB2BGR = COLOR_BGR2RGB,
48. COLOR_BGRA2RGBA = 5,
49. COLOR_RGBA2BGRA = COLOR_BGRA2RGBA,
// -- To/from grayscale --
50. COLOR_BGR2GRAY = 6,
51. COLOR_RGB2GRAY = 7,
52. COLOR_GRAY2BGR = 8,
53. COLOR_GRAY2RGB = COLOR_GRAY2BGR,
54. COLOR_GRAY2BGRA = 9,
55. COLOR_GRAY2RGBA = COLOR_GRAY2BGRA,
56. COLOR_BGRA2GRAY = 10,
57. COLOR_RGBA2GRAY = 11,
// -- 16-bit packed RGB: 5-6-5 and 5-5-5 layouts --
58. COLOR_BGR2BGR565 = 12,
59. COLOR_RGB2BGR565 = 13,
60. COLOR_BGR5652BGR = 14,
61. COLOR_BGR5652RGB = 15,
62. COLOR_BGRA2BGR565 = 16,
63. COLOR_RGBA2BGR565 = 17,
64. COLOR_BGR5652BGRA = 18,
65. COLOR_BGR5652RGBA = 19,
66. COLOR_GRAY2BGR565 = 20,
67. COLOR_BGR5652GRAY = 21,
68. COLOR_BGR2BGR555 = 22,
69. COLOR_RGB2BGR555 = 23,
70. COLOR_BGR5552BGR = 24,
71. COLOR_BGR5552RGB = 25,
72. COLOR_BGRA2BGR555 = 26,
73. COLOR_RGBA2BGR555 = 27,
74. COLOR_BGR5552BGRA = 28,
75. COLOR_BGR5552RGBA = 29,
76. COLOR_GRAY2BGR555 = 30,
77. COLOR_BGR5552GRAY = 31,
// -- CIE XYZ --
78. COLOR_BGR2XYZ = 32,
79. COLOR_RGB2XYZ = 33,
80. COLOR_XYZ2BGR = 34,
81. COLOR_XYZ2RGB = 35,
// -- YCrCb --
82. COLOR_BGR2YCrCb = 36,
83. COLOR_RGB2YCrCb = 37,
84. COLOR_YCrCb2BGR = 38,
85. COLOR_YCrCb2RGB = 39,
// -- HSV / Lab / Luv / HLS --
86. COLOR_BGR2HSV = 40,
87. COLOR_RGB2HSV = 41,
88. COLOR_BGR2Lab = 44,
89. COLOR_RGB2Lab = 45,
90. COLOR_BGR2Luv = 50,
91. COLOR_RGB2Luv = 51,
92. COLOR_BGR2HLS = 52,
93. COLOR_RGB2HLS = 53,
94. COLOR_HSV2BGR = 54,
95. COLOR_HSV2RGB = 55,
96. COLOR_Lab2BGR = 56,
97. COLOR_Lab2RGB = 57,
98. COLOR_Luv2BGR = 58,
99. COLOR_Luv2RGB = 59,
100. COLOR_HLS2BGR = 60,
101. COLOR_HLS2RGB = 61,
// -- Full-range hue variants of HSV/HLS --
102. COLOR_BGR2HSV_FULL = 66,
103. COLOR_RGB2HSV_FULL = 67,
104. COLOR_BGR2HLS_FULL = 68,
105. COLOR_RGB2HLS_FULL = 69,
106. COLOR_HSV2BGR_FULL = 70,
107. COLOR_HSV2RGB_FULL = 71,
108. COLOR_HLS2BGR_FULL = 72,
109. COLOR_HLS2RGB_FULL = 73,
// -- Lab/Luv from/to LRGB/LBGR --
110. COLOR_LBGR2Lab = 74,
111. COLOR_LRGB2Lab = 75,
112. COLOR_LBGR2Luv = 76,
113. COLOR_LRGB2Luv = 77,
114. COLOR_Lab2LBGR = 78,
115. COLOR_Lab2LRGB = 79,
116. COLOR_Luv2LBGR = 80,
117. COLOR_Luv2LRGB = 81,
// -- YUV (4:4:4) --
118. COLOR_BGR2YUV = 82,
119. COLOR_RGB2YUV = 83,
120. COLOR_YUV2BGR = 84,
121. COLOR_YUV2RGB = 85,
// -- YUV 4:2:0 family to RGB/BGR (NV12/NV21 semi-planar, YV12/IYUV planar) --
122. COLOR_YUV2RGB_NV12 = 90,
123. COLOR_YUV2BGR_NV12 = 91,
124. COLOR_YUV2RGB_NV21 = 92,
125. COLOR_YUV2BGR_NV21 = 93,
126. COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
127. COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
128. COLOR_YUV2RGBA_NV12 = 94,
129. COLOR_YUV2BGRA_NV12 = 95,
130. COLOR_YUV2RGBA_NV21 = 96,
131. COLOR_YUV2BGRA_NV21 = 97,
132. COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
133. COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
134. COLOR_YUV2RGB_YV12 = 98,
135. COLOR_YUV2BGR_YV12 = 99,
136. COLOR_YUV2RGB_IYUV = 100,
137. COLOR_YUV2BGR_IYUV = 101,
138. COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
139. COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
140. COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
141. COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
142. COLOR_YUV2RGBA_YV12 = 102,
143. COLOR_YUV2BGRA_YV12 = 103,
144. COLOR_YUV2RGBA_IYUV = 104,
145. COLOR_YUV2BGRA_IYUV = 105,
146. COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
147. COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
148. COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
149. COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
// -- Luma extraction from any 4:2:0 layout (all aliases of one code) --
150. COLOR_YUV2GRAY_420 = 106,
151. COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
152. COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
153. COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
154. COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
155. COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
156. COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
157. COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
// -- YUV 4:2:2 packed formats (UYVY/YUY2/YVYU plus their aliases) --
158. COLOR_YUV2RGB_UYVY = 107,
159. COLOR_YUV2BGR_UYVY = 108,
160. COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
161. COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
162. COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
163. COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
164. COLOR_YUV2RGBA_UYVY = 111,
165. COLOR_YUV2BGRA_UYVY = 112,
166. COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
167. COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
168. COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
169. COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
170. COLOR_YUV2RGB_YUY2 = 115,
171. COLOR_YUV2BGR_YUY2 = 116,
172. COLOR_YUV2RGB_YVYU = 117,
173. COLOR_YUV2BGR_YVYU = 118,
174. COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
175. COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
176. COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
177. COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
178. COLOR_YUV2RGBA_YUY2 = 119,
179. COLOR_YUV2BGRA_YUY2 = 120,
180. COLOR_YUV2RGBA_YVYU = 121,
181. COLOR_YUV2BGRA_YVYU = 122,
182. COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
183. COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
184. COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
185. COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
186. COLOR_YUV2GRAY_UYVY = 123,
187. COLOR_YUV2GRAY_YUY2 = 124,
188. COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
189. COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
190. COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
191. COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
192. COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
// -- Alpha premultiplication --
193. COLOR_RGBA2mRGBA = 125,
194. COLOR_mRGBA2RGBA = 126,
// -- RGB/BGR to YUV 4:2:0 --
195. COLOR_RGB2YUV_I420 = 127,
196. COLOR_BGR2YUV_I420 = 128,
197. COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
198. COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
199. COLOR_RGBA2YUV_I420 = 129,
200. COLOR_BGRA2YUV_I420 = 130,
201. COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
202. COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
203. COLOR_RGB2YUV_YV12 = 131,
204. COLOR_BGR2YUV_YV12 = 132,
205. COLOR_RGBA2YUV_YV12 = 133,
206. COLOR_BGRA2YUV_YV12 = 134,
// -- Bayer demosaicing (values 46..49 and 86..89 fill the gaps left above) --
207. COLOR_BayerBG2BGR = 46,
208. COLOR_BayerGB2BGR = 47,
209. COLOR_BayerRG2BGR = 48,
210. COLOR_BayerGR2BGR = 49,
211. COLOR_BayerRGGB2BGR = COLOR_BayerBG2BGR,
212. COLOR_BayerGRBG2BGR = COLOR_BayerGB2BGR,
213. COLOR_BayerBGGR2BGR = COLOR_BayerRG2BGR,
214. COLOR_BayerGBRG2BGR = COLOR_BayerGR2BGR,
215. COLOR_BayerRGGB2RGB = COLOR_BayerBGGR2BGR,
216. COLOR_BayerGRBG2RGB = COLOR_BayerGBRG2BGR,
217. COLOR_BayerBGGR2RGB = COLOR_BayerRGGB2BGR,
218. COLOR_BayerGBRG2RGB = COLOR_BayerGRBG2BGR,
219. COLOR_BayerBG2RGB = COLOR_BayerRG2BGR,
220. COLOR_BayerGB2RGB = COLOR_BayerGR2BGR,
221. COLOR_BayerRG2RGB = COLOR_BayerBG2BGR,
222. COLOR_BayerGR2RGB = COLOR_BayerGB2BGR,
223. COLOR_BayerBG2GRAY = 86,
224. COLOR_BayerGB2GRAY = 87,
225. COLOR_BayerRG2GRAY = 88,
226. COLOR_BayerGR2GRAY = 89,
227. COLOR_BayerRGGB2GRAY = COLOR_BayerBG2GRAY,
228. COLOR_BayerGRBG2GRAY = COLOR_BayerGB2GRAY,
229. COLOR_BayerBGGR2GRAY = COLOR_BayerRG2GRAY,
230. COLOR_BayerGBRG2GRAY = COLOR_BayerGR2GRAY,
// -- Bayer demosaicing, VNG variants (values 62..65) --
231. COLOR_BayerBG2BGR_VNG = 62,
232. COLOR_BayerGB2BGR_VNG = 63,
233. COLOR_BayerRG2BGR_VNG = 64,
234. COLOR_BayerGR2BGR_VNG = 65,
235. COLOR_BayerRGGB2BGR_VNG = COLOR_BayerBG2BGR_VNG,
236. COLOR_BayerGRBG2BGR_VNG = COLOR_BayerGB2BGR_VNG,
237. COLOR_BayerBGGR2BGR_VNG = COLOR_BayerRG2BGR_VNG,
238. COLOR_BayerGBRG2BGR_VNG = COLOR_BayerGR2BGR_VNG,
239. COLOR_BayerRGGB2RGB_VNG = COLOR_BayerBGGR2BGR_VNG,
240. COLOR_BayerGRBG2RGB_VNG = COLOR_BayerGBRG2BGR_VNG,
241. COLOR_BayerBGGR2RGB_VNG = COLOR_BayerRGGB2BGR_VNG,
242. COLOR_BayerGBRG2RGB_VNG = COLOR_BayerGRBG2BGR_VNG,
243. COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG,
244. COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG,
245. COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG,
246. COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG,
// -- Bayer demosaicing, EA variants --
247. COLOR_BayerBG2BGR_EA = 135,
248. COLOR_BayerGB2BGR_EA = 136,
249. COLOR_BayerRG2BGR_EA = 137,
250. COLOR_BayerGR2BGR_EA = 138,
251. COLOR_BayerRGGB2BGR_EA = COLOR_BayerBG2BGR_EA,
252. COLOR_BayerGRBG2BGR_EA = COLOR_BayerGB2BGR_EA,
253. COLOR_BayerBGGR2BGR_EA = COLOR_BayerRG2BGR_EA,
254. COLOR_BayerGBRG2BGR_EA = COLOR_BayerGR2BGR_EA,
255. COLOR_BayerRGGB2RGB_EA = COLOR_BayerBGGR2BGR_EA,
256. COLOR_BayerGRBG2RGB_EA = COLOR_BayerGBRG2BGR_EA,
257. COLOR_BayerBGGR2RGB_EA = COLOR_BayerRGGB2BGR_EA,
258. COLOR_BayerGBRG2RGB_EA = COLOR_BayerGRBG2BGR_EA,
259. COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA,
260. COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA,
261. COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA,
262. COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA,
// -- Bayer demosaicing with alpha output --
263. COLOR_BayerBG2BGRA = 139,
264. COLOR_BayerGB2BGRA = 140,
265. COLOR_BayerRG2BGRA = 141,
266. COLOR_BayerGR2BGRA = 142,
267. COLOR_BayerRGGB2BGRA = COLOR_BayerBG2BGRA,
268. COLOR_BayerGRBG2BGRA = COLOR_BayerGB2BGRA,
269. COLOR_BayerBGGR2BGRA = COLOR_BayerRG2BGRA,
270. COLOR_BayerGBRG2BGRA = COLOR_BayerGR2BGRA,
271. COLOR_BayerRGGB2RGBA = COLOR_BayerBGGR2BGRA,
272. COLOR_BayerGRBG2RGBA = COLOR_BayerGBRG2BGRA,
273. COLOR_BayerBGGR2RGBA = COLOR_BayerRGGB2BGRA,
274. COLOR_BayerGBRG2RGBA = COLOR_BayerGRBG2BGRA,
275. COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA,
276. COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA,
277. COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA,
278. COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA,
// -- Sentinel: one past the largest defined code (142) --
279. COLOR_COLORCVT_MAX = 143
280. };
281. // C++: enum ColormapTypes (cv.ColormapTypes)
// Mirror of cv::ColormapTypes: predefined color palettes for applyColorMap.
282. typedef NS_ENUM(int, ColormapTypes) {
283. COLORMAP_AUTUMN = 0,
284. COLORMAP_BONE = 1,
285. COLORMAP_JET = 2,
286. COLORMAP_WINTER = 3,
287. COLORMAP_RAINBOW = 4,
288. COLORMAP_OCEAN = 5,
289. COLORMAP_SUMMER = 6,
290. COLORMAP_SPRING = 7,
291. COLORMAP_COOL = 8,
292. COLORMAP_HSV = 9,
293. COLORMAP_PINK = 10,
294. COLORMAP_HOT = 11,
295. COLORMAP_PARULA = 12,
296. COLORMAP_MAGMA = 13,
297. COLORMAP_INFERNO = 14,
298. COLORMAP_PLASMA = 15,
299. COLORMAP_VIRIDIS = 16,
300. COLORMAP_CIVIDIS = 17,
301. COLORMAP_TWILIGHT = 18,
302. COLORMAP_TWILIGHT_SHIFTED = 19,
303. COLORMAP_TURBO = 20,
304. COLORMAP_DEEPGREEN = 21
305. };
306. // C++: enum ConnectedComponentsAlgorithmsTypes (cv.ConnectedComponentsAlgorithmsTypes)
// Mirror of cv::ConnectedComponentsAlgorithmsTypes: labeling-algorithm
// selector for connectedComponents*. CCL_DEFAULT (-1) lets OpenCV choose.
307. typedef NS_ENUM(int, ConnectedComponentsAlgorithmsTypes) {
308. CCL_DEFAULT = -1,
309. CCL_WU = 0,
310. CCL_GRANA = 1,
311. CCL_BOLELLI = 2,
312. CCL_SAUF = 3,
313. CCL_BBDT = 4,
314. CCL_SPAGHETTI = 5
315. };
316. // C++: enum ConnectedComponentsTypes (cv.ConnectedComponentsTypes)
// Mirror of cv::ConnectedComponentsTypes: column indices into the per-label
// stats matrix produced by connectedComponentsWithStats.
317. typedef NS_ENUM(int, ConnectedComponentsTypes) {
318. CC_STAT_LEFT = 0,
319. CC_STAT_TOP = 1,
320. CC_STAT_WIDTH = 2,
321. CC_STAT_HEIGHT = 3,
322. CC_STAT_AREA = 4,
// CC_STAT_MAX is the column count, not a statistic itself.
323. CC_STAT_MAX = 5
324. };
325. // C++: enum ContourApproximationModes (cv.ContourApproximationModes)
// Mirror of cv::ContourApproximationModes: contour point-storage strategy
// for findContours. Note the values start at 1, not 0.
326. typedef NS_ENUM(int, ContourApproximationModes) {
327. CHAIN_APPROX_NONE = 1,
328. CHAIN_APPROX_SIMPLE = 2,
329. CHAIN_APPROX_TC89_L1 = 3,
330. CHAIN_APPROX_TC89_KCOS = 4
331. };
332. // C++: enum DistanceTransformLabelTypes (cv.DistanceTransformLabelTypes)
// Mirror of cv::DistanceTransformLabelTypes: label semantics for
// distanceTransformWithLabels (per connected component vs. per pixel).
333. typedef NS_ENUM(int, DistanceTransformLabelTypes) {
334. DIST_LABEL_CCOMP = 0,
335. DIST_LABEL_PIXEL = 1
336. };
337. // C++: enum DistanceTransformMasks (cv.DistanceTransformMasks)
// Mirror of cv::DistanceTransformMasks: mask size for distanceTransform.
// DIST_MASK_3/5 are literal aperture sizes; DIST_MASK_PRECISE is the
// special value 0, not an aperture.
338. typedef NS_ENUM(int, DistanceTransformMasks) {
339. DIST_MASK_3 = 3,
340. DIST_MASK_5 = 5,
341. DIST_MASK_PRECISE = 0
342. };
343. // C++: enum DistanceTypes (cv.DistanceTypes)
// Mirror of cv::DistanceTypes: distance metric selector (distanceTransform,
// fitLine). DIST_USER (-1) denotes a user-defined metric.
344. typedef NS_ENUM(int, DistanceTypes) {
345. DIST_USER = -1,
346. DIST_L1 = 1,
347. DIST_L2 = 2,
348. DIST_C = 3,
349. DIST_L12 = 4,
350. DIST_FAIR = 5,
351. DIST_WELSCH = 6,
352. DIST_HUBER = 7
353. };
354. // C++: enum FloodFillFlags (cv.FloodFillFlags)
// Mirror of cv::FloodFillFlags: bit flags occupying bits 16 and 17 so they
// can be OR-ed into floodFill's flags word alongside its low-bit options.
355. typedef NS_ENUM(int, FloodFillFlags) {
356. FLOODFILL_FIXED_RANGE = 1 << 16,
357. FLOODFILL_MASK_ONLY = 1 << 17
358. };
359. // C++: enum GrabCutClasses (cv.GrabCutClasses)
// Mirror of cv::GrabCutClasses: per-pixel mask labels for grabCut
// (definite/probable background and foreground).
360. typedef NS_ENUM(int, GrabCutClasses) {
361. GC_BGD = 0,
362. GC_FGD = 1,
363. GC_PR_BGD = 2,
364. GC_PR_FGD = 3
365. };
366. // C++: enum GrabCutModes (cv.GrabCutModes)
// Mirror of cv::GrabCutModes: initialization/iteration mode for grabCut.
367. typedef NS_ENUM(int, GrabCutModes) {
368. GC_INIT_WITH_RECT = 0,
369. GC_INIT_WITH_MASK = 1,
370. GC_EVAL = 2,
371. GC_EVAL_FREEZE_MODEL = 3
372. };
373. // C++: enum HersheyFonts (cv.HersheyFonts)
// Mirror of cv::HersheyFonts: font faces for putText. FONT_ITALIC (16) is a
// flag intended to be OR-ed with one of the face values 0..7.
374. typedef NS_ENUM(int, HersheyFonts) {
375. FONT_HERSHEY_SIMPLEX = 0,
376. FONT_HERSHEY_PLAIN = 1,
377. FONT_HERSHEY_DUPLEX = 2,
378. FONT_HERSHEY_COMPLEX = 3,
379. FONT_HERSHEY_TRIPLEX = 4,
380. FONT_HERSHEY_COMPLEX_SMALL = 5,
381. FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
382. FONT_HERSHEY_SCRIPT_COMPLEX = 7,
383. FONT_ITALIC = 16
384. };
385. // C++: enum HistCompMethods (cv.HistCompMethods)
// Mirror of cv::HistCompMethods: comparison metrics for compareHist.
// HISTCMP_HELLINGER is an alias of HISTCMP_BHATTACHARYYA.
386. typedef NS_ENUM(int, HistCompMethods) {
387. HISTCMP_CORREL = 0,
388. HISTCMP_CHISQR = 1,
389. HISTCMP_INTERSECT = 2,
390. HISTCMP_BHATTACHARYYA = 3,
391. HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA,
392. HISTCMP_CHISQR_ALT = 4,
393. HISTCMP_KL_DIV = 5
394. };
395. // C++: enum HoughModes (cv.HoughModes)
// Mirror of cv::HoughModes: variants of the Hough transform used by the
// HoughLines*/HoughCircles family.
396. typedef NS_ENUM(int, HoughModes) {
397. HOUGH_STANDARD = 0,
398. HOUGH_PROBABILISTIC = 1,
399. HOUGH_MULTI_SCALE = 2,
400. HOUGH_GRADIENT = 3,
401. HOUGH_GRADIENT_ALT = 4
402. };
403. // C++: enum InterpolationFlags (cv.InterpolationFlags)
// Mirror of cv::InterpolationFlags: interpolation methods (values 0..7) plus
// WARP_* bit flags (8, 16) that are OR-ed with a method for warp functions.
404. typedef NS_ENUM(int, InterpolationFlags) {
405. INTER_NEAREST = 0,
406. INTER_LINEAR = 1,
407. INTER_CUBIC = 2,
408. INTER_AREA = 3,
409. INTER_LANCZOS4 = 4,
410. INTER_LINEAR_EXACT = 5,
411. INTER_NEAREST_EXACT = 6,
// INTER_MAX is the mask/upper bound for the method portion, not a method.
412. INTER_MAX = 7,
413. WARP_FILL_OUTLIERS = 8,
414. WARP_INVERSE_MAP = 16
415. };
416. // C++: enum InterpolationMasks (cv.InterpolationMasks)
// Mirror of cv::InterpolationMasks: fixed-point interpolation-table
// constants; the latter three are derived from INTER_BITS.
417. typedef NS_ENUM(int, InterpolationMasks) {
418. INTER_BITS = 5,
419. INTER_BITS2 = INTER_BITS * 2,
420. INTER_TAB_SIZE = 1 << INTER_BITS,
421. INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE
422. };
423. // C++: enum LineSegmentDetectorModes (cv.LineSegmentDetectorModes)
// Mirror of cv::LineSegmentDetectorModes: refinement strategy passed to
// createLineSegmentDetector (see the methods below).
424. typedef NS_ENUM(int, LineSegmentDetectorModes) {
425. LSD_REFINE_NONE = 0,
426. LSD_REFINE_STD = 1,
427. LSD_REFINE_ADV = 2
428. };
429. // C++: enum LineTypes (cv.LineTypes)
// Mirror of cv::LineTypes: line rasterization for the drawing functions.
// FILLED (-1) is the special "fill the shape" thickness value.
430. typedef NS_ENUM(int, LineTypes) {
431. FILLED = -1,
432. LINE_4 = 4,
433. LINE_8 = 8,
434. LINE_AA = 16
435. };
436. // C++: enum MarkerTypes (cv.MarkerTypes)
// Mirror of cv::MarkerTypes: marker shapes for drawMarker.
437. typedef NS_ENUM(int, MarkerTypes) {
438. MARKER_CROSS = 0,
439. MARKER_TILTED_CROSS = 1,
440. MARKER_STAR = 2,
441. MARKER_DIAMOND = 3,
442. MARKER_SQUARE = 4,
443. MARKER_TRIANGLE_UP = 5,
444. MARKER_TRIANGLE_DOWN = 6
445. };
446. // C++: enum MorphShapes (cv.MorphShapes)
// Mirror of cv::MorphShapes: kernel shapes for getStructuringElement.
447. typedef NS_ENUM(int, MorphShapes) {
448. MORPH_RECT = 0,
449. MORPH_CROSS = 1,
450. MORPH_ELLIPSE = 2
451. };
452. // C++: enum MorphTypes (cv.MorphTypes)
// Mirror of cv::MorphTypes: operations for morphologyEx.
453. typedef NS_ENUM(int, MorphTypes) {
454. MORPH_ERODE = 0,
455. MORPH_DILATE = 1,
456. MORPH_OPEN = 2,
457. MORPH_CLOSE = 3,
458. MORPH_GRADIENT = 4,
459. MORPH_TOPHAT = 5,
460. MORPH_BLACKHAT = 6,
461. MORPH_HITMISS = 7
462. };
463. // C++: enum RectanglesIntersectTypes (cv.RectanglesIntersectTypes)
// Mirror of cv::RectanglesIntersectTypes: result classification returned by
// rotatedRectangleIntersection.
464. typedef NS_ENUM(int, RectanglesIntersectTypes) {
465. INTERSECT_NONE = 0,
466. INTERSECT_PARTIAL = 1,
467. INTERSECT_FULL = 2
468. };
469. // C++: enum RetrievalModes (cv.RetrievalModes)
// Mirror of cv::RetrievalModes: contour-hierarchy retrieval mode for
// findContours.
470. typedef NS_ENUM(int, RetrievalModes) {
471. RETR_EXTERNAL = 0,
472. RETR_LIST = 1,
473. RETR_CCOMP = 2,
474. RETR_TREE = 3,
475. RETR_FLOODFILL = 4
476. };
477. // C++: enum ShapeMatchModes (cv.ShapeMatchModes)
// Mirror of cv::ShapeMatchModes: Hu-moment comparison methods for
// matchShapes. Note the values start at 1, not 0.
478. typedef NS_ENUM(int, ShapeMatchModes) {
479. CONTOURS_MATCH_I1 = 1,
480. CONTOURS_MATCH_I2 = 2,
481. CONTOURS_MATCH_I3 = 3
482. };
483. // C++: enum SpecialFilter (cv.SpecialFilter)
// Mirror of cv::SpecialFilter: FILTER_SCHARR (-1) is the sentinel ksize
// requesting the 3x3 Scharr kernel (e.g. in getDerivKernels).
484. typedef NS_ENUM(int, SpecialFilter) {
485. FILTER_SCHARR = -1
486. };
487. // C++: enum TemplateMatchModes (cv.TemplateMatchModes)
// Mirror of cv::TemplateMatchModes: similarity measures for matchTemplate.
488. typedef NS_ENUM(int, TemplateMatchModes) {
489. TM_SQDIFF = 0,
490. TM_SQDIFF_NORMED = 1,
491. TM_CCORR = 2,
492. TM_CCORR_NORMED = 3,
493. TM_CCOEFF = 4,
494. TM_CCOEFF_NORMED = 5
495. };
496. // C++: enum ThresholdTypes (cv.ThresholdTypes)
// Mirror of cv::ThresholdTypes: basic types (0..4, maskable via THRESH_MASK)
// plus THRESH_OTSU/THRESH_TRIANGLE flags that are OR-ed with a basic type to
// pick the threshold value automatically.
497. typedef NS_ENUM(int, ThresholdTypes) {
498. THRESH_BINARY = 0,
499. THRESH_BINARY_INV = 1,
500. THRESH_TRUNC = 2,
501. THRESH_TOZERO = 3,
502. THRESH_TOZERO_INV = 4,
503. THRESH_MASK = 7,
504. THRESH_OTSU = 8,
505. THRESH_TRIANGLE = 16
506. };
507. // C++: enum WarpPolarMode (cv.WarpPolarMode)
// Mirror of cv::WarpPolarMode: WARP_POLAR_LOG (256) is a flag OR-ed into
// warpPolar's flags to select semilog instead of linear remapping.
508. typedef NS_ENUM(int, WarpPolarMode) {
509. WARP_POLAR_LINEAR = 0,
510. WARP_POLAR_LOG = 256
511. };
  512. NS_ASSUME_NONNULL_BEGIN
  513. // C++: class Imgproc
  514. /**
  515. * The Imgproc module
  516. *
  517. * Member classes: `GeneralizedHough`, `GeneralizedHoughBallard`, `GeneralizedHoughGuil`, `CLAHE`, `Subdiv2D`, `LineSegmentDetector`, `IntelligentScissorsMB`, `Moments`
  518. *
  519. * Member enums: `SpecialFilter`, `MorphTypes`, `MorphShapes`, `InterpolationFlags`, `WarpPolarMode`, `InterpolationMasks`, `DistanceTypes`, `DistanceTransformMasks`, `ThresholdTypes`, `AdaptiveThresholdTypes`, `GrabCutClasses`, `GrabCutModes`, `DistanceTransformLabelTypes`, `FloodFillFlags`, `ConnectedComponentsTypes`, `ConnectedComponentsAlgorithmsTypes`, `RetrievalModes`, `ContourApproximationModes`, `ShapeMatchModes`, `HoughModes`, `LineSegmentDetectorModes`, `HistCompMethods`, `ColorConversionCodes`, `RectanglesIntersectTypes`, `LineTypes`, `HersheyFonts`, `MarkerTypes`, `TemplateMatchModes`, `ColormapTypes`
  520. */
  521. CV_EXPORTS @interface Imgproc : NSObject
  522. #pragma mark - Class Constants
  523. @property (class, readonly) int CV_GAUSSIAN_5x5 NS_SWIFT_NAME(CV_GAUSSIAN_5x5);
  524. @property (class, readonly) int CV_SCHARR NS_SWIFT_NAME(CV_SCHARR);
  525. @property (class, readonly) int CV_MAX_SOBEL_KSIZE NS_SWIFT_NAME(CV_MAX_SOBEL_KSIZE);
  526. @property (class, readonly) int CV_RGBA2mRGBA NS_SWIFT_NAME(CV_RGBA2mRGBA);
  527. @property (class, readonly) int CV_mRGBA2RGBA NS_SWIFT_NAME(CV_mRGBA2RGBA);
  528. @property (class, readonly) int CV_WARP_FILL_OUTLIERS NS_SWIFT_NAME(CV_WARP_FILL_OUTLIERS);
  529. @property (class, readonly) int CV_WARP_INVERSE_MAP NS_SWIFT_NAME(CV_WARP_INVERSE_MAP);
  530. @property (class, readonly) int CV_CHAIN_CODE NS_SWIFT_NAME(CV_CHAIN_CODE);
  531. @property (class, readonly) int CV_LINK_RUNS NS_SWIFT_NAME(CV_LINK_RUNS);
  532. @property (class, readonly) int CV_POLY_APPROX_DP NS_SWIFT_NAME(CV_POLY_APPROX_DP);
  533. @property (class, readonly) int CV_CLOCKWISE NS_SWIFT_NAME(CV_CLOCKWISE);
  534. @property (class, readonly) int CV_COUNTER_CLOCKWISE NS_SWIFT_NAME(CV_COUNTER_CLOCKWISE);
  535. @property (class, readonly) int CV_CANNY_L2_GRADIENT NS_SWIFT_NAME(CV_CANNY_L2_GRADIENT);
  536. #pragma mark - Methods
  537. //
  538. // Ptr_LineSegmentDetector cv::createLineSegmentDetector(int refine = LSD_REFINE_STD, double scale = 0.8, double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5, double log_eps = 0, double density_th = 0.7, int n_bins = 1024)
  539. //
  540. /**
  541. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  542. *
  543. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  544. * to edit those, as to tailor it for their own application.
  545. *
  546. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  547. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  548. * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
  549. * @param quant Bound to the quantization error on the gradient norm.
  550. * @param ang_th Gradient angle tolerance in degrees.
551. * @param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advanced refinement is chosen.
  552. * @param density_th Minimal density of aligned region points in the enclosing rectangle.
  553. * @param n_bins Number of bins in pseudo-ordering of gradient modulus.
  554. */
  555. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale sigma_scale:(double)sigma_scale quant:(double)quant ang_th:(double)ang_th log_eps:(double)log_eps density_th:(double)density_th n_bins:(int)n_bins NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:sigma_scale:quant:ang_th:log_eps:density_th:n_bins:));
  556. /**
  557. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  558. *
  559. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  560. * to edit those, as to tailor it for their own application.
  561. *
  562. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  563. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  564. * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
  565. * @param quant Bound to the quantization error on the gradient norm.
  566. * @param ang_th Gradient angle tolerance in degrees.
567. * @param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advanced refinement is chosen.
  568. * @param density_th Minimal density of aligned region points in the enclosing rectangle.
  569. */
  570. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale sigma_scale:(double)sigma_scale quant:(double)quant ang_th:(double)ang_th log_eps:(double)log_eps density_th:(double)density_th NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:sigma_scale:quant:ang_th:log_eps:density_th:));
  571. /**
  572. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  573. *
  574. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  575. * to edit those, as to tailor it for their own application.
  576. *
  577. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  578. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  579. * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
  580. * @param quant Bound to the quantization error on the gradient norm.
  581. * @param ang_th Gradient angle tolerance in degrees.
582. * @param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advanced refinement is chosen.
  583. */
  584. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale sigma_scale:(double)sigma_scale quant:(double)quant ang_th:(double)ang_th log_eps:(double)log_eps NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:sigma_scale:quant:ang_th:log_eps:));
  585. /**
  586. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  587. *
  588. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  589. * to edit those, as to tailor it for their own application.
  590. *
  591. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  592. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  593. * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
  594. * @param quant Bound to the quantization error on the gradient norm.
  595. * @param ang_th Gradient angle tolerance in degrees.
  596. */
  597. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale sigma_scale:(double)sigma_scale quant:(double)quant ang_th:(double)ang_th NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:sigma_scale:quant:ang_th:));
  598. /**
  599. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  600. *
  601. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  602. * to edit those, as to tailor it for their own application.
  603. *
  604. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  605. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  606. * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
  607. * @param quant Bound to the quantization error on the gradient norm.
  608. */
  609. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale sigma_scale:(double)sigma_scale quant:(double)quant NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:sigma_scale:quant:));
  610. /**
  611. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  612. *
  613. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  614. * to edit those, as to tailor it for their own application.
  615. *
  616. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  617. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  618. * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
  619. */
  620. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale sigma_scale:(double)sigma_scale NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:sigma_scale:));
  621. /**
  622. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  623. *
  624. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  625. * to edit those, as to tailor it for their own application.
  626. *
  627. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  628. * @param scale The scale of the image that will be used to find the lines. Range (0..1].
  629. */
  630. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine scale:(double)scale NS_SWIFT_NAME(createLineSegmentDetector(refine:scale:));
  631. /**
  632. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  633. *
  634. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  635. * to edit those, as to tailor it for their own application.
  636. *
  637. * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
  638. */
  639. + (LineSegmentDetector*)createLineSegmentDetector:(int)refine NS_SWIFT_NAME(createLineSegmentDetector(refine:));
  640. /**
  641. * Creates a smart pointer to a LineSegmentDetector object and initializes it.
  642. *
  643. * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
  644. * to edit those, as to tailor it for their own application.
  645. *
  646. */
  647. + (LineSegmentDetector*)createLineSegmentDetector NS_SWIFT_NAME(createLineSegmentDetector());
  648. //
  649. // Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
  650. //
  651. /**
  652. * Returns Gaussian filter coefficients.
  653. *
  654. * The function computes and returns the `$$\texttt{ksize} \times 1$$` matrix of Gaussian filter
  655. * coefficients:
  656. *
  657. * `$$G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},$$`
  658. *
  659. * where `$$i=0..\texttt{ksize}-1$$` and `$$\alpha$$` is the scale factor chosen so that `$$\sum_i G_i=1$$`.
  660. *
  661. * Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize
  662. * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.
  663. * You may also use the higher-level GaussianBlur.
  664. * @param ksize Aperture size. It should be odd ( `$$\texttt{ksize} \mod 2 = 1$$` ) and positive.
  665. * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as
  666. * `sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8`.
  667. * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
  668. * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+getDerivKernels:ky:dx:dy:ksize:normalize:ktype:`, `+getStructuringElement:ksize:anchor:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`
  669. */
  670. + (Mat*)getGaussianKernel:(int)ksize sigma:(double)sigma ktype:(int)ktype NS_SWIFT_NAME(getGaussianKernel(ksize:sigma:ktype:));
  671. /**
  672. * Returns Gaussian filter coefficients.
  673. *
  674. * The function computes and returns the `$$\texttt{ksize} \times 1$$` matrix of Gaussian filter
  675. * coefficients:
  676. *
  677. * `$$G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},$$`
  678. *
  679. * where `$$i=0..\texttt{ksize}-1$$` and `$$\alpha$$` is the scale factor chosen so that `$$\sum_i G_i=1$$`.
  680. *
  681. * Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize
  682. * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.
  683. * You may also use the higher-level GaussianBlur.
  684. * @param ksize Aperture size. It should be odd ( `$$\texttt{ksize} \mod 2 = 1$$` ) and positive.
  685. * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as
  686. * `sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8`.
  687. * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+getDerivKernels:ky:dx:dy:ksize:normalize:ktype:`, `+getStructuringElement:ksize:anchor:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`
  688. */
  689. + (Mat*)getGaussianKernel:(int)ksize sigma:(double)sigma NS_SWIFT_NAME(getGaussianKernel(ksize:sigma:));
  690. //
  691. // void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
  692. //
/**
 * Returns filter coefficients for computing spatial image derivatives.
 *
 * The function computes and returns the filter coefficients for spatial image derivatives. When
 * `ksize=FILTER_SCHARR`, the Scharr `$$3 \times 3$$` kernels are generated (see #Scharr). Otherwise, Sobel
 * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to
 *
 * @param kx Output matrix of row filter coefficients. It has the type ktype .
 * @param ky Output matrix of column filter coefficients. It has the type ktype .
 * @param dx Derivative order in respect of x.
 * @param dy Derivative order in respect of y.
 * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
 * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
 * Theoretically, the coefficients should have the denominator `$$=2^{ksize*2-dx-dy-2}$$`. If you are
 * going to filter floating-point images, you are likely to use the normalized kernels. But if you
 * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
 * all the fractional bits, you may want to set normalize=false .
 * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
 */
+ (void)getDerivKernels:(Mat*)kx ky:(Mat*)ky dx:(int)dx dy:(int)dy ksize:(int)ksize normalize:(BOOL)normalize ktype:(int)ktype NS_SWIFT_NAME(getDerivKernels(kx:ky:dx:dy:ksize:normalize:ktype:));
/**
 * Returns filter coefficients for computing spatial image derivatives.
 *
 * The function computes and returns the filter coefficients for spatial image derivatives. When
 * `ksize=FILTER_SCHARR`, the Scharr `$$3 \times 3$$` kernels are generated (see #Scharr). Otherwise, Sobel
 * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to
 *
 * @param kx Output matrix of row filter coefficients. It has the type ktype .
 * @param ky Output matrix of column filter coefficients. It has the type ktype .
 * @param dx Derivative order in respect of x.
 * @param dy Derivative order in respect of y.
 * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
 * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
 * Theoretically, the coefficients should have the denominator `$$=2^{ksize*2-dx-dy-2}$$`. If you are
 * going to filter floating-point images, you are likely to use the normalized kernels. But if you
 * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
 * all the fractional bits, you may want to set normalize=false .
 *
 * NOTE: This overload uses the default ktype = CV_32F.
 */
+ (void)getDerivKernels:(Mat*)kx ky:(Mat*)ky dx:(int)dx dy:(int)dy ksize:(int)ksize normalize:(BOOL)normalize NS_SWIFT_NAME(getDerivKernels(kx:ky:dx:dy:ksize:normalize:));
/**
 * Returns filter coefficients for computing spatial image derivatives.
 *
 * The function computes and returns the filter coefficients for spatial image derivatives. When
 * `ksize=FILTER_SCHARR`, the Scharr `$$3 \times 3$$` kernels are generated (see #Scharr). Otherwise, Sobel
 * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to
 *
 * @param kx Output matrix of row filter coefficients. It has the type ktype .
 * @param ky Output matrix of column filter coefficients. It has the type ktype .
 * @param dx Derivative order in respect of x.
 * @param dy Derivative order in respect of y.
 * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
 *
 * NOTE: This overload uses the defaults normalize = false and ktype = CV_32F. Theoretically, the
 * coefficients should have the denominator `$$=2^{ksize*2-dx-dy-2}$$`. If you are going to filter
 * floating-point images, you are likely to use the normalized kernels. But if you compute
 * derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve all the
 * fractional bits, you may want to keep normalize=false .
 */
+ (void)getDerivKernels:(Mat*)kx ky:(Mat*)ky dx:(int)dx dy:(int)dy ksize:(int)ksize NS_SWIFT_NAME(getDerivKernels(kx:ky:dx:dy:ksize:));
  750. //
  751. // Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
  752. //
/**
 * Returns Gabor filter coefficients.
 *
 * For more details about Gabor filter equations and parameters, see: [Gabor
 * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
 *
 * @param ksize Size of the filter returned.
 * @param sigma Standard deviation of the Gaussian envelope.
 * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
 * @param lambd Wavelength of the sinusoidal factor.
 * @param gamma Spatial aspect ratio.
 * @param psi Phase offset.
 * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
 */
+ (Mat*)getGaborKernel:(Size2i*)ksize sigma:(double)sigma theta:(double)theta lambd:(double)lambd gamma:(double)gamma psi:(double)psi ktype:(int)ktype NS_SWIFT_NAME(getGaborKernel(ksize:sigma:theta:lambd:gamma:psi:ktype:));
/**
 * Returns Gabor filter coefficients.
 *
 * For more details about Gabor filter equations and parameters, see: [Gabor
 * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
 *
 * @param ksize Size of the filter returned.
 * @param sigma Standard deviation of the Gaussian envelope.
 * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
 * @param lambd Wavelength of the sinusoidal factor.
 * @param gamma Spatial aspect ratio.
 * @param psi Phase offset.
 *
 * NOTE: This overload uses the default ktype = CV_64F.
 */
+ (Mat*)getGaborKernel:(Size2i*)ksize sigma:(double)sigma theta:(double)theta lambd:(double)lambd gamma:(double)gamma psi:(double)psi NS_SWIFT_NAME(getGaborKernel(ksize:sigma:theta:lambd:gamma:psi:));
/**
 * Returns Gabor filter coefficients.
 *
 * For more details about Gabor filter equations and parameters, see: [Gabor
 * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
 *
 * @param ksize Size of the filter returned.
 * @param sigma Standard deviation of the Gaussian envelope.
 * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
 * @param lambd Wavelength of the sinusoidal factor.
 * @param gamma Spatial aspect ratio.
 *
 * NOTE: This overload uses the defaults psi = CV_PI*0.5 and ktype = CV_64F.
 */
+ (Mat*)getGaborKernel:(Size2i*)ksize sigma:(double)sigma theta:(double)theta lambd:(double)lambd gamma:(double)gamma NS_SWIFT_NAME(getGaborKernel(ksize:sigma:theta:lambd:gamma:));
  795. //
  796. // Mat cv::getStructuringElement(MorphShapes shape, Size ksize, Point anchor = Point(-1,-1))
  797. //
/**
 * Returns a structuring element of the specified size and shape for morphological operations.
 *
 * The function constructs and returns the structuring element that can be further passed to #erode,
 * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as
 * the structuring element.
 *
 * @param shape Element shape that could be one of #MorphShapes
 * @param ksize Size of the structuring element.
 * @param anchor Anchor position within the element. The default value `$$(-1, -1)$$` means that the
 * anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor
 * position. In other cases the anchor just regulates how much the result of the morphological
 * operation is shifted.
 * @return The structuring element of the requested shape and size.
 */
+ (Mat*)getStructuringElement:(MorphShapes)shape ksize:(Size2i*)ksize anchor:(Point2i*)anchor NS_SWIFT_NAME(getStructuringElement(shape:ksize:anchor:));
/**
 * Returns a structuring element of the specified size and shape for morphological operations.
 *
 * The function constructs and returns the structuring element that can be further passed to #erode,
 * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as
 * the structuring element.
 *
 * @param shape Element shape that could be one of #MorphShapes
 * @param ksize Size of the structuring element.
 *
 * NOTE: This overload uses the default anchor `$$(-1, -1)$$`, i.e. the element center. Note that only
 * the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just
 * regulates how much the result of the morphological operation is shifted.
 * @return The structuring element of the requested shape and size.
 */
+ (Mat*)getStructuringElement:(MorphShapes)shape ksize:(Size2i*)ksize NS_SWIFT_NAME(getStructuringElement(shape:ksize:));
  827. //
  828. // void cv::medianBlur(Mat src, Mat& dst, int ksize)
  829. //
/**
 * Blurs an image using the median filter.
 *
 * The function smooths an image using the median filter with the `$$\texttt{ksize} \times
 * \texttt{ksize}$$` aperture. Each channel of a multi-channel image is processed independently.
 * In-place operation is supported.
 *
 * NOTE: The median filter uses #BORDER_REPLICATE internally to cope with border pixels, see #BorderTypes
 *
 * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be
 * CV_8U, CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U.
 * @param dst destination array of the same size and type as src.
 * @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
 * @see `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+blur:dst:ksize:anchor:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`
 */
+ (void)medianBlur:(Mat*)src dst:(Mat*)dst ksize:(int)ksize NS_SWIFT_NAME(medianBlur(src:dst:ksize:));
  846. //
  847. // void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, BorderTypes borderType = BORDER_DEFAULT)
  848. //
/**
 * Blurs an image using a Gaussian filter.
 *
 * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
 * supported.
 *
 * @param src input image; the image can have any number of channels, which are processed
 * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
 * positive and odd. Or, they can be zeros and then they are computed from sigma.
 * @param sigmaX Gaussian kernel standard deviation in X direction.
 * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
 * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
 * respectively (see #getGaussianKernel for details); to fully control the result regardless of
 * possible future modifications of all this semantics, it is recommended to specify all of ksize,
 * sigmaX, and sigmaY.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 *
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+blur:dst:ksize:anchor:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+medianBlur:dst:ksize:`
 */
+ (void)GaussianBlur:(Mat*)src dst:(Mat*)dst ksize:(Size2i*)ksize sigmaX:(double)sigmaX sigmaY:(double)sigmaY borderType:(BorderTypes)borderType NS_SWIFT_NAME(GaussianBlur(src:dst:ksize:sigmaX:sigmaY:borderType:));
/**
 * Blurs an image using a Gaussian filter.
 *
 * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
 * supported.
 *
 * @param src input image; the image can have any number of channels, which are processed
 * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
 * positive and odd. Or, they can be zeros and then they are computed from sigma.
 * @param sigmaX Gaussian kernel standard deviation in X direction.
 * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
 * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
 * respectively (see #getGaussianKernel for details); to fully control the result regardless of
 * possible future modifications of all this semantics, it is recommended to specify all of ksize,
 * sigmaX, and sigmaY.
 *
 * NOTE: This overload uses the default borderType = BORDER_DEFAULT.
 *
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+blur:dst:ksize:anchor:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+medianBlur:dst:ksize:`
 */
+ (void)GaussianBlur:(Mat*)src dst:(Mat*)dst ksize:(Size2i*)ksize sigmaX:(double)sigmaX sigmaY:(double)sigmaY NS_SWIFT_NAME(GaussianBlur(src:dst:ksize:sigmaX:sigmaY:));
/**
 * Blurs an image using a Gaussian filter.
 *
 * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
 * supported.
 *
 * @param src input image; the image can have any number of channels, which are processed
 * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
 * positive and odd. Or, they can be zeros and then they are computed from sigma.
 * @param sigmaX Gaussian kernel standard deviation in X direction.
 *
 * NOTE: This overload uses the default sigmaY = 0, which makes sigmaY equal to sigmaX; if both
 * sigmas are zeros, they are computed from ksize.width and ksize.height, respectively (see
 * #getGaussianKernel for details); to fully control the result regardless of possible future
 * modifications of all this semantics, it is recommended to specify all of ksize, sigmaX, and
 * sigmaY. borderType defaults to BORDER_DEFAULT.
 *
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+blur:dst:ksize:anchor:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+medianBlur:dst:ksize:`
 */
+ (void)GaussianBlur:(Mat*)src dst:(Mat*)dst ksize:(Size2i*)ksize sigmaX:(double)sigmaX NS_SWIFT_NAME(GaussianBlur(src:dst:ksize:sigmaX:));
  912. //
  913. // void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, BorderTypes borderType = BORDER_DEFAULT)
  914. //
/**
 * Applies the bilateral filter to an image.
 *
 * The function applies bilateral filtering to the input image, as described in
 * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
 * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
 * very slow compared to most filters.
 *
 * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\<
 * 10), the filter will not have much effect, whereas if they are large (\> 150), they will have a very
 * strong effect, making the image look "cartoonish".
 *
 * _Filter size_: Large filters (d \> 5) are very slow, so it is recommended to use d=5 for real-time
 * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
 *
 * This filter does not work in place.
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src .
 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
 * it is computed from sigmaSpace.
 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
 * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
 * in larger areas of semi-equal color.
 * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
 * farther pixels will influence each other as long as their colors are close enough (see sigmaColor
 * ). When d\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
 * proportional to sigmaSpace.
 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
 */
+ (void)bilateralFilter:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace borderType:(BorderTypes)borderType NS_SWIFT_NAME(bilateralFilter(src:dst:d:sigmaColor:sigmaSpace:borderType:));
/**
 * Applies the bilateral filter to an image.
 *
 * The function applies bilateral filtering to the input image, as described in
 * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
 * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
 * very slow compared to most filters.
 *
 * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\<
 * 10), the filter will not have much effect, whereas if they are large (\> 150), they will have a very
 * strong effect, making the image look "cartoonish".
 *
 * _Filter size_: Large filters (d \> 5) are very slow, so it is recommended to use d=5 for real-time
 * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
 *
 * This filter does not work in place.
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src .
 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
 * it is computed from sigmaSpace.
 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
 * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
 * in larger areas of semi-equal color.
 * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
 * farther pixels will influence each other as long as their colors are close enough (see sigmaColor
 * ). When d\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
 * proportional to sigmaSpace.
 *
 * NOTE: This overload uses the default borderType = BORDER_DEFAULT.
 */
+ (void)bilateralFilter:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace NS_SWIFT_NAME(bilateralFilter(src:dst:d:sigmaColor:sigmaSpace:));
  974. //
  975. // void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, BorderTypes borderType = BORDER_DEFAULT)
  976. //
/**
 * Blurs an image using the box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}$$`
 *
 * where
 *
 * `$$\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}$$`
 *
 * Unnormalized box filter is useful for computing various integral characteristics over each pixel
 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 * @param normalize flag, specifying whether the kernel is normalized by its area or not.
 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 * @see `+blur:dst:ksize:anchor:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`, `+integral:sum:sdepth:`
 */
+ (void)boxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize anchor:(Point2i*)anchor normalize:(BOOL)normalize borderType:(BorderTypes)borderType NS_SWIFT_NAME(boxFilter(src:dst:ddepth:ksize:anchor:normalize:borderType:));
/**
 * Blurs an image using the box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}$$`
 *
 * where
 *
 * `$$\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}$$`
 *
 * Unnormalized box filter is useful for computing various integral characteristics over each pixel
 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 * @param normalize flag, specifying whether the kernel is normalized by its area or not.
 *
 * NOTE: This overload uses the default borderType = BORDER_DEFAULT.
 * @see `+blur:dst:ksize:anchor:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`, `+integral:sum:sdepth:`
 */
+ (void)boxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize anchor:(Point2i*)anchor normalize:(BOOL)normalize NS_SWIFT_NAME(boxFilter(src:dst:ddepth:ksize:anchor:normalize:));
/**
 * Blurs an image using the box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}$$`
 *
 * where
 *
 * `$$\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}$$`
 *
 * Unnormalized box filter is useful for computing various integral characteristics over each pixel
 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 *
 * NOTE: This overload uses the defaults normalize = true and borderType = BORDER_DEFAULT.
 * @see `+blur:dst:ksize:anchor:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`, `+integral:sum:sdepth:`
 */
+ (void)boxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize anchor:(Point2i*)anchor NS_SWIFT_NAME(boxFilter(src:dst:ddepth:ksize:anchor:));
/**
 * Blurs an image using the box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}$$`
 *
 * where
 *
 * `$$\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}$$`
 *
 * Unnormalized box filter is useful for computing various integral characteristics over each pixel
 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 *
 * NOTE: This overload uses the defaults anchor = Point(-1,-1) (i.e. the anchor is at the kernel
 * center), normalize = true, and borderType = BORDER_DEFAULT.
 * @see `+blur:dst:ksize:anchor:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`, `+integral:sum:sdepth:`
 */
+ (void)boxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize NS_SWIFT_NAME(boxFilter(src:dst:ddepth:ksize:));
  1075. //
  1076. // void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, BorderTypes borderType = BORDER_DEFAULT)
  1077. //
/**
 * Calculates the normalized sum of squares of the pixel values overlapping the filter.
 *
 * For every pixel `$$ (x, y) $$` in the source image, the function calculates the sum of squares of those neighboring
 * pixel values which overlap the filter placed over the pixel `$$ (x, y) $$`.
 *
 * The unnormalized square box filter can be useful in computing local image statistics such as the local
 * variance and standard deviation around the neighborhood of a pixel.
 *
 * @param src input image
 * @param dst output image of the same size and type as src
 * @param ddepth the output image depth (-1 to use src.depth())
 * @param ksize kernel size
 * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
 * center.
 * @param normalize flag, specifying whether the kernel is to be normalized by its area or not.
 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`
 */
+ (void)sqrBoxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize anchor:(Point2i*)anchor normalize:(BOOL)normalize borderType:(BorderTypes)borderType NS_SWIFT_NAME(sqrBoxFilter(src:dst:ddepth:ksize:anchor:normalize:borderType:));
/**
 * Calculates the normalized sum of squares of the pixel values overlapping the filter.
 *
 * For every pixel `$$ (x, y) $$` in the source image, the function calculates the sum of squares of those neighboring
 * pixel values which overlap the filter placed over the pixel `$$ (x, y) $$`.
 *
 * The unnormalized square box filter can be useful in computing local image statistics such as the local
 * variance and standard deviation around the neighborhood of a pixel.
 *
 * @param src input image
 * @param dst output image of the same size and type as src
 * @param ddepth the output image depth (-1 to use src.depth())
 * @param ksize kernel size
 * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
 * center.
 * @param normalize flag, specifying whether the kernel is to be normalized by its area or not.
 *
 * NOTE: This overload uses the default borderType = BORDER_DEFAULT.
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`
 */
+ (void)sqrBoxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize anchor:(Point2i*)anchor normalize:(BOOL)normalize NS_SWIFT_NAME(sqrBoxFilter(src:dst:ddepth:ksize:anchor:normalize:));
/**
 * Calculates the normalized sum of squares of the pixel values overlapping the filter.
 *
 * For every pixel `$$ (x, y) $$` in the source image, the function calculates the sum of squares of those neighboring
 * pixel values which overlap the filter placed over the pixel `$$ (x, y) $$`.
 *
 * The unnormalized square box filter can be useful in computing local image statistics such as the local
 * variance and standard deviation around the neighborhood of a pixel.
 *
 * @param src input image
 * @param dst output image of the same size and type as src
 * @param ddepth the output image depth (-1 to use src.depth())
 * @param ksize kernel size
 * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
 * center.
 *
 * NOTE: This overload uses the defaults normalize = true and borderType = BORDER_DEFAULT.
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`
 */
+ (void)sqrBoxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize anchor:(Point2i*)anchor NS_SWIFT_NAME(sqrBoxFilter(src:dst:ddepth:ksize:anchor:));
/**
 * Calculates the normalized sum of squares of the pixel values overlapping the filter.
 *
 * For every pixel `$$ (x, y) $$` in the source image, the function calculates the sum of squares of those neighboring
 * pixel values which overlap the filter placed over the pixel `$$ (x, y) $$`.
 *
 * The unnormalized square box filter can be useful in computing local image statistics such as the local
 * variance and standard deviation around the neighborhood of a pixel.
 *
 * @param src input image
 * @param dst output image of the same size and type as src
 * @param ddepth the output image depth (-1 to use src.depth())
 * @param ksize kernel size
 *
 * NOTE: This overload uses the defaults anchor = Point(-1, -1) (i.e. the anchor is at the kernel
 * center), normalize = true, and borderType = BORDER_DEFAULT.
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`
 */
+ (void)sqrBoxFilter:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(Size2i*)ksize NS_SWIFT_NAME(sqrBoxFilter(src:dst:ddepth:ksize:));
  1152. //
  1153. // void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), BorderTypes borderType = BORDER_DEFAULT)
  1154. //
/**
 * Blurs an image using the normalized box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}$$`
 *
 * The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize,
 * anchor, true, borderType)`.
 *
 * @param src input image; it can have any number of channels, which are processed independently, but
 * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`
 */
+ (void)blur:(Mat*)src dst:(Mat*)dst ksize:(Size2i*)ksize anchor:(Point2i*)anchor borderType:(BorderTypes)borderType NS_SWIFT_NAME(blur(src:dst:ksize:anchor:borderType:));
/**
 * Blurs an image using the normalized box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}$$`
 *
 * The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize,
 * anchor, true, borderType)`.
 *
 * @param src input image; it can have any number of channels, which are processed independently, but
 * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 *
 * This overload uses `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`
 */
+ (void)blur:(Mat*)src dst:(Mat*)dst ksize:(Size2i*)ksize anchor:(Point2i*)anchor NS_SWIFT_NAME(blur(src:dst:ksize:anchor:));
/**
 * Blurs an image using the normalized box filter.
 *
 * The function smooths an image using the kernel:
 *
 * `$$\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}$$`
 *
 * The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize,
 * anchor, true, borderType)`.
 *
 * @param src input image; it can have any number of channels, which are processed independently, but
 * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
 *
 * This overload uses `anchor = Point(-1,-1)` (kernel center) and `borderType = BORDER_DEFAULT`.
 * @see `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+bilateralFilter:dst:d:sigmaColor:sigmaSpace:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+medianBlur:dst:ksize:`
 */
+ (void)blur:(Mat*)src dst:(Mat*)dst ksize:(Size2i*)ksize NS_SWIFT_NAME(blur(src:dst:ksize:));
//
// void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, BorderTypes borderType = BORDER_DEFAULT)
//
/**
 * Convolves an image with the kernel.
 *
 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
 * the aperture is partially outside the image, the function interpolates outlier pixel values
 * according to the specified border mode.
 *
 * The function does actually compute correlation, not the convolution:
 *
 * `$$\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )$$`
 *
 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
 * anchor.y - 1)`.
 *
 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or
 * larger) and the direct algorithm for small kernels.
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src.
 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
 * matrix; if you want to apply different kernels to different channels, split the image into
 * separate color planes using split and process them individually.
 * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
 * is at the kernel center.
 * @param delta optional value added to the filtered pixels before storing them in dst.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 * @note Exposed to Swift as `filter2D(src:dst:ddepth:kernel:anchor:delta:borderType:)`.
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `dft`, `+matchTemplate:templ:result:method:mask:`
 */
+ (void)filter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernel:(Mat*)kernel anchor:(Point2i*)anchor delta:(double)delta borderType:(BorderTypes)borderType NS_SWIFT_NAME(filter2D(src:dst:ddepth:kernel:anchor:delta:borderType:));
/**
 * Convolves an image with the kernel.
 *
 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
 * the aperture is partially outside the image, the function interpolates outlier pixel values
 * according to the specified border mode.
 *
 * The function does actually compute correlation, not the convolution:
 *
 * `$$\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )$$`
 *
 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
 * anchor.y - 1)`.
 *
 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or
 * larger) and the direct algorithm for small kernels.
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src.
 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
 * matrix; if you want to apply different kernels to different channels, split the image into
 * separate color planes using split and process them individually.
 * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
 * is at the kernel center.
 * @param delta optional value added to the filtered pixels before storing them in dst.
 *
 * This overload uses `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `dft`, `+matchTemplate:templ:result:method:mask:`
 */
+ (void)filter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernel:(Mat*)kernel anchor:(Point2i*)anchor delta:(double)delta NS_SWIFT_NAME(filter2D(src:dst:ddepth:kernel:anchor:delta:));
/**
 * Convolves an image with the kernel.
 *
 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
 * the aperture is partially outside the image, the function interpolates outlier pixel values
 * according to the specified border mode.
 *
 * The function does actually compute correlation, not the convolution:
 *
 * `$$\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )$$`
 *
 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
 * anchor.y - 1)`.
 *
 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or
 * larger) and the direct algorithm for small kernels.
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src.
 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
 * matrix; if you want to apply different kernels to different channels, split the image into
 * separate color planes using split and process them individually.
 * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
 * is at the kernel center.
 *
 * This overload uses `delta = 0` and `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `dft`, `+matchTemplate:templ:result:method:mask:`
 */
+ (void)filter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernel:(Mat*)kernel anchor:(Point2i*)anchor NS_SWIFT_NAME(filter2D(src:dst:ddepth:kernel:anchor:));
/**
 * Convolves an image with the kernel.
 *
 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
 * the aperture is partially outside the image, the function interpolates outlier pixel values
 * according to the specified border mode.
 *
 * The function does actually compute correlation, not the convolution:
 *
 * `$$\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )$$`
 *
 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
 * anchor.y - 1)`.
 *
 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or
 * larger) and the direct algorithm for small kernels.
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src.
 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
 * matrix; if you want to apply different kernels to different channels, split the image into
 * separate color planes using split and process them individually.
 *
 * This overload uses `anchor = Point(-1,-1)` (kernel center), `delta = 0` and
 * `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `dft`, `+matchTemplate:templ:result:method:mask:`
 */
+ (void)filter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernel:(Mat*)kernel NS_SWIFT_NAME(filter2D(src:dst:ddepth:kernel:));
//
// void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, BorderTypes borderType = BORDER_DEFAULT)
//
/**
 * Applies a separable linear filter to an image.
 *
 * The function applies a separable linear filter to the image. That is, first, every row of src is
 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
 * kernel kernelY. The final result shifted by delta is stored in dst .
 *
 * @param src Source image.
 * @param dst Destination image of the same size and the same number of channels as src .
 * @param ddepth Destination image depth, see REF: filter_depths "combinations"
 * @param kernelX Coefficients for filtering each row.
 * @param kernelY Coefficients for filtering each column.
 * @param anchor Anchor position within the kernel. The default value `$$(-1,-1)$$` means that the anchor
 * is at the kernel center.
 * @param delta Value added to the filtered results before storing them.
 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 * @note Exposed to Swift as `sepFilter2D(src:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:)`.
 * @see `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+blur:dst:ksize:anchor:borderType:`
 */
+ (void)sepFilter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernelX:(Mat*)kernelX kernelY:(Mat*)kernelY anchor:(Point2i*)anchor delta:(double)delta borderType:(BorderTypes)borderType NS_SWIFT_NAME(sepFilter2D(src:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:));
/**
 * Applies a separable linear filter to an image.
 *
 * The function applies a separable linear filter to the image. That is, first, every row of src is
 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
 * kernel kernelY. The final result shifted by delta is stored in dst .
 *
 * @param src Source image.
 * @param dst Destination image of the same size and the same number of channels as src .
 * @param ddepth Destination image depth, see REF: filter_depths "combinations"
 * @param kernelX Coefficients for filtering each row.
 * @param kernelY Coefficients for filtering each column.
 * @param anchor Anchor position within the kernel. The default value `$$(-1,-1)$$` means that the anchor
 * is at the kernel center.
 * @param delta Value added to the filtered results before storing them.
 *
 * This overload uses `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+blur:dst:ksize:anchor:borderType:`
 */
+ (void)sepFilter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernelX:(Mat*)kernelX kernelY:(Mat*)kernelY anchor:(Point2i*)anchor delta:(double)delta NS_SWIFT_NAME(sepFilter2D(src:dst:ddepth:kernelX:kernelY:anchor:delta:));
/**
 * Applies a separable linear filter to an image.
 *
 * The function applies a separable linear filter to the image. That is, first, every row of src is
 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
 * kernel kernelY. The final result shifted by delta is stored in dst .
 *
 * @param src Source image.
 * @param dst Destination image of the same size and the same number of channels as src .
 * @param ddepth Destination image depth, see REF: filter_depths "combinations"
 * @param kernelX Coefficients for filtering each row.
 * @param kernelY Coefficients for filtering each column.
 * @param anchor Anchor position within the kernel. The default value `$$(-1,-1)$$` means that the anchor
 * is at the kernel center.
 *
 * This overload uses `delta = 0` and `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+blur:dst:ksize:anchor:borderType:`
 */
+ (void)sepFilter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernelX:(Mat*)kernelX kernelY:(Mat*)kernelY anchor:(Point2i*)anchor NS_SWIFT_NAME(sepFilter2D(src:dst:ddepth:kernelX:kernelY:anchor:));
/**
 * Applies a separable linear filter to an image.
 *
 * The function applies a separable linear filter to the image. That is, first, every row of src is
 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
 * kernel kernelY. The final result shifted by delta is stored in dst .
 *
 * @param src Source image.
 * @param dst Destination image of the same size and the same number of channels as src .
 * @param ddepth Destination image depth, see REF: filter_depths "combinations"
 * @param kernelX Coefficients for filtering each row.
 * @param kernelY Coefficients for filtering each column.
 *
 * This overload uses `anchor = Point(-1,-1)` (kernel center), `delta = 0` and
 * `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `+boxFilter:dst:ddepth:ksize:anchor:normalize:borderType:`, `+blur:dst:ksize:anchor:borderType:`
 */
+ (void)sepFilter2D:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth kernelX:(Mat*)kernelX kernelY:(Mat*)kernelY NS_SWIFT_NAME(sepFilter2D(src:dst:ddepth:kernelX:kernelY:));
//
// void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, BorderTypes borderType = BORDER_DEFAULT)
//
/**
 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
 *
 * In all cases except one, the `$$\texttt{ksize} \times \texttt{ksize}$$` separable kernel is used to
 * calculate the derivative. When `$$\texttt{ksize = 1}$$`, the `$$3 \times 1$$` or `$$1 \times 3$$`
 * kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
 * or the second x- or y- derivatives.
 *
 * There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the `$$3\times3$$` Scharr
 * filter that may give more accurate results than the `$$3\times3$$` Sobel. The Scharr aperture is
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}$$`
 *
 * for the x-derivative, or transposed for the y-derivative.
 *
 * The function calculates an image derivative by convolving the image with the appropriate kernel:
 *
 * `$$\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}$$`
 *
 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
 * case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}$$`
 *
 * The second case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}$$`
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src .
 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
 * 8-bit input images it will result in truncated derivatives.
 * @param dx order of the derivative x.
 * @param dy order of the derivative y.
 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
 * @param scale optional scale factor for the computed derivative values; by default, no scaling is
 * applied (see #getDerivKernels for details).
 * @param delta optional delta value that is added to the results prior to storing them in dst.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 * @note Exposed to Swift as `Sobel(src:dst:ddepth:dx:dy:ksize:scale:delta:borderType:)`.
 * @see `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`, `+Laplacian:dst:ddepth:ksize:scale:delta:borderType:`, `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `cartToPolar`
 */
+ (void)Sobel:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy ksize:(int)ksize scale:(double)scale delta:(double)delta borderType:(BorderTypes)borderType NS_SWIFT_NAME(Sobel(src:dst:ddepth:dx:dy:ksize:scale:delta:borderType:));
/**
 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
 *
 * In all cases except one, the `$$\texttt{ksize} \times \texttt{ksize}$$` separable kernel is used to
 * calculate the derivative. When `$$\texttt{ksize = 1}$$`, the `$$3 \times 1$$` or `$$1 \times 3$$`
 * kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
 * or the second x- or y- derivatives.
 *
 * There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the `$$3\times3$$` Scharr
 * filter that may give more accurate results than the `$$3\times3$$` Sobel. The Scharr aperture is
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}$$`
 *
 * for the x-derivative, or transposed for the y-derivative.
 *
 * The function calculates an image derivative by convolving the image with the appropriate kernel:
 *
 * `$$\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}$$`
 *
 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
 * case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}$$`
 *
 * The second case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}$$`
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src .
 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
 * 8-bit input images it will result in truncated derivatives.
 * @param dx order of the derivative x.
 * @param dy order of the derivative y.
 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
 * @param scale optional scale factor for the computed derivative values; by default, no scaling is
 * applied (see #getDerivKernels for details).
 * @param delta optional delta value that is added to the results prior to storing them in dst.
 *
 * This overload uses `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`, `+Laplacian:dst:ddepth:ksize:scale:delta:borderType:`, `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `cartToPolar`
 */
+ (void)Sobel:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy ksize:(int)ksize scale:(double)scale delta:(double)delta NS_SWIFT_NAME(Sobel(src:dst:ddepth:dx:dy:ksize:scale:delta:));
/**
 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
 *
 * In all cases except one, the `$$\texttt{ksize} \times \texttt{ksize}$$` separable kernel is used to
 * calculate the derivative. When `$$\texttt{ksize = 1}$$`, the `$$3 \times 1$$` or `$$1 \times 3$$`
 * kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
 * or the second x- or y- derivatives.
 *
 * There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the `$$3\times3$$` Scharr
 * filter that may give more accurate results than the `$$3\times3$$` Sobel. The Scharr aperture is
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}$$`
 *
 * for the x-derivative, or transposed for the y-derivative.
 *
 * The function calculates an image derivative by convolving the image with the appropriate kernel:
 *
 * `$$\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}$$`
 *
 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
 * case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}$$`
 *
 * The second case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}$$`
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src .
 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
 * 8-bit input images it will result in truncated derivatives.
 * @param dx order of the derivative x.
 * @param dy order of the derivative y.
 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
 * @param scale optional scale factor for the computed derivative values; by default, no scaling is
 * applied (see #getDerivKernels for details).
 *
 * This overload uses `delta = 0` and `borderType = BORDER_DEFAULT` (see the C++ signature above).
 * @see `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`, `+Laplacian:dst:ddepth:ksize:scale:delta:borderType:`, `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `cartToPolar`
 */
+ (void)Sobel:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy ksize:(int)ksize scale:(double)scale NS_SWIFT_NAME(Sobel(src:dst:ddepth:dx:dy:ksize:scale:));
/**
 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
 *
 * In all cases except one, the `$$\texttt{ksize} \times \texttt{ksize}$$` separable kernel is used to
 * calculate the derivative. When `$$\texttt{ksize = 1}$$`, the `$$3 \times 1$$` or `$$1 \times 3$$`
 * kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
 * or the second x- or y- derivatives.
 *
 * There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the `$$3\times3$$` Scharr
 * filter that may give more accurate results than the `$$3\times3$$` Sobel. The Scharr aperture is
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}$$`
 *
 * for the x-derivative, or transposed for the y-derivative.
 *
 * The function calculates an image derivative by convolving the image with the appropriate kernel:
 *
 * `$$\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}$$`
 *
 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
 * case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}$$`
 *
 * The second case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}$$`
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src .
 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
 * 8-bit input images it will result in truncated derivatives.
 * @param dx order of the derivative x.
 * @param dy order of the derivative y.
 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
 *
 * This overload uses `scale = 1`, `delta = 0` and `borderType = BORDER_DEFAULT`
 * (see the C++ signature above).
 * @see `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`, `+Laplacian:dst:ddepth:ksize:scale:delta:borderType:`, `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `cartToPolar`
 */
+ (void)Sobel:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy ksize:(int)ksize NS_SWIFT_NAME(Sobel(src:dst:ddepth:dx:dy:ksize:));
/**
 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
 *
 * In all cases except one, the `$$\texttt{ksize} \times \texttt{ksize}$$` separable kernel is used to
 * calculate the derivative. When `$$\texttt{ksize = 1}$$`, the `$$3 \times 1$$` or `$$1 \times 3$$`
 * kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
 * or the second x- or y- derivatives.
 *
 * There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the `$$3\times3$$` Scharr
 * filter that may give more accurate results than the `$$3\times3$$` Sobel. The Scharr aperture is
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}$$`
 *
 * for the x-derivative, or transposed for the y-derivative.
 *
 * The function calculates an image derivative by convolving the image with the appropriate kernel:
 *
 * `$$\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}$$`
 *
 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
 * case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}$$`
 *
 * The second case corresponds to a kernel of:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}$$`
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src .
 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
 * 8-bit input images it will result in truncated derivatives.
 * @param dx order of the derivative x.
 * @param dy order of the derivative y.
 *
 * This overload uses `ksize = 3`, `scale = 1`, `delta = 0` and `borderType = BORDER_DEFAULT`
 * (see the C++ signature above).
 * @see `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`, `+Laplacian:dst:ddepth:ksize:scale:delta:borderType:`, `+sepFilter2D:dst:ddepth:kernelX:kernelY:anchor:delta:borderType:`, `+filter2D:dst:ddepth:kernel:anchor:delta:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`, `cartToPolar`
 */
+ (void)Sobel:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy NS_SWIFT_NAME(Sobel(src:dst:ddepth:dx:dy:));
  1623. //
  1624. // void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, BorderTypes borderType = BORDER_DEFAULT)
  1625. //
  1626. /**
  1627. * Calculates the first order image derivative in both x and y using a Sobel operator
  1628. *
  1629. * Equivalent to calling:
  1630. *
  1631. *
  1632. * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
  1633. * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
  1634. *
  1635. *
  1636. * @param src input image.
  1637. * @param dx output image with first-order derivative in x.
  1638. * @param dy output image with first-order derivative in y.
  1639. * @param ksize size of Sobel kernel. It must be 3.
  1640. * @param borderType pixel extrapolation method, see #BorderTypes.
  1641. * Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
  1642. *
  1643. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`
  1644. */
  1645. + (void)spatialGradient:(Mat*)src dx:(Mat*)dx dy:(Mat*)dy ksize:(int)ksize borderType:(BorderTypes)borderType NS_SWIFT_NAME(spatialGradient(src:dx:dy:ksize:borderType:));
  1646. /**
  1647. * Calculates the first order image derivative in both x and y using a Sobel operator
  1648. *
  1649. * Equivalent to calling:
  1650. *
  1651. *
  1652. * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
  1653. * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
  1654. *
  1655. *
  1656. * @param src input image.
  1657. * @param dx output image with first-order derivative in x.
  1658. * @param dy output image with first-order derivative in y.
  1659. * @param ksize size of Sobel kernel. It must be 3.
  1660. * The default border mode #BORDER_DEFAULT is used; only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
  1661. *
  1662. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`
  1663. */
  1664. + (void)spatialGradient:(Mat*)src dx:(Mat*)dx dy:(Mat*)dy ksize:(int)ksize NS_SWIFT_NAME(spatialGradient(src:dx:dy:ksize:));
  1665. /**
  1666. * Calculates the first order image derivative in both x and y using a Sobel operator
  1667. *
  1668. * Equivalent to calling:
  1669. *
  1670. *
  1671. * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
  1672. * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
  1673. *
  1674. *
  1675. * @param src input image.
  1676. * @param dx output image with first-order derivative in x.
  1677. * @param dy output image with first-order derivative in y.
  1678. * The default kernel size (ksize = 3) and border mode (#BORDER_DEFAULT) are used; only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
  1679. *
  1680. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`
  1681. */
  1682. + (void)spatialGradient:(Mat*)src dx:(Mat*)dx dy:(Mat*)dy NS_SWIFT_NAME(spatialGradient(src:dx:dy:));
  1683. //
  1684. // void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, BorderTypes borderType = BORDER_DEFAULT)
  1685. //
  1686. /**
  1687. * Calculates the first x- or y- image derivative using Scharr operator.
  1688. *
  1689. * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
  1690. * call
  1691. *
  1692. * `$$\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}$$`
  1693. *
  1694. * is equivalent to
  1695. *
  1696. * `$$\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER\_SCHARR, scale, delta, borderType)} .$$`
  1697. *
  1698. * @param src input image.
  1699. * @param dst output image of the same size and the same number of channels as src.
  1700. * @param ddepth output image depth, see REF: filter_depths "combinations"
  1701. * @param dx order of the derivative x.
  1702. * @param dy order of the derivative y.
  1703. * @param scale optional scale factor for the computed derivative values; by default, no scaling is
  1704. * applied (see #getDerivKernels for details).
  1705. * @param delta optional delta value that is added to the results prior to storing them in dst.
  1706. * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
  1707. * @see `cartToPolar`
  1708. */
  1709. + (void)Scharr:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy scale:(double)scale delta:(double)delta borderType:(BorderTypes)borderType NS_SWIFT_NAME(Scharr(src:dst:ddepth:dx:dy:scale:delta:borderType:));
  1710. /**
  1711. * Calculates the first x- or y- image derivative using Scharr operator.
  1712. *
  1713. * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
  1714. * call
  1715. *
  1716. * `$$\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}$$`
  1717. *
  1718. * is equivalent to
  1719. *
  1720. * `$$\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER\_SCHARR, scale, delta, borderType)} .$$`
  1721. *
  1722. * @param src input image.
  1723. * @param dst output image of the same size and the same number of channels as src.
  1724. * @param ddepth output image depth, see REF: filter_depths "combinations"
  1725. * @param dx order of the derivative x.
  1726. * @param dy order of the derivative y.
  1727. * @param scale optional scale factor for the computed derivative values; by default, no scaling is
  1728. * applied (see #getDerivKernels for details).
  1729. * @param delta optional delta value that is added to the results prior to storing them in dst.
  1730. * @see `cartToPolar`
  1731. */
  1732. + (void)Scharr:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy scale:(double)scale delta:(double)delta NS_SWIFT_NAME(Scharr(src:dst:ddepth:dx:dy:scale:delta:));
  1733. /**
  1734. * Calculates the first x- or y- image derivative using Scharr operator.
  1735. *
  1736. * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
  1737. * call
  1738. *
  1739. * `$$\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}$$`
  1740. *
  1741. * is equivalent to
  1742. *
  1743. * `$$\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER\_SCHARR, scale, delta, borderType)} .$$`
  1744. *
  1745. * @param src input image.
  1746. * @param dst output image of the same size and the same number of channels as src.
  1747. * @param ddepth output image depth, see REF: filter_depths "combinations"
  1748. * @param dx order of the derivative x.
  1749. * @param dy order of the derivative y.
  1750. * @param scale optional scale factor for the computed derivative values; by default, no scaling is
  1751. * applied (see #getDerivKernels for details).
  1752. * @see `cartToPolar`
  1753. */
  1754. + (void)Scharr:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy scale:(double)scale NS_SWIFT_NAME(Scharr(src:dst:ddepth:dx:dy:scale:));
  1755. /**
  1756. * Calculates the first x- or y- image derivative using Scharr operator.
  1757. *
  1758. * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
  1759. * call
  1760. *
  1761. * `$$\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}$$`
  1762. *
  1763. * is equivalent to
  1764. *
  1765. * `$$\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER\_SCHARR, scale, delta, borderType)} .$$`
  1766. *
  1767. * @param src input image.
  1768. * @param dst output image of the same size and the same number of channels as src.
  1769. * @param ddepth output image depth, see REF: filter_depths "combinations"
  1770. * @param dx order of the derivative x.
  1771. * @param dy order of the derivative y.
  1772. * No scaling is applied to the computed derivative values by default (see #getDerivKernels for details).
  1773. * @see `cartToPolar`
  1774. */
  1775. + (void)Scharr:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth dx:(int)dx dy:(int)dy NS_SWIFT_NAME(Scharr(src:dst:ddepth:dx:dy:));
  1776. //
  1777. // void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, BorderTypes borderType = BORDER_DEFAULT)
  1778. //
  1779. /**
  1780. * Calculates the Laplacian of an image.
  1781. *
  1782. * The function calculates the Laplacian of the source image by adding up the second x and y
  1783. * derivatives calculated using the Sobel operator:
  1784. *
  1785. * `$$\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}$$`
  1786. *
  1787. * This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
  1788. * with the following `$$3 \times 3$$` aperture:
  1789. *
  1790. * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}$$`
  1791. *
  1792. * @param src Source image.
  1793. * @param dst Destination image of the same size and the same number of channels as src .
  1794. * @param ddepth Desired depth of the destination image.
  1795. * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
  1796. * details. The size must be positive and odd.
  1797. * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
  1798. * applied. See #getDerivKernels for details.
  1799. * @param delta Optional delta value that is added to the results prior to storing them in dst .
  1800. * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
  1801. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`
  1802. */
  1803. + (void)Laplacian:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(int)ksize scale:(double)scale delta:(double)delta borderType:(BorderTypes)borderType NS_SWIFT_NAME(Laplacian(src:dst:ddepth:ksize:scale:delta:borderType:));
  1804. /**
  1805. * Calculates the Laplacian of an image.
  1806. *
  1807. * The function calculates the Laplacian of the source image by adding up the second x and y
  1808. * derivatives calculated using the Sobel operator:
  1809. *
  1810. * `$$\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}$$`
  1811. *
  1812. * This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
  1813. * with the following `$$3 \times 3$$` aperture:
  1814. *
  1815. * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}$$`
  1816. *
  1817. * @param src Source image.
  1818. * @param dst Destination image of the same size and the same number of channels as src .
  1819. * @param ddepth Desired depth of the destination image.
  1820. * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
  1821. * details. The size must be positive and odd.
  1822. * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
  1823. * applied. See #getDerivKernels for details.
  1824. * @param delta Optional delta value that is added to the results prior to storing them in dst .
  1825. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`
  1826. */
  1827. + (void)Laplacian:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(int)ksize scale:(double)scale delta:(double)delta NS_SWIFT_NAME(Laplacian(src:dst:ddepth:ksize:scale:delta:));
  1828. /**
  1829. * Calculates the Laplacian of an image.
  1830. *
  1831. * The function calculates the Laplacian of the source image by adding up the second x and y
  1832. * derivatives calculated using the Sobel operator:
  1833. *
  1834. * `$$\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}$$`
  1835. *
  1836. * This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
  1837. * with the following `$$3 \times 3$$` aperture:
  1838. *
  1839. * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}$$`
  1840. *
  1841. * @param src Source image.
  1842. * @param dst Destination image of the same size and the same number of channels as src .
  1843. * @param ddepth Desired depth of the destination image.
  1844. * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
  1845. * details. The size must be positive and odd.
  1846. * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
  1847. * applied. See #getDerivKernels for details.
  1848. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`
  1849. */
  1850. + (void)Laplacian:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(int)ksize scale:(double)scale NS_SWIFT_NAME(Laplacian(src:dst:ddepth:ksize:scale:));
  1851. /**
  1852. * Calculates the Laplacian of an image.
  1853. *
  1854. * The function calculates the Laplacian of the source image by adding up the second x and y
  1855. * derivatives calculated using the Sobel operator:
  1856. *
  1857. * `$$\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}$$`
  1858. *
  1859. * This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
  1860. * with the following `$$3 \times 3$$` aperture:
  1861. *
  1862. * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}$$`
  1863. *
  1864. * @param src Source image.
  1865. * @param dst Destination image of the same size and the same number of channels as src .
  1866. * @param ddepth Desired depth of the destination image.
  1867. * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
  1868. * details. The size must be positive and odd.
  1869. * No scaling is applied to the computed Laplacian values by default. See #getDerivKernels for details.
  1870. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`
  1871. */
  1872. + (void)Laplacian:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth ksize:(int)ksize NS_SWIFT_NAME(Laplacian(src:dst:ddepth:ksize:));
  1873. /**
  1874. * Calculates the Laplacian of an image.
  1875. *
  1876. * The function calculates the Laplacian of the source image by adding up the second x and y
  1877. * derivatives calculated using the Sobel operator:
  1878. *
  1879. * `$$\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}$$`
  1880. *
  1881. * This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
  1882. * with the following `$$3 \times 3$$` aperture:
  1883. *
  1884. * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}$$`
  1885. *
  1886. * @param src Source image.
  1887. * @param dst Destination image of the same size and the same number of channels as src .
  1888. * @param ddepth Desired depth of the destination image.
  1889. * The default aperture size (ksize = 1) is used to compute the second-derivative filters. See #getDerivKernels for details.
  1890. * No scaling is applied to the computed Laplacian values by default.
  1891. * @see `+Sobel:dst:ddepth:dx:dy:ksize:scale:delta:borderType:`, `+Scharr:dst:ddepth:dx:dy:scale:delta:borderType:`
  1892. */
  1893. + (void)Laplacian:(Mat*)src dst:(Mat*)dst ddepth:(int)ddepth NS_SWIFT_NAME(Laplacian(src:dst:ddepth:));
  1894. //
  1895. // void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false)
  1896. //
  1897. /**
  1898. * Finds edges in an image using the Canny algorithm CITE: Canny86 .
  1899. *
  1900. * The function finds edges in the input image and marks them in the output map edges using the
  1901. * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
  1902. * largest value is used to find initial segments of strong edges. See
  1903. * <http://en.wikipedia.org/wiki/Canny_edge_detector>
  1904. *
  1905. * @param image 8-bit input image.
  1906. * @param edges output edge map; single channel 8-bit image, which has the same size as image .
  1907. * @param threshold1 first threshold for the hysteresis procedure.
  1908. * @param threshold2 second threshold for the hysteresis procedure.
  1909. * @param apertureSize aperture size for the Sobel operator.
  1910. * @param L2gradient a flag, indicating whether a more accurate `$$L_2$$` norm
  1911. * `$$=\sqrt{(dI/dx)^2 + (dI/dy)^2}$$` should be used to calculate the image gradient magnitude (
  1912. * L2gradient=true ), or whether the default `$$L_1$$` norm `$$=|dI/dx|+|dI/dy|$$` is enough (
  1913. * L2gradient=false ).
  1914. */
  1915. + (void)Canny:(Mat*)image edges:(Mat*)edges threshold1:(double)threshold1 threshold2:(double)threshold2 apertureSize:(int)apertureSize L2gradient:(BOOL)L2gradient NS_SWIFT_NAME(Canny(image:edges:threshold1:threshold2:apertureSize:L2gradient:));
  1916. /**
  1917. * Finds edges in an image using the Canny algorithm CITE: Canny86 .
  1918. *
  1919. * The function finds edges in the input image and marks them in the output map edges using the
  1920. * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
  1921. * largest value is used to find initial segments of strong edges. See
  1922. * <http://en.wikipedia.org/wiki/Canny_edge_detector>
  1923. *
  1924. * @param image 8-bit input image.
  1925. * @param edges output edge map; single channel 8-bit image, which has the same size as image .
  1926. * @param threshold1 first threshold for the hysteresis procedure.
  1927. * @param threshold2 second threshold for the hysteresis procedure.
  1928. * @param apertureSize aperture size for the Sobel operator.
  1929. * The default `$$L_1$$` norm `$$=|dI/dx|+|dI/dy|$$` is used to calculate the image gradient
  1930. * magnitude (L2gradient=false), rather than the more accurate `$$L_2$$` norm
  1931. * `$$=\sqrt{(dI/dx)^2 + (dI/dy)^2}$$` (L2gradient=true).
  1932. */
  1933. + (void)Canny:(Mat*)image edges:(Mat*)edges threshold1:(double)threshold1 threshold2:(double)threshold2 apertureSize:(int)apertureSize NS_SWIFT_NAME(Canny(image:edges:threshold1:threshold2:apertureSize:));
  1934. /**
  1935. * Finds edges in an image using the Canny algorithm CITE: Canny86 .
  1936. *
  1937. * The function finds edges in the input image and marks them in the output map edges using the
  1938. * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
  1939. * largest value is used to find initial segments of strong edges. See
  1940. * <http://en.wikipedia.org/wiki/Canny_edge_detector>
  1941. *
  1942. * @param image 8-bit input image.
  1943. * @param edges output edge map; single channel 8-bit image, which has the same size as image .
  1944. * @param threshold1 first threshold for the hysteresis procedure.
  1945. * @param threshold2 second threshold for the hysteresis procedure.
  1946. * The default `$$L_1$$` norm `$$=|dI/dx|+|dI/dy|$$` is used to calculate the image gradient
  1947. * magnitude (L2gradient=false), rather than the more accurate `$$L_2$$` norm
  1948. * `$$=\sqrt{(dI/dx)^2 + (dI/dy)^2}$$` (L2gradient=true).
  1949. */
  1950. + (void)Canny:(Mat*)image edges:(Mat*)edges threshold1:(double)threshold1 threshold2:(double)threshold2 NS_SWIFT_NAME(Canny(image:edges:threshold1:threshold2:));
  1951. //
  1952. // void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false)
  1953. //
  1954. /**
  1955. * \overload
  1956. *
  1957. * Finds edges in an image using the Canny algorithm with custom image gradient.
  1958. *
  1959. * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
  1960. * @param dy 16-bit y derivative of input image (same type as dx).
  1961. * @param edges output edge map; single channel 8-bit image, which has the same size as image .
  1962. * @param threshold1 first threshold for the hysteresis procedure.
  1963. * @param threshold2 second threshold for the hysteresis procedure.
  1964. * @param L2gradient a flag, indicating whether a more accurate `$$L_2$$` norm
  1965. * `$$=\sqrt{(dI/dx)^2 + (dI/dy)^2}$$` should be used to calculate the image gradient magnitude (
  1966. * L2gradient=true ), or whether the default `$$L_1$$` norm `$$=|dI/dx|+|dI/dy|$$` is enough (
  1967. * L2gradient=false ).
  1968. */
  1969. + (void)Canny:(Mat*)dx dy:(Mat*)dy edges:(Mat*)edges threshold1:(double)threshold1 threshold2:(double)threshold2 L2gradient:(BOOL)L2gradient NS_SWIFT_NAME(Canny(dx:dy:edges:threshold1:threshold2:L2gradient:));
  1970. /**
  1971. * \overload
  1972. *
  1973. * Finds edges in an image using the Canny algorithm with custom image gradient.
  1974. *
  1975. * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
  1976. * @param dy 16-bit y derivative of input image (same type as dx).
  1977. * @param edges output edge map; single channel 8-bit image, which has the same size as image .
  1978. * @param threshold1 first threshold for the hysteresis procedure.
  1979. * @param threshold2 second threshold for the hysteresis procedure.
  1980. * The default `$$L_1$$` norm `$$=|dI/dx|+|dI/dy|$$` is used to calculate the image gradient
  1981. * magnitude (L2gradient=false), rather than the more accurate `$$L_2$$` norm
  1982. * `$$=\sqrt{(dI/dx)^2 + (dI/dy)^2}$$` (L2gradient=true).
  1983. */
  1984. + (void)Canny:(Mat*)dx dy:(Mat*)dy edges:(Mat*)edges threshold1:(double)threshold1 threshold2:(double)threshold2 NS_SWIFT_NAME(Canny(dx:dy:edges:threshold1:threshold2:));
  1985. //
  1986. // void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, BorderTypes borderType = BORDER_DEFAULT)
  1987. //
  1988. /**
  1989. * Calculates the minimal eigenvalue of gradient matrices for corner detection.
  1990. *
  1991. * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
  1992. * eigenvalue of the covariance matrix of derivatives, that is, `$$\min(\lambda_1, \lambda_2)$$` in terms
  1993. * of the formulae in the cornerEigenValsAndVecs description.
  1994. *
  1995. * @param src Input single-channel 8-bit or floating-point image.
  1996. * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
  1997. * src .
  1998. * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
  1999. * @param ksize Aperture parameter for the Sobel operator.
  2000. * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
  2001. */
  2002. + (void)cornerMinEigenVal:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize ksize:(int)ksize borderType:(BorderTypes)borderType NS_SWIFT_NAME(cornerMinEigenVal(src:dst:blockSize:ksize:borderType:));
  2003. /**
  2004. * Calculates the minimal eigenvalue of gradient matrices for corner detection.
  2005. *
  2006. * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
  2007. * eigenvalue of the covariance matrix of derivatives, that is, `$$\min(\lambda_1, \lambda_2)$$` in terms
  2008. * of the formulae in the cornerEigenValsAndVecs description.
  2009. *
  2010. * @param src Input single-channel 8-bit or floating-point image.
  2011. * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
  2012. * src .
  2013. * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
  2014. * @param ksize Aperture parameter for the Sobel operator.
  2015. */
  2016. + (void)cornerMinEigenVal:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize ksize:(int)ksize NS_SWIFT_NAME(cornerMinEigenVal(src:dst:blockSize:ksize:));
  2017. /**
  2018. * Calculates the minimal eigenvalue of gradient matrices for corner detection.
  2019. *
  2020. * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
  2021. * eigenvalue of the covariance matrix of derivatives, that is, `$$\min(\lambda_1, \lambda_2)$$` in terms
  2022. * of the formulae in the cornerEigenValsAndVecs description.
  2023. *
  2024. * @param src Input single-channel 8-bit or floating-point image.
  2025. * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
  2026. * src .
  2027. * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
  2028. */
  2029. + (void)cornerMinEigenVal:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize NS_SWIFT_NAME(cornerMinEigenVal(src:dst:blockSize:));
  2030. //
  2031. // void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, BorderTypes borderType = BORDER_DEFAULT)
  2032. //
  2033. /**
  2034. * Harris corner detector.
  2035. *
  2036. * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
  2037. * cornerEigenValsAndVecs , for each pixel `$$(x, y)$$` it calculates a `$$2\times2$$` gradient covariance
  2038. * matrix `$$M^{(x,y)}$$` over a `$$\texttt{blockSize} \times \texttt{blockSize}$$` neighborhood. Then, it
  2039. * computes the following characteristic:
  2040. *
  2041. * `$$\texttt{dst} (x,y) = \mathrm{det} M^{(x,y)} - k \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2$$`
  2042. *
  2043. * Corners in the image can be found as the local maxima of this response map.
  2044. *
  2045. * @param src Input single-channel 8-bit or floating-point image.
  2046. * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same
  2047. * size as src .
  2048. * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
  2049. * @param ksize Aperture parameter for the Sobel operator.
  2050. * @param k Harris detector free parameter. See the formula above.
  2051. * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
  2052. */
  2053. + (void)cornerHarris:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize ksize:(int)ksize k:(double)k borderType:(BorderTypes)borderType NS_SWIFT_NAME(cornerHarris(src:dst:blockSize:ksize:k:borderType:));
  2054. /**
  2055. * Harris corner detector.
  2056. *
  2057. * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
  2058. * cornerEigenValsAndVecs , for each pixel `$$(x, y)$$` it calculates a `$$2\times2$$` gradient covariance
  2059. * matrix `$$M^{(x,y)}$$` over a `$$\texttt{blockSize} \times \texttt{blockSize}$$` neighborhood. Then, it
  2060. * computes the following characteristic:
  2061. *
  2062. * `$$\texttt{dst} (x,y) = \mathrm{det} M^{(x,y)} - k \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2$$`
  2063. *
  2064. * Corners in the image can be found as the local maxima of this response map.
  2065. *
  2066. * @param src Input single-channel 8-bit or floating-point image.
  2067. * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same
  2068. * size as src .
  2069. * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
  2070. * @param ksize Aperture parameter for the Sobel operator.
  2071. * @param k Harris detector free parameter. See the formula above.
  2072. */
  2073. + (void)cornerHarris:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize ksize:(int)ksize k:(double)k NS_SWIFT_NAME(cornerHarris(src:dst:blockSize:ksize:k:));
  2074. //
  2075. // void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, BorderTypes borderType = BORDER_DEFAULT)
  2076. //
  2077. /**
  2078. * Calculates eigenvalues and eigenvectors of image blocks for corner detection.
  2079. *
  2080. * For every pixel `$$p$$` , the function cornerEigenValsAndVecs considers a blockSize `$$\times$$` blockSize
  2081. * neighborhood `$$S(p)$$` . It calculates the covariation matrix of derivatives over the neighborhood as:
  2082. *
  2083. * `$$M = \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 & \sum _{S(p)}dI/dx dI/dy \\ \sum _{S(p)}dI/dx dI/dy & \sum _{S(p)}(dI/dy)^2 \end{bmatrix}$$`
  2084. *
  2085. * where the derivatives are computed using the Sobel operator.
  2086. *
  2087. * After that, it finds eigenvectors and eigenvalues of `$$M$$` and stores them in the destination image as
  2088. * `$$(\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)$$` where
  2089. *
  2090. * - `$$\lambda_1, \lambda_2$$` are the non-sorted eigenvalues of `$$M$$`
  2091. * - `$$x_1, y_1$$` are the eigenvectors corresponding to `$$\lambda_1$$`
  2092. * - `$$x_2, y_2$$` are the eigenvectors corresponding to `$$\lambda_2$$`
  2093. *
  2094. * The output of the function can be used for robust edge or corner detection.
  2095. *
  2096. * @param src Input single-channel 8-bit or floating-point image.
  2097. * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
  2098. * @param blockSize Neighborhood size (see details below).
  2099. * @param ksize Aperture parameter for the Sobel operator.
  2100. * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
  2101. *
  2102. * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `+preCornerDetect:dst:ksize:borderType:`
  2103. */
  2104. + (void)cornerEigenValsAndVecs:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize ksize:(int)ksize borderType:(BorderTypes)borderType NS_SWIFT_NAME(cornerEigenValsAndVecs(src:dst:blockSize:ksize:borderType:));
/**
 * Calculates eigenvalues and eigenvectors of image blocks for corner detection.
 *
 * Overload without the `borderType` parameter; the default border mode is used
 * (NOTE(review): presumably #BORDER_DEFAULT, matching the wrapped C++ default — verify in the generated .mm).
 *
 * For every pixel `$$p$$` , the function cornerEigenValsAndVecs considers a blockSize `$$\times$$` blockSize
 * neighborhood `$$S(p)$$` . It calculates the covariation matrix of derivatives over the neighborhood as:
 *
 * `$$M = \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 & \sum _{S(p)}dI/dx dI/dy \\ \sum _{S(p)}dI/dx dI/dy & \sum _{S(p)}(dI/dy)^2 \end{bmatrix}$$`
 *
 * where the derivatives are computed using the Sobel operator.
 *
 * After that, it finds eigenvectors and eigenvalues of `$$M$$` and stores them in the destination image as
 * `$$(\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)$$` where
 *
 * - `$$\lambda_1, \lambda_2$$` are the non-sorted eigenvalues of `$$M$$`
 * - `$$x_1, y_1$$` are the eigenvectors corresponding to `$$\lambda_1$$`
 * - `$$x_2, y_2$$` are the eigenvectors corresponding to `$$\lambda_2$$`
 *
 * The output of the function can be used for robust edge or corner detection.
 *
 * @param src Input single-channel 8-bit or floating-point image.
 * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
 * @param blockSize Neighborhood size (see details above).
 * @param ksize Aperture parameter for the Sobel operator.
 *
 * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `+preCornerDetect:dst:ksize:borderType:`
 */
+ (void)cornerEigenValsAndVecs:(Mat*)src dst:(Mat*)dst blockSize:(int)blockSize ksize:(int)ksize NS_SWIFT_NAME(cornerEigenValsAndVecs(src:dst:blockSize:ksize:));
  2132. //
  2133. // void cv::preCornerDetect(Mat src, Mat& dst, int ksize, BorderTypes borderType = BORDER_DEFAULT)
  2134. //
/**
 * Calculates a feature map for corner detection.
 *
 * The function calculates the complex spatial derivative-based function of the source image
 *
 * `$$\texttt{dst} = (D_x \texttt{src} )^2 \cdot D_{yy} \texttt{src} + (D_y \texttt{src} )^2 \cdot D_{xx} \texttt{src} - 2 D_x \texttt{src} \cdot D_y \texttt{src} \cdot D_{xy} \texttt{src}$$`
 *
 * where `$$D_x$$`,`$$D_y$$` are the first image derivatives, `$$D_{xx}$$`,`$$D_{yy}$$` are the second image
 * derivatives, and `$$D_{xy}$$` is the mixed derivative.
 *
 * The corners can be found as local maximums of the functions, as shown below:
 *
 * Mat corners, dilated_corners;
 * preCornerDetect(image, corners, 3);
 * // dilation with 3x3 rectangular structuring element
 * dilate(corners, dilated_corners, Mat(), 1);
 * Mat corner_mask = corners == dilated_corners;
 *
 *
 * @param src Source single-channel 8-bit or floating-point image.
 * @param dst Output image that has the type CV_32F and the same size as src .
 * @param ksize %Aperture size of the Sobel operator.
 * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
+ (void)preCornerDetect:(Mat*)src dst:(Mat*)dst ksize:(int)ksize borderType:(BorderTypes)borderType NS_SWIFT_NAME(preCornerDetect(src:dst:ksize:borderType:));
/**
 * Calculates a feature map for corner detection.
 *
 * Overload without the `borderType` parameter; the default border mode (#BORDER_DEFAULT,
 * per the wrapped C++ signature shown above) is used.
 *
 * The function calculates the complex spatial derivative-based function of the source image
 *
 * `$$\texttt{dst} = (D_x \texttt{src} )^2 \cdot D_{yy} \texttt{src} + (D_y \texttt{src} )^2 \cdot D_{xx} \texttt{src} - 2 D_x \texttt{src} \cdot D_y \texttt{src} \cdot D_{xy} \texttt{src}$$`
 *
 * where `$$D_x$$`,`$$D_y$$` are the first image derivatives, `$$D_{xx}$$`,`$$D_{yy}$$` are the second image
 * derivatives, and `$$D_{xy}$$` is the mixed derivative.
 *
 * The corners can be found as local maximums of the functions, as shown below:
 *
 * Mat corners, dilated_corners;
 * preCornerDetect(image, corners, 3);
 * // dilation with 3x3 rectangular structuring element
 * dilate(corners, dilated_corners, Mat(), 1);
 * Mat corner_mask = corners == dilated_corners;
 *
 *
 * @param src Source single-channel 8-bit or floating-point image.
 * @param dst Output image that has the type CV_32F and the same size as src .
 * @param ksize %Aperture size of the Sobel operator.
 */
+ (void)preCornerDetect:(Mat*)src dst:(Mat*)dst ksize:(int)ksize NS_SWIFT_NAME(preCornerDetect(src:dst:ksize:));
  2184. //
  2185. // void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria)
  2186. //
/**
 * Refines the corner locations.
 *
 * The function iterates to find the sub-pixel accurate location of corners or radial saddle
 * points as described in CITE: forstner1987fast, and as shown on the figure below.
 *
 * ![image](pics/cornersubpix.png)
 *
 * Sub-pixel accurate corner locator is based on the observation that every vector from the center `$$q$$`
 * to a point `$$p$$` located within a neighborhood of `$$q$$` is orthogonal to the image gradient at `$$p$$`
 * subject to image and measurement noise. Consider the expression:
 *
 * `$$\epsilon _i = {DI_{p_i}}^T \cdot (q - p_i)$$`
 *
 * where `$${DI_{p_i}}$$` is an image gradient at one of the points `$$p_i$$` in a neighborhood of `$$q$$` . The
 * value of `$$q$$` is to be found so that `$$\epsilon_i$$` is minimized. A system of equations may be set up
 * with `$$\epsilon_i$$` set to zero:
 *
 * `$$\sum _i(DI_{p_i} \cdot {DI_{p_i}}^T) \cdot q - \sum _i(DI_{p_i} \cdot {DI_{p_i}}^T \cdot p_i)$$`
 *
 * where the gradients are summed within a neighborhood ("search window") of `$$q$$` . Calling the first
 * gradient term `$$G$$` and the second gradient term `$$b$$` gives:
 *
 * `$$q = G^{-1} \cdot b$$`
 *
 * The algorithm sets the center of the neighborhood window at this new center `$$q$$` and then iterates
 * until the center stays within a set threshold.
 *
 * @param image Input single-channel, 8-bit or float image.
 * @param corners Initial coordinates of the input corners and refined coordinates provided for
 * output.
 * @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) ,
 * then a `$$(5*2+1) \times (5*2+1) = 11 \times 11$$` search window is used.
 * @param zeroZone Half of the size of the dead region in the middle of the search zone over which
 * the summation in the formula above is not done. It is used sometimes to avoid possible
 * singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such
 * size.
 * @param criteria Criteria for termination of the iterative process of corner refinement. That is,
 * the process of corner position refinement stops either after criteria.maxCount iterations or when
 * the corner position moves by less than criteria.epsilon on some iteration.
 */
+ (void)cornerSubPix:(Mat*)image corners:(Mat*)corners winSize:(Size2i*)winSize zeroZone:(Size2i*)zeroZone criteria:(TermCriteria*)criteria NS_SWIFT_NAME(cornerSubPix(image:corners:winSize:zeroZone:criteria:));
  2229. //
  2230. // void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04)
  2231. //
/**
 * Determines strong corners on an image.
 *
 * The function finds the most prominent corners in the image or in the specified image region, as
 * described in CITE: Shi94
 *
 * - Function calculates the corner quality measure at every source image pixel using the
 * #cornerMinEigenVal or #cornerHarris .
 * - Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
 * retained).
 * - The corners with the minimal eigenvalue less than
 * `$$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)$$` are rejected.
 * - The remaining corners are sorted by the quality measure in the descending order.
 * - Function throws away each corner for which there is a stronger corner at a distance less than
 * minDistance.
 *
 * The function can be used to initialize a point-based tracker of an object.
 *
 * NOTE: If the function is called with different values A and B of the parameter qualityLevel , and
 * A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
 * with qualityLevel=B .
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Optional region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs .
 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
 * or #cornerMinEigenVal.
 * @param k Free parameter of the Harris detector.
 *
 * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `calcOpticalFlowPyrLK`, `estimateRigidTransform`
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask blockSize:(int)blockSize useHarrisDetector:(BOOL)useHarrisDetector k:(double)k NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:useHarrisDetector:k:));
/**
 * Determines strong corners on an image.
 *
 * Overload without the `k` parameter; the default Harris free parameter (k = 0.04 per the wrapped
 * C++ signature shown above) is used.
 *
 * The function finds the most prominent corners in the image or in the specified image region, as
 * described in CITE: Shi94
 *
 * - Function calculates the corner quality measure at every source image pixel using the
 * #cornerMinEigenVal or #cornerHarris .
 * - Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
 * retained).
 * - The corners with the minimal eigenvalue less than
 * `$$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)$$` are rejected.
 * - The remaining corners are sorted by the quality measure in the descending order.
 * - Function throws away each corner for which there is a stronger corner at a distance less than
 * minDistance.
 *
 * The function can be used to initialize a point-based tracker of an object.
 *
 * NOTE: If the function is called with different values A and B of the parameter qualityLevel , and
 * A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
 * with qualityLevel=B .
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Optional region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs .
 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
 * or #cornerMinEigenVal.
 *
 * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `calcOpticalFlowPyrLK`, `estimateRigidTransform`
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask blockSize:(int)blockSize useHarrisDetector:(BOOL)useHarrisDetector NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:useHarrisDetector:));
/**
 * Determines strong corners on an image.
 *
 * Overload without `useHarrisDetector` and `k`; the defaults (useHarrisDetector = false, k = 0.04
 * per the wrapped C++ signature shown above) are used.
 *
 * The function finds the most prominent corners in the image or in the specified image region, as
 * described in CITE: Shi94
 *
 * - Function calculates the corner quality measure at every source image pixel using the
 * #cornerMinEigenVal or #cornerHarris .
 * - Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
 * retained).
 * - The corners with the minimal eigenvalue less than
 * `$$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)$$` are rejected.
 * - The remaining corners are sorted by the quality measure in the descending order.
 * - Function throws away each corner for which there is a stronger corner at a distance less than
 * minDistance.
 *
 * The function can be used to initialize a point-based tracker of an object.
 *
 * NOTE: If the function is called with different values A and B of the parameter qualityLevel , and
 * A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
 * with qualityLevel=B .
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Optional region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs .
 *
 * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `calcOpticalFlowPyrLK`, `estimateRigidTransform`
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask blockSize:(int)blockSize NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:));
/**
 * Determines strong corners on an image.
 *
 * Overload without `blockSize`, `useHarrisDetector` and `k`; the defaults (blockSize = 3,
 * useHarrisDetector = false, k = 0.04 per the wrapped C++ signature shown above) are used.
 *
 * The function finds the most prominent corners in the image or in the specified image region, as
 * described in CITE: Shi94
 *
 * - Function calculates the corner quality measure at every source image pixel using the
 * #cornerMinEigenVal or #cornerHarris .
 * - Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
 * retained).
 * - The corners with the minimal eigenvalue less than
 * `$$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)$$` are rejected.
 * - The remaining corners are sorted by the quality measure in the descending order.
 * - Function throws away each corner for which there is a stronger corner at a distance less than
 * minDistance.
 *
 * The function can be used to initialize a point-based tracker of an object.
 *
 * NOTE: If the function is called with different values A and B of the parameter qualityLevel , and
 * A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
 * with qualityLevel=B .
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Optional region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 *
 * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `calcOpticalFlowPyrLK`, `estimateRigidTransform`
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:));
/**
 * Determines strong corners on an image.
 *
 * Overload without `mask`, `blockSize`, `useHarrisDetector` and `k`; the defaults (empty mask,
 * blockSize = 3, useHarrisDetector = false, k = 0.04 per the wrapped C++ signature shown above)
 * are used.
 *
 * The function finds the most prominent corners in the image or in the specified image region, as
 * described in CITE: Shi94
 *
 * - Function calculates the corner quality measure at every source image pixel using the
 * #cornerMinEigenVal or #cornerHarris .
 * - Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
 * retained).
 * - The corners with the minimal eigenvalue less than
 * `$$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)$$` are rejected.
 * - The remaining corners are sorted by the quality measure in the descending order.
 * - Function throws away each corner for which there is a stronger corner at a distance less than
 * minDistance.
 *
 * The function can be used to initialize a point-based tracker of an object.
 *
 * NOTE: If the function is called with different values A and B of the parameter qualityLevel , and
 * A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
 * with qualityLevel=B .
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 *
 * @see `+cornerMinEigenVal:dst:blockSize:ksize:borderType:`, `+cornerHarris:dst:blockSize:ksize:k:borderType:`, `calcOpticalFlowPyrLK`, `estimateRigidTransform`
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:));
//
// void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04)
//
/**
 * Determines strong corners on an image, with an explicit Sobel aperture (`gradientSize`)
 * for the derivative computation. See
 * `+goodFeaturesToTrack:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:useHarrisDetector:k:`
 * for the full algorithm description and the meaning of the shared parameters.
 *
 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
 * See cornerEigenValsAndVecs .
 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
 * or #cornerMinEigenVal.
 * @param k Free parameter of the Harris detector.
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask blockSize:(int)blockSize gradientSize:(int)gradientSize useHarrisDetector:(BOOL)useHarrisDetector k:(double)k NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:gradientSize:useHarrisDetector:k:));
/**
 * Same as above, without `k`; the default Harris free parameter (k = 0.04 per the wrapped C++
 * signature shown above) is used.
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask blockSize:(int)blockSize gradientSize:(int)gradientSize useHarrisDetector:(BOOL)useHarrisDetector NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:gradientSize:useHarrisDetector:));
/**
 * Same as above, without `useHarrisDetector` and `k`; the defaults (useHarrisDetector = false,
 * k = 0.04 per the wrapped C++ signature shown above) are used.
 */
+ (void)goodFeaturesToTrack:(Mat*)image corners:(NSMutableArray<Point2i*>*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask blockSize:(int)blockSize gradientSize:(int)gradientSize NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:blockSize:gradientSize:));
  2453. //
  2454. // void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04)
  2455. //
/**
 * Same as above, but returns also quality measure of the detected corners.
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 * @param cornersQuality Output vector of quality measure of the detected corners.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs .
 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
 * See cornerEigenValsAndVecs .
 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
 * or #cornerMinEigenVal.
 * @param k Free parameter of the Harris detector.
 */
+ (void)goodFeaturesToTrackWithQuality:(Mat*)image corners:(Mat*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask cornersQuality:(Mat*)cornersQuality blockSize:(int)blockSize gradientSize:(int)gradientSize useHarrisDetector:(BOOL)useHarrisDetector k:(double)k NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:cornersQuality:blockSize:gradientSize:useHarrisDetector:k:));
/**
 * Same as above, but returns also quality measure of the detected corners.
 *
 * Overload without the `k` parameter; the default Harris free parameter (k = 0.04 per the wrapped
 * C++ signature shown above) is used.
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 * @param cornersQuality Output vector of quality measure of the detected corners.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs .
 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
 * See cornerEigenValsAndVecs .
 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
 * or #cornerMinEigenVal.
 */
+ (void)goodFeaturesToTrackWithQuality:(Mat*)image corners:(Mat*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask cornersQuality:(Mat*)cornersQuality blockSize:(int)blockSize gradientSize:(int)gradientSize useHarrisDetector:(BOOL)useHarrisDetector NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:cornersQuality:blockSize:gradientSize:useHarrisDetector:));
/**
 * Same as above, but returns also quality measure of the detected corners.
 *
 * Overload without `useHarrisDetector` and `k`; the defaults (useHarrisDetector = false, k = 0.04
 * per the wrapped C++ signature shown above) are used.
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
 * @param cornersQuality Output vector of quality measure of the detected corners.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs .
 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
 * See cornerEigenValsAndVecs .
 */
+ (void)goodFeaturesToTrackWithQuality:(Mat*)image corners:(Mat*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask cornersQuality:(Mat*)cornersQuality blockSize:(int)blockSize gradientSize:(int)gradientSize NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:cornersQuality:blockSize:gradientSize:));
  2534. /**
  2535. * Same as above, but returns also quality measure of the detected corners.
  2536. *
  2537. * @param image Input 8-bit or floating-point 32-bit, single-channel image.
  2538. * @param corners Output vector of detected corners.
  2539. * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
  2540. * the strongest of them is returned. `maxCorners <= 0` implies that no limit on the maximum is set
  2541. * and all detected corners are returned.
  2542. * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
  2543. * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
  2544. * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
  2545. * quality measure less than the product are rejected. For example, if the best corner has the
  2546. * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
  2547. * less than 15 are rejected.
  2548. * @param minDistance Minimum possible Euclidean distance between the returned corners.
  2549. * @param mask Region of interest. If the image is not empty (it needs to have the type
  2550. * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
  2551. * @param cornersQuality Output vector of quality measure of the detected corners.
  2552. * @param blockSize Size of an average block for computing a derivative covariation matrix over each
  2553. * pixel neighborhood. See cornerEigenValsAndVecs .
  2556. */
  2557. + (void)goodFeaturesToTrackWithQuality:(Mat*)image corners:(Mat*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask cornersQuality:(Mat*)cornersQuality blockSize:(int)blockSize NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:cornersQuality:blockSize:));
  2558. /**
  2559. * Same as above, but returns also quality measure of the detected corners.
  2560. *
  2561. * @param image Input 8-bit or floating-point 32-bit, single-channel image.
  2562. * @param corners Output vector of detected corners.
  2563. * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
  2564. * the strongest of them is returned. `maxCorners <= 0` implies that no limit on the maximum is set
  2565. * and all detected corners are returned.
  2566. * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
  2567. * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
  2568. * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
  2569. * quality measure less than the product are rejected. For example, if the best corner has the
  2570. * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
  2571. * less than 15 are rejected.
  2572. * @param minDistance Minimum possible Euclidean distance between the returned corners.
  2573. * @param mask Region of interest. If the image is not empty (it needs to have the type
  2574. * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
  2575. * @param cornersQuality Output vector of quality measure of the detected corners.
  2579. */
  2580. + (void)goodFeaturesToTrackWithQuality:(Mat*)image corners:(Mat*)corners maxCorners:(int)maxCorners qualityLevel:(double)qualityLevel minDistance:(double)minDistance mask:(Mat*)mask cornersQuality:(Mat*)cornersQuality NS_SWIFT_NAME(goodFeaturesToTrack(image:corners:maxCorners:qualityLevel:minDistance:mask:cornersQuality:));
  2581. //
  2582. // void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
  2583. //
  2584. /**
  2585. * Finds lines in a binary image using the standard Hough transform.
  2586. *
  2587. * The function implements the standard or standard multi-scale Hough transform algorithm for line
  2588. * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough
  2589. * transform.
  2590. *
  2591. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2592. * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
  2593. * `$$(\rho, \theta)$$` or `$$(\rho, \theta, \textrm{votes})$$` . `$$\rho$$` is the distance from the coordinate origin `$$(0,0)$$` (top-left corner of
  2594. * the image). `$$\theta$$` is the line rotation angle in radians (
  2595. * `$$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$$` ).
  2596. * `$$\textrm{votes}$$` is the value of accumulator.
  2597. * @param rho Distance resolution of the accumulator in pixels.
  2598. * @param theta Angle resolution of the accumulator in radians.
  2599. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2600. * votes ( `$$>\texttt{threshold}$$` ).
  2601. * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
  2602. * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
  2603. * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
  2604. * parameters should be positive.
  2605. * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
  2606. * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
  2607. * Must fall between 0 and max_theta.
  2608. * @param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.
  2609. * Must fall between min_theta and CV_PI.
  2610. */
  2611. + (void)HoughLines:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn stn:(double)stn min_theta:(double)min_theta max_theta:(double)max_theta NS_SWIFT_NAME(HoughLines(image:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:));
  2612. /**
  2613. * Finds lines in a binary image using the standard Hough transform.
  2614. *
  2615. * The function implements the standard or standard multi-scale Hough transform algorithm for line
  2616. * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough
  2617. * transform.
  2618. *
  2619. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2620. * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
  2621. * `$$(\rho, \theta)$$` or `$$(\rho, \theta, \textrm{votes})$$` . `$$\rho$$` is the distance from the coordinate origin `$$(0,0)$$` (top-left corner of
  2622. * the image). `$$\theta$$` is the line rotation angle in radians (
  2623. * `$$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$$` ).
  2624. * `$$\textrm{votes}$$` is the value of accumulator.
  2625. * @param rho Distance resolution of the accumulator in pixels.
  2626. * @param theta Angle resolution of the accumulator in radians.
  2627. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2628. * votes ( `$$>\texttt{threshold}$$` ).
  2629. * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
  2630. * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
  2631. * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
  2632. * parameters should be positive.
  2633. * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
  2634. * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
  2635. * Must fall between 0 and max_theta.
  2637. */
  2638. + (void)HoughLines:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn stn:(double)stn min_theta:(double)min_theta NS_SWIFT_NAME(HoughLines(image:lines:rho:theta:threshold:srn:stn:min_theta:));
  2639. /**
  2640. * Finds lines in a binary image using the standard Hough transform.
  2641. *
  2642. * The function implements the standard or standard multi-scale Hough transform algorithm for line
  2643. * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough
  2644. * transform.
  2645. *
  2646. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2647. * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
  2648. * `$$(\rho, \theta)$$` or `$$(\rho, \theta, \textrm{votes})$$` . `$$\rho$$` is the distance from the coordinate origin `$$(0,0)$$` (top-left corner of
  2649. * the image). `$$\theta$$` is the line rotation angle in radians (
  2650. * `$$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$$` ).
  2651. * `$$\textrm{votes}$$` is the value of accumulator.
  2652. * @param rho Distance resolution of the accumulator in pixels.
  2653. * @param theta Angle resolution of the accumulator in radians.
  2654. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2655. * votes ( `$$>\texttt{threshold}$$` ).
  2656. * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
  2657. * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
  2658. * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
  2659. * parameters should be positive.
  2660. * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
  2663. */
  2664. + (void)HoughLines:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn stn:(double)stn NS_SWIFT_NAME(HoughLines(image:lines:rho:theta:threshold:srn:stn:));
  2665. /**
  2666. * Finds lines in a binary image using the standard Hough transform.
  2667. *
  2668. * The function implements the standard or standard multi-scale Hough transform algorithm for line
  2669. * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough
  2670. * transform.
  2671. *
  2672. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2673. * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
  2674. * `$$(\rho, \theta)$$` or `$$(\rho, \theta, \textrm{votes})$$` . `$$\rho$$` is the distance from the coordinate origin `$$(0,0)$$` (top-left corner of
  2675. * the image). `$$\theta$$` is the line rotation angle in radians (
  2676. * `$$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$$` ).
  2677. * `$$\textrm{votes}$$` is the value of accumulator.
  2678. * @param rho Distance resolution of the accumulator in pixels.
  2679. * @param theta Angle resolution of the accumulator in radians.
  2680. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2681. * votes ( `$$>\texttt{threshold}$$` ).
  2682. * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
  2683. * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
  2684. * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
  2685. * parameters should be positive.
  2688. */
  2689. + (void)HoughLines:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn NS_SWIFT_NAME(HoughLines(image:lines:rho:theta:threshold:srn:));
  2690. /**
  2691. * Finds lines in a binary image using the standard Hough transform.
  2692. *
  2693. * The function implements the standard or standard multi-scale Hough transform algorithm for line
  2694. * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough
  2695. * transform.
  2696. *
  2697. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2698. * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
  2699. * `$$(\rho, \theta)$$` or `$$(\rho, \theta, \textrm{votes})$$` . `$$\rho$$` is the distance from the coordinate origin `$$(0,0)$$` (top-left corner of
  2700. * the image). `$$\theta$$` is the line rotation angle in radians (
  2701. * `$$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$$` ).
  2702. * `$$\textrm{votes}$$` is the value of accumulator.
  2703. * @param rho Distance resolution of the accumulator in pixels.
  2704. * @param theta Angle resolution of the accumulator in radians.
  2705. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2706. * votes ( `$$>\texttt{threshold}$$` ).
  2712. */
  2713. + (void)HoughLines:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold NS_SWIFT_NAME(HoughLines(image:lines:rho:theta:threshold:));
  2714. //
  2715. // void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0)
  2716. //
  2717. /**
  2718. * Finds line segments in a binary image using the probabilistic Hough transform.
  2719. *
  2720. * The function implements the probabilistic Hough transform algorithm for line detection, described
  2721. * in CITE: Matas00
  2722. *
  2723. * See the line detection example below:
  2724. * INCLUDE: snippets/imgproc_HoughLinesP.cpp
  2725. * This is a sample picture the function parameters have been tuned for:
  2726. *
  2727. * ![image](pics/building.jpg)
  2728. *
  2729. * And this is the output of the above program in case of the probabilistic Hough transform:
  2730. *
  2731. * ![image](pics/houghp.png)
  2732. *
  2733. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2734. * @param lines Output vector of lines. Each line is represented by a 4-element vector
  2735. * `$$(x_1, y_1, x_2, y_2)$$` , where `$$(x_1,y_1)$$` and `$$(x_2, y_2)$$` are the ending points of each detected
  2736. * line segment.
  2737. * @param rho Distance resolution of the accumulator in pixels.
  2738. * @param theta Angle resolution of the accumulator in radians.
  2739. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2740. * votes ( `$$>\texttt{threshold}$$` ).
  2741. * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
  2742. * @param maxLineGap Maximum allowed gap between points on the same line to link them.
  2743. *
  2744. * @see `LineSegmentDetector`
  2745. */
  2746. + (void)HoughLinesP:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold minLineLength:(double)minLineLength maxLineGap:(double)maxLineGap NS_SWIFT_NAME(HoughLinesP(image:lines:rho:theta:threshold:minLineLength:maxLineGap:));
  2747. /**
  2748. * Finds line segments in a binary image using the probabilistic Hough transform.
  2749. *
  2750. * The function implements the probabilistic Hough transform algorithm for line detection, described
  2751. * in CITE: Matas00
  2752. *
  2753. * See the line detection example below:
  2754. * INCLUDE: snippets/imgproc_HoughLinesP.cpp
  2755. * This is a sample picture the function parameters have been tuned for:
  2756. *
  2757. * ![image](pics/building.jpg)
  2758. *
  2759. * And this is the output of the above program in case of the probabilistic Hough transform:
  2760. *
  2761. * ![image](pics/houghp.png)
  2762. *
  2763. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2764. * @param lines Output vector of lines. Each line is represented by a 4-element vector
  2765. * `$$(x_1, y_1, x_2, y_2)$$` , where `$$(x_1,y_1)$$` and `$$(x_2, y_2)$$` are the ending points of each detected
  2766. * line segment.
  2767. * @param rho Distance resolution of the accumulator in pixels.
  2768. * @param theta Angle resolution of the accumulator in radians.
  2769. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2770. * votes ( `$$>\texttt{threshold}$$` ).
  2771. * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
  2772. *
  2773. * @see `LineSegmentDetector`
  2774. */
  2775. + (void)HoughLinesP:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold minLineLength:(double)minLineLength NS_SWIFT_NAME(HoughLinesP(image:lines:rho:theta:threshold:minLineLength:));
  2776. /**
  2777. * Finds line segments in a binary image using the probabilistic Hough transform.
  2778. *
  2779. * The function implements the probabilistic Hough transform algorithm for line detection, described
  2780. * in CITE: Matas00
  2781. *
  2782. * See the line detection example below:
  2783. * INCLUDE: snippets/imgproc_HoughLinesP.cpp
  2784. * This is a sample picture the function parameters have been tuned for:
  2785. *
  2786. * ![image](pics/building.jpg)
  2787. *
  2788. * And this is the output of the above program in case of the probabilistic Hough transform:
  2789. *
  2790. * ![image](pics/houghp.png)
  2791. *
  2792. * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
  2793. * @param lines Output vector of lines. Each line is represented by a 4-element vector
  2794. * `$$(x_1, y_1, x_2, y_2)$$` , where `$$(x_1,y_1)$$` and `$$(x_2, y_2)$$` are the ending points of each detected
  2795. * line segment.
  2796. * @param rho Distance resolution of the accumulator in pixels.
  2797. * @param theta Angle resolution of the accumulator in radians.
  2798. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2799. * votes ( `$$>\texttt{threshold}$$` ).
  2800. *
  2801. * @see `LineSegmentDetector`
  2802. */
  2803. + (void)HoughLinesP:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold NS_SWIFT_NAME(HoughLinesP(image:lines:rho:theta:threshold:));
  2804. //
  2805. // void cv::HoughLinesPointSet(Mat point, Mat& lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step)
  2806. //
  2807. /**
  2808. * Finds lines in a set of points using the standard Hough transform.
  2809. *
  2810. * The function finds lines in a set of points using a modification of the Hough transform.
  2811. * INCLUDE: snippets/imgproc_HoughLinesPointSet.cpp
  2812. * @param point Input vector of points. Each vector must be encoded as a Point vector `$$(x,y)$$`. Type must be CV_32FC2 or CV_32SC2.
  2813. * @param lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> `$$(votes, rho, theta)$$`.
  2814. * The larger the value of 'votes', the higher the reliability of the Hough line.
  2815. * @param lines_max Max count of Hough lines.
  2816. * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
  2817. * votes ( `$$>\texttt{threshold}$$` ).
  2818. * @param min_rho Minimum value for `$$\rho$$` for the accumulator (Note: `$$\rho$$` can be negative. The absolute value `$$|\rho|$$` is the distance of a line to the origin.).
  2819. * @param max_rho Maximum value for `$$\rho$$` for the accumulator.
  2820. * @param rho_step Distance resolution of the accumulator.
  2821. * @param min_theta Minimum angle value of the accumulator in radians.
  2822. * @param max_theta Maximum angle value of the accumulator in radians.
  2823. * @param theta_step Angle resolution of the accumulator in radians.
  2824. */
  2825. + (void)HoughLinesPointSet:(Mat*)point lines:(Mat*)lines lines_max:(int)lines_max threshold:(int)threshold min_rho:(double)min_rho max_rho:(double)max_rho rho_step:(double)rho_step min_theta:(double)min_theta max_theta:(double)max_theta theta_step:(double)theta_step NS_SWIFT_NAME(HoughLinesPointSet(point:lines:lines_max:threshold:min_rho:max_rho:rho_step:min_theta:max_theta:theta_step:));
  2826. //
  2827. // void cv::HoughCircles(Mat image, Mat& circles, HoughModes method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
  2828. //
  2829. /**
  2830. * Finds circles in a grayscale image using the Hough transform.
  2831. *
  2832. * The function finds circles in a grayscale image using a modification of the Hough transform.
  2833. *
  2834. * Example: :
  2835. * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
  2836. *
  2837. * NOTE: Usually the function detects the centers of circles well. However, it may fail to find correct
  2838. * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
  2839. * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
  2840. * to return centers only without radius search, and find the correct radius using an additional procedure.
  2841. *
  2842. * It also helps to smooth image a bit unless it's already soft. For example,
  2843. * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
  2844. *
  2845. * @param image 8-bit, single-channel, grayscale input image.
  2846. * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
  2847. * floating-point vector `$$(x, y, radius)$$` or `$$(x, y, radius, votes)$$` .
  2848. * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
  2849. * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
  2850. * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
  2851. * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
  2852. * unless some very small circles need to be detected.
  2853. * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
  2854. * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
  2855. * too large, some circles may be missed.
  2856. * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
  2857. * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
  2858. * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
  2859. * should normally be higher, such as 300, for normally exposed and contrasty images.
  2860. * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
  2861. * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
  2862. * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
  2863. * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
  2864. * The closer it is to 1, the better-shaped circles the algorithm selects. In most cases 0.9 should be fine.
  2865. * If you want to get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
  2866. * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
  2867. * @param minRadius Minimum circle radius.
  2868. * @param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, #HOUGH_GRADIENT returns
  2869. * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radii.
  2870. *
  2871. * @see `+fitEllipse:`, `+minEnclosingCircle:center:radius:`
  2872. */
  2873. + (void)HoughCircles:(Mat*)image circles:(Mat*)circles method:(HoughModes)method dp:(double)dp minDist:(double)minDist param1:(double)param1 param2:(double)param2 minRadius:(int)minRadius maxRadius:(int)maxRadius NS_SWIFT_NAME(HoughCircles(image:circles:method:dp:minDist:param1:param2:minRadius:maxRadius:));
  2874. /**
  2875. * Finds circles in a grayscale image using the Hough transform.
  2876. *
  2877. * The function finds circles in a grayscale image using a modification of the Hough transform.
  2878. *
  2879. * Example: :
  2880. * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
  2881. *
  2882. * NOTE: Usually the function detects the centers of circles well. However, it may fail to find correct
  2883. * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
  2884. * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
  2885. * to return centers only without radius search, and find the correct radius using an additional procedure.
  2886. *
  2887. * It also helps to smooth image a bit unless it's already soft. For example,
  2888. * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
  2889. *
  2890. * @param image 8-bit, single-channel, grayscale input image.
  2891. * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
  2892. * floating-point vector `$$(x, y, radius)$$` or `$$(x, y, radius, votes)$$` .
  2893. * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
  2894. * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
  2895. * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
  2896. * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
  2897. * unless some very small circles need to be detected.
  2898. * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
  2899. * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
  2900. * too large, some circles may be missed.
  2901. * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
  2902. * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
  2903. * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
  2904. * should normally be higher, such as 300, for normally exposed and contrasty images.
  2905. * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
  2906. * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
  2907. * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
  2908. * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
  2909. * The closer it is to 1, the better-shaped circles the algorithm selects. In most cases 0.9 should be fine.
  2910. * If you want to get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
  2911. * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
  2912. * @param minRadius Minimum circle radius.
  2914. *
  2915. * @see `+fitEllipse:`, `+minEnclosingCircle:center:radius:`
  2916. */
  2917. + (void)HoughCircles:(Mat*)image circles:(Mat*)circles method:(HoughModes)method dp:(double)dp minDist:(double)minDist param1:(double)param1 param2:(double)param2 minRadius:(int)minRadius NS_SWIFT_NAME(HoughCircles(image:circles:method:dp:minDist:param1:param2:minRadius:));
  2918. /**
  2919. * Finds circles in a grayscale image using the Hough transform.
  2920. *
  2921. * The function finds circles in a grayscale image using a modification of the Hough transform.
  2922. *
  2923. * Example: :
  2924. * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
  2925. *
  2926. * NOTE: Usually the function detects the centers of circles well. However, it may fail to find correct
  2927. * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
  2928. * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
  2929. * to return centers only without radius search, and find the correct radius using an additional procedure.
  2930. *
  2931. * It also helps to smooth image a bit unless it's already soft. For example,
  2932. * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
  2933. *
  2934. * @param image 8-bit, single-channel, grayscale input image.
  2935. * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
  2936. * floating-point vector `$$(x, y, radius)$$` or `$$(x, y, radius, votes)$$` .
  2937. * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
  2938. * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
  2939. * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
  2940. * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
  2941. * unless some very small circles need to be detected.
  2942. * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
  2943. * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
  2944. * too large, some circles may be missed.
  2945. * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
  2946. * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
  2947. * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
  2948. * should normally be higher, such as 300, for normally exposed and contrasty images.
  2949. * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
  2950. * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
  2951. * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
  2952. * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
  2953. * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.
  2954. * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
  2955. * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
  2956. * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.
  2957. *
  2958. * @see `+fitEllipse:`, `+minEnclosingCircle:center:radius:`
  2959. */
  2960. + (void)HoughCircles:(Mat*)image circles:(Mat*)circles method:(HoughModes)method dp:(double)dp minDist:(double)minDist param1:(double)param1 param2:(double)param2 NS_SWIFT_NAME(HoughCircles(image:circles:method:dp:minDist:param1:param2:));
/**
 * Finds circles in a grayscale image using the Hough transform.
 *
 * The function finds circles in a grayscale image using a modification of the Hough transform.
 *
 * Example: :
 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
 *
 * NOTE: Usually the function detects the centers of circles well. However, it may fail to find correct
 * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
 * to return centers only without radius search, and find the correct radius using an additional procedure.
 *
 * It also helps to smooth the image a bit unless it's already soft. For example,
 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
 *
 * @param image 8-bit, single-channel, grayscale input image.
 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
 * floating-point vector `$$(x, y, radius)$$` or `$$(x, y, radius, votes)$$` .
 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
 * unless some very small circles need to be detected.
 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
 * too large, some circles may be missed.
 * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
 * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
 * should normally be higher, such as 300, for normally exposed and contrasty images.
 *
 * NOTE: This overload uses the default value of the second method-specific parameter (`param2`):
 * for #HOUGH_GRADIENT it is the accumulator threshold for the circle centers at the detection stage;
 * for #HOUGH_GRADIENT_ALT it is the circle "perfectness" measure. See the full overload for details.
 * #HOUGH_GRADIENT_ALT always computes circle radii.
 *
 * @see `+fitEllipse:`, `+minEnclosingCircle:center:radius:`
 */
+ (void)HoughCircles:(Mat*)image circles:(Mat*)circles method:(HoughModes)method dp:(double)dp minDist:(double)minDist param1:(double)param1 NS_SWIFT_NAME(HoughCircles(image:circles:method:dp:minDist:param1:));
/**
 * Finds circles in a grayscale image using the Hough transform.
 *
 * The function finds circles in a grayscale image using a modification of the Hough transform.
 *
 * Example: :
 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
 *
 * NOTE: Usually the function detects the centers of circles well. However, it may fail to find correct
 * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
 * to return centers only without radius search, and find the correct radius using an additional procedure.
 *
 * It also helps to smooth the image a bit unless it's already soft. For example,
 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
 *
 * @param image 8-bit, single-channel, grayscale input image.
 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
 * floating-point vector `$$(x, y, radius)$$` or `$$(x, y, radius, votes)$$` .
 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
 * unless some very small circles need to be detected.
 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
 * too large, some circles may be missed.
 *
 * NOTE: This overload uses the default values of the two method-specific parameters:
 * `param1` is the higher threshold of the two passed to the Canny edge detector, and
 * `param2` is the accumulator threshold (#HOUGH_GRADIENT) or the circle "perfectness" measure
 * (#HOUGH_GRADIENT_ALT). See the full overload for details.
 * #HOUGH_GRADIENT_ALT always computes circle radii.
 *
 * @see `+fitEllipse:`, `+minEnclosingCircle:center:radius:`
 */
+ (void)HoughCircles:(Mat*)image circles:(Mat*)circles method:(HoughModes)method dp:(double)dp minDist:(double)minDist NS_SWIFT_NAME(HoughCircles(image:circles:method:dp:minDist:));
  3044. //
  3045. // void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, BorderTypes borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
  3046. //
/**
 * Erodes an image by using a specific structuring element.
 *
 * The function erodes the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the minimum is taken:
 *
 * `$$\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement.
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times erosion is applied.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 * @param borderValue border value in case of a constant border
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)erode:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations borderType:(BorderTypes)borderType borderValue:(Scalar*)borderValue NS_SWIFT_NAME(erode(src:dst:kernel:anchor:iterations:borderType:borderValue:));
/**
 * Erodes an image by using a specific structuring element.
 *
 * The function erodes the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the minimum is taken:
 *
 * `$$\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement.
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times erosion is applied.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 *
 * NOTE: This overload uses the default border value (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)erode:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations borderType:(BorderTypes)borderType NS_SWIFT_NAME(erode(src:dst:kernel:anchor:iterations:borderType:));
/**
 * Erodes an image by using a specific structuring element.
 *
 * The function erodes the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the minimum is taken:
 *
 * `$$\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement.
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times erosion is applied.
 *
 * NOTE: This overload uses the default border type (BORDER_CONSTANT) and border value
 * (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)erode:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations NS_SWIFT_NAME(erode(src:dst:kernel:anchor:iterations:));
/**
 * Erodes an image by using a specific structuring element.
 *
 * The function erodes the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the minimum is taken:
 *
 * `$$\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement.
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 *
 * NOTE: This overload applies erosion once (iterations = 1) and uses the default border type
 * (BORDER_CONSTANT) and border value (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)erode:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor NS_SWIFT_NAME(erode(src:dst:kernel:anchor:));
/**
 * Erodes an image by using a specific structuring element.
 *
 * The function erodes the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the minimum is taken:
 *
 * `$$\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement.
 *
 * NOTE: This overload uses the default anchor (-1, -1), i.e. the anchor is at the element center,
 * applies erosion once (iterations = 1), and uses the default border type (BORDER_CONSTANT) and
 * border value (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)erode:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel NS_SWIFT_NAME(erode(src:dst:kernel:));
  3157. //
  3158. // void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, BorderTypes borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
  3159. //
/**
 * Dilates an image by using a specific structuring element.
 *
 * The function dilates the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the maximum is taken:
 * `$$\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for dilation; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times dilation is applied.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 * @param borderValue border value in case of a constant border
 * @see `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)dilate:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations borderType:(BorderTypes)borderType borderValue:(Scalar*)borderValue NS_SWIFT_NAME(dilate(src:dst:kernel:anchor:iterations:borderType:borderValue:));
/**
 * Dilates an image by using a specific structuring element.
 *
 * The function dilates the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the maximum is taken:
 * `$$\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for dilation; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times dilation is applied.
 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 *
 * NOTE: This overload uses the default border value (morphologyDefaultBorderValue()).
 * @see `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)dilate:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations borderType:(BorderTypes)borderType NS_SWIFT_NAME(dilate(src:dst:kernel:anchor:iterations:borderType:));
/**
 * Dilates an image by using a specific structuring element.
 *
 * The function dilates the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the maximum is taken:
 * `$$\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for dilation; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times dilation is applied.
 *
 * NOTE: This overload uses the default border type (BORDER_CONSTANT) and border value
 * (morphologyDefaultBorderValue()).
 * @see `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)dilate:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations NS_SWIFT_NAME(dilate(src:dst:kernel:anchor:iterations:));
/**
 * Dilates an image by using a specific structuring element.
 *
 * The function dilates the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the maximum is taken:
 * `$$\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for dilation; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 *
 * NOTE: This overload applies dilation once (iterations = 1) and uses the default border type
 * (BORDER_CONSTANT) and border value (morphologyDefaultBorderValue()).
 * @see `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)dilate:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel anchor:(Point2i*)anchor NS_SWIFT_NAME(dilate(src:dst:kernel:anchor:));
/**
 * Dilates an image by using a specific structuring element.
 *
 * The function dilates the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the maximum is taken:
 * `$$\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')$$`
 *
 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for dilation; if `element=Mat()`, a `3 x 3` rectangular
 * structuring element is used. Kernel can be created using #getStructuringElement
 *
 * NOTE: This overload uses the default anchor (-1, -1), i.e. the anchor is at the element center,
 * applies dilation once (iterations = 1), and uses the default border type (BORDER_CONSTANT) and
 * border value (morphologyDefaultBorderValue()).
 * @see `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+morphologyEx:dst:op:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 */
+ (void)dilate:(Mat*)src dst:(Mat*)dst kernel:(Mat*)kernel NS_SWIFT_NAME(dilate(src:dst:kernel:));
  3265. //
  3266. // void cv::morphologyEx(Mat src, Mat& dst, MorphTypes op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, BorderTypes borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
  3267. //
/**
 * Performs advanced morphological transformations.
 *
 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
 * basic operations.
 *
 * Any of the operations can be done in-place. In case of multi-channel images, each channel is
 * processed independently.
 *
 * @param src Source image. The number of channels can be arbitrary. The depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as source image.
 * @param op Type of a morphological operation, see #MorphTypes
 * @param kernel Structuring element. It can be created using #getStructuringElement.
 * @param anchor Anchor position within the kernel. Negative values mean that the anchor is at the
 * kernel center.
 * @param iterations Number of times erosion and dilation are applied.
 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 * @param borderValue Border value in case of a constant border. The default value has a special
 * meaning.
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 * NOTE: The number of iterations is the number of times the erosion or dilation operation will be applied.
 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate).
 */
+ (void)morphologyEx:(Mat*)src dst:(Mat*)dst op:(MorphTypes)op kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations borderType:(BorderTypes)borderType borderValue:(Scalar*)borderValue NS_SWIFT_NAME(morphologyEx(src:dst:op:kernel:anchor:iterations:borderType:borderValue:));
/**
 * Performs advanced morphological transformations.
 *
 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
 * basic operations.
 *
 * Any of the operations can be done in-place. In case of multi-channel images, each channel is
 * processed independently.
 *
 * @param src Source image. The number of channels can be arbitrary. The depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as source image.
 * @param op Type of a morphological operation, see #MorphTypes
 * @param kernel Structuring element. It can be created using #getStructuringElement.
 * @param anchor Anchor position within the kernel. Negative values mean that the anchor is at the
 * kernel center.
 * @param iterations Number of times erosion and dilation are applied.
 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 *
 * NOTE: This overload uses the default border value (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 * NOTE: The number of iterations is the number of times the erosion or dilation operation will be applied.
 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate).
 */
+ (void)morphologyEx:(Mat*)src dst:(Mat*)dst op:(MorphTypes)op kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations borderType:(BorderTypes)borderType NS_SWIFT_NAME(morphologyEx(src:dst:op:kernel:anchor:iterations:borderType:));
/**
 * Performs advanced morphological transformations.
 *
 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
 * basic operations.
 *
 * Any of the operations can be done in-place. In case of multi-channel images, each channel is
 * processed independently.
 *
 * @param src Source image. The number of channels can be arbitrary. The depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as source image.
 * @param op Type of a morphological operation, see #MorphTypes
 * @param kernel Structuring element. It can be created using #getStructuringElement.
 * @param anchor Anchor position within the kernel. Negative values mean that the anchor is at the
 * kernel center.
 * @param iterations Number of times erosion and dilation are applied.
 *
 * NOTE: This overload uses the default border type (BORDER_CONSTANT) and border value
 * (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 * NOTE: The number of iterations is the number of times the erosion or dilation operation will be applied.
 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate).
 */
+ (void)morphologyEx:(Mat*)src dst:(Mat*)dst op:(MorphTypes)op kernel:(Mat*)kernel anchor:(Point2i*)anchor iterations:(int)iterations NS_SWIFT_NAME(morphologyEx(src:dst:op:kernel:anchor:iterations:));
/**
 * Performs advanced morphological transformations.
 *
 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
 * basic operations.
 *
 * Any of the operations can be done in-place. In case of multi-channel images, each channel is
 * processed independently.
 *
 * @param src Source image. The number of channels can be arbitrary. The depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as source image.
 * @param op Type of a morphological operation, see #MorphTypes
 * @param kernel Structuring element. It can be created using #getStructuringElement.
 * @param anchor Anchor position within the kernel. Negative values mean that the anchor is at the
 * kernel center.
 *
 * NOTE: This overload applies the operation once (iterations = 1) and uses the default border type
 * (BORDER_CONSTANT) and border value (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 * NOTE: The number of iterations is the number of times the erosion or dilation operation will be applied.
 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate).
 */
+ (void)morphologyEx:(Mat*)src dst:(Mat*)dst op:(MorphTypes)op kernel:(Mat*)kernel anchor:(Point2i*)anchor NS_SWIFT_NAME(morphologyEx(src:dst:op:kernel:anchor:));
/**
 * Performs advanced morphological transformations.
 *
 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
 * basic operations.
 *
 * Any of the operations can be done in-place. In case of multi-channel images, each channel is
 * processed independently.
 *
 * @param src Source image. The number of channels can be arbitrary. The depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as source image.
 * @param op Type of a morphological operation, see #MorphTypes
 * @param kernel Structuring element. It can be created using #getStructuringElement.
 *
 * NOTE: This overload uses the default anchor (negative values, i.e. the anchor is at the
 * kernel center), applies the operation once (iterations = 1), and uses the default border type
 * (BORDER_CONSTANT) and border value (morphologyDefaultBorderValue()).
 * @see `+dilate:dst:kernel:anchor:iterations:borderType:borderValue:`, `+erode:dst:kernel:anchor:iterations:borderType:borderValue:`, `+getStructuringElement:ksize:anchor:`
 * NOTE: The number of iterations is the number of times the erosion or dilation operation will be applied.
 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate).
 */
+ (void)morphologyEx:(Mat*)src dst:(Mat*)dst op:(MorphTypes)op kernel:(Mat*)kernel NS_SWIFT_NAME(morphologyEx(src:dst:op:kernel:));
  3388. //
  3389. // void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
  3390. //
  3391. /**
  3392. * Resizes an image.
  3393. *
  3394. * The function resize resizes the image src down to or up to the specified size. Note that the
  3395. * initial dst type or size are not taken into account. Instead, the size and type are derived from
  3396. * the `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst,
  3397. * you may call the function as follows:
  3398. *
  3399. * // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
  3400. * resize(src, dst, dst.size(), 0, 0, interpolation);
  3401. *
  3402. * If you want to decimate the image by factor of 2 in each direction, you can call the function this
  3403. * way:
  3404. *
  3405. * // specify fx and fy and let the function compute the destination image size.
  3406. * resize(src, dst, Size(), 0.5, 0.5, interpolation);
  3407. *
  3408. * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
  3409. * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
  3410. * (faster but still looks OK).
  3411. *
  3412. * @param src input image.
  3413. * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
  3414. * src.size(), fx, and fy; the type of dst is the same as of src.
  3415. * @param dsize output image size; if it equals zero (`None` in Python), it is computed as:
  3416. * `$$\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}$$`
  3417. * Either dsize or both fx and fy must be non-zero.
  3418. * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
  3419. * `$$\texttt{(double)dsize.width/src.cols}$$`
  3420. * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
  3421. * `$$\texttt{(double)dsize.height/src.rows}$$`
  3422. * @param interpolation interpolation method, see #InterpolationFlags
  3423. *
  3424. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`
  3425. */
  3426. + (void)resize:(Mat*)src dst:(Mat*)dst dsize:(Size2i*)dsize fx:(double)fx fy:(double)fy interpolation:(int)interpolation NS_SWIFT_NAME(resize(src:dst:dsize:fx:fy:interpolation:));
  3427. /**
  3428. * Resizes an image.
  3429. *
  3430. * The function resize resizes the image src down to or up to the specified size. Note that the
  3431. * initial dst type or size are not taken into account. Instead, the size and type are derived from
  3432. * the `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst,
  3433. * you may call the function as follows:
  3434. *
  3435. * // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
  3436. * resize(src, dst, dst.size(), 0, 0, interpolation);
  3437. *
  3438. * If you want to decimate the image by factor of 2 in each direction, you can call the function this
  3439. * way:
  3440. *
  3441. * // specify fx and fy and let the function compute the destination image size.
  3442. * resize(src, dst, Size(), 0.5, 0.5, interpolation);
  3443. *
  3444. * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
  3445. * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
  3446. * (faster but still looks OK).
  3447. *
  3448. * @param src input image.
  3449. * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
  3450. * src.size(), fx, and fy; the type of dst is the same as of src.
  3451. * @param dsize output image size; if it equals zero (`None` in Python), it is computed as:
  3452. * `$$\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}$$`
  3453. * Either dsize or both fx and fy must be non-zero.
  3454. * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
  3455. * `$$\texttt{(double)dsize.width/src.cols}$$`
  3456. * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
  3457. * `$$\texttt{(double)dsize.height/src.rows}$$`
  3458. *
  3459. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`
  3460. */
  3461. + (void)resize:(Mat*)src dst:(Mat*)dst dsize:(Size2i*)dsize fx:(double)fx fy:(double)fy NS_SWIFT_NAME(resize(src:dst:dsize:fx:fy:));
  3462. /**
  3463. * Resizes an image.
  3464. *
  3465. * The function resize resizes the image src down to or up to the specified size. Note that the
  3466. * initial dst type or size are not taken into account. Instead, the size and type are derived from
  3467. * the `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst,
  3468. * you may call the function as follows:
  3469. *
  3470. * // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
  3471. * resize(src, dst, dst.size(), 0, 0, interpolation);
  3472. *
  3473. * If you want to decimate the image by factor of 2 in each direction, you can call the function this
  3474. * way:
  3475. *
  3476. * // specify fx and fy and let the function compute the destination image size.
  3477. * resize(src, dst, Size(), 0.5, 0.5, interpolation);
  3478. *
  3479. * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
  3480. * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
  3481. * (faster but still looks OK).
  3482. *
  3483. * @param src input image.
  3484. * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
  3485. * src.size(), fx, and fy; the type of dst is the same as of src.
  3486. * @param dsize output image size; if it equals zero (`None` in Python), it is computed as:
  3487. * `$$\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}$$`
  3488. * Either dsize or both fx and fy must be non-zero.
  3489. * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
  3490. * `$$\texttt{(double)dsize.width/src.cols}$$`
  3492. *
  3493. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`
  3494. */
  3495. + (void)resize:(Mat*)src dst:(Mat*)dst dsize:(Size2i*)dsize fx:(double)fx NS_SWIFT_NAME(resize(src:dst:dsize:fx:));
  3496. /**
  3497. * Resizes an image.
  3498. *
  3499. * The function resize resizes the image src down to or up to the specified size. Note that the
  3500. * initial dst type or size are not taken into account. Instead, the size and type are derived from
  3501. * the `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst,
  3502. * you may call the function as follows:
  3503. *
  3504. * // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
  3505. * resize(src, dst, dst.size(), 0, 0, interpolation);
  3506. *
  3507. * If you want to decimate the image by factor of 2 in each direction, you can call the function this
  3508. * way:
  3509. *
  3510. * // specify fx and fy and let the function compute the destination image size.
  3511. * resize(src, dst, Size(), 0.5, 0.5, interpolation);
  3512. *
  3513. * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
  3514. * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
  3515. * (faster but still looks OK).
  3516. *
  3517. * @param src input image.
  3518. * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
  3519. * src.size(), fx, and fy; the type of dst is the same as of src.
  3520. * @param dsize output image size; if it equals zero (`None` in Python), it is computed as:
  3521. * `$$\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}$$`
  3522. * Either dsize or both fx and fy must be non-zero.
  3525. *
  3526. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`
  3527. */
  3528. + (void)resize:(Mat*)src dst:(Mat*)dst dsize:(Size2i*)dsize NS_SWIFT_NAME(resize(src:dst:dsize:));
  3529. //
  3530. // void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, BorderTypes borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
  3531. //
  3532. /**
  3533. * Applies an affine transformation to an image.
  3534. *
  3535. * The function warpAffine transforms the source image using the specified matrix:
  3536. *
  3537. * `$$\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})$$`
  3538. *
  3539. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
  3540. * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
  3541. * operate in-place.
  3542. *
  3543. * @param src input image.
  3544. * @param dst output image that has the size dsize and the same type as src .
  3545. * @param M `$$2\times 3$$` transformation matrix.
  3546. * @param dsize size of the output image.
  3547. * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
  3548. * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
  3549. * `$$\texttt{dst}\rightarrow\texttt{src}$$` ).
  3550. * @param borderMode pixel extrapolation method (see #BorderTypes); when
  3551. * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
  3552. * the "outliers" in the source image are not modified by the function.
  3553. * @param borderValue value used in case of a constant border; by default, it is 0.
  3554. *
  3555. * @see `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `transform`
  3556. */
  3557. + (void)warpAffine:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize flags:(int)flags borderMode:(BorderTypes)borderMode borderValue:(Scalar*)borderValue NS_SWIFT_NAME(warpAffine(src:dst:M:dsize:flags:borderMode:borderValue:));
  3558. /**
  3559. * Applies an affine transformation to an image.
  3560. *
  3561. * The function warpAffine transforms the source image using the specified matrix:
  3562. *
  3563. * `$$\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})$$`
  3564. *
  3565. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
  3566. * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
  3567. * operate in-place.
  3568. *
  3569. * @param src input image.
  3570. * @param dst output image that has the size dsize and the same type as src .
  3571. * @param M `$$2\times 3$$` transformation matrix.
  3572. * @param dsize size of the output image.
  3573. * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
  3574. * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
  3575. * `$$\texttt{dst}\rightarrow\texttt{src}$$` ).
  3576. * @param borderMode pixel extrapolation method (see #BorderTypes); when
  3577. * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
  3578. * the "outliers" in the source image are not modified by the function.
  3579. *
  3580. * @see `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `transform`
  3581. */
  3582. + (void)warpAffine:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize flags:(int)flags borderMode:(BorderTypes)borderMode NS_SWIFT_NAME(warpAffine(src:dst:M:dsize:flags:borderMode:));
  3583. /**
  3584. * Applies an affine transformation to an image.
  3585. *
  3586. * The function warpAffine transforms the source image using the specified matrix:
  3587. *
  3588. * `$$\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})$$`
  3589. *
  3590. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
  3591. * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
  3592. * operate in-place.
  3593. *
  3594. * @param src input image.
  3595. * @param dst output image that has the size dsize and the same type as src .
  3596. * @param M `$$2\times 3$$` transformation matrix.
  3597. * @param dsize size of the output image.
  3598. * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
  3599. * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
  3600. * `$$\texttt{dst}\rightarrow\texttt{src}$$` ).
  3603. *
  3604. * @see `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `transform`
  3605. */
  3606. + (void)warpAffine:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize flags:(int)flags NS_SWIFT_NAME(warpAffine(src:dst:M:dsize:flags:));
  3607. /**
  3608. * Applies an affine transformation to an image.
  3609. *
  3610. * The function warpAffine transforms the source image using the specified matrix:
  3611. *
  3612. * `$$\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})$$`
  3613. *
  3614. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
  3615. * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
  3616. * operate in-place.
  3617. *
  3618. * @param src input image.
  3619. * @param dst output image that has the size dsize and the same type as src .
  3620. * @param M `$$2\times 3$$` transformation matrix.
  3621. * @param dsize size of the output image.
  3626. *
  3627. * @see `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `transform`
  3628. */
  3629. + (void)warpAffine:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize NS_SWIFT_NAME(warpAffine(src:dst:M:dsize:));
  3630. //
  3631. // void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, BorderTypes borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
  3632. //
  3633. /**
  3634. * Applies a perspective transformation to an image.
  3635. *
  3636. * The function warpPerspective transforms the source image using the specified matrix:
  3637. *
  3638. * `$$\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
  3639. * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )$$`
  3640. *
  3641. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
  3642. * and then put in the formula above instead of M. The function cannot operate in-place.
  3643. *
  3644. * @param src input image.
  3645. * @param dst output image that has the size dsize and the same type as src .
  3646. * @param M `$$3\times 3$$` transformation matrix.
  3647. * @param dsize size of the output image.
  3648. * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
  3649. * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
  3650. * `$$\texttt{dst}\rightarrow\texttt{src}$$` ).
  3651. * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
  3652. * @param borderValue value used in case of a constant border; by default, it equals 0.
  3653. *
  3654. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `perspectiveTransform`
  3655. */
  3656. + (void)warpPerspective:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize flags:(int)flags borderMode:(BorderTypes)borderMode borderValue:(Scalar*)borderValue NS_SWIFT_NAME(warpPerspective(src:dst:M:dsize:flags:borderMode:borderValue:));
  3657. /**
  3658. * Applies a perspective transformation to an image.
  3659. *
  3660. * The function warpPerspective transforms the source image using the specified matrix:
  3661. *
  3662. * `$$\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
  3663. * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )$$`
  3664. *
  3665. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
  3666. * and then put in the formula above instead of M. The function cannot operate in-place.
  3667. *
  3668. * @param src input image.
  3669. * @param dst output image that has the size dsize and the same type as src .
  3670. * @param M `$$3\times 3$$` transformation matrix.
  3671. * @param dsize size of the output image.
  3672. * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
  3673. * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
  3674. * `$$\texttt{dst}\rightarrow\texttt{src}$$` ).
  3675. * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
  3676. *
  3677. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `perspectiveTransform`
  3678. */
  3679. + (void)warpPerspective:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize flags:(int)flags borderMode:(BorderTypes)borderMode NS_SWIFT_NAME(warpPerspective(src:dst:M:dsize:flags:borderMode:));
  3680. /**
  3681. * Applies a perspective transformation to an image.
  3682. *
  3683. * The function warpPerspective transforms the source image using the specified matrix:
  3684. *
  3685. * `$$\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
  3686. * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )$$`
  3687. *
  3688. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
  3689. * and then put in the formula above instead of M. The function cannot operate in-place.
  3690. *
  3691. * @param src input image.
  3692. * @param dst output image that has the size dsize and the same type as src .
  3693. * @param M `$$3\times 3$$` transformation matrix.
  3694. * @param dsize size of the output image.
  3695. * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
  3696. * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
  3697. * `$$\texttt{dst}\rightarrow\texttt{src}$$` ).
  3698. *
  3699. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `perspectiveTransform`
  3700. */
  3701. + (void)warpPerspective:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize flags:(int)flags NS_SWIFT_NAME(warpPerspective(src:dst:M:dsize:flags:));
  3702. /**
  3703. * Applies a perspective transformation to an image.
  3704. *
  3705. * The function warpPerspective transforms the source image using the specified matrix:
  3706. *
  3707. * `$$\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
  3708. * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )$$`
  3709. *
  3710. * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
  3711. * and then put in the formula above instead of M. The function cannot operate in-place.
  3712. *
  3713. * @param src input image.
  3714. * @param dst output image that has the size dsize and the same type as src .
  3715. * @param M `$$3\times 3$$` transformation matrix.
  3716. * @param dsize size of the output image.
  3719. *
  3720. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+resize:dst:dsize:fx:fy:interpolation:`, `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `+getRectSubPix:patchSize:center:patch:patchType:`, `perspectiveTransform`
  3721. */
  3722. + (void)warpPerspective:(Mat*)src dst:(Mat*)dst M:(Mat*)M dsize:(Size2i*)dsize NS_SWIFT_NAME(warpPerspective(src:dst:M:dsize:));
  3723. //
  3724. // void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, BorderTypes borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
  3725. //
  3726. /**
  3727. * Applies a generic geometrical transformation to an image.
  3728. *
  3729. * The function remap transforms the source image using the specified map:
  3730. *
  3731. * `$$\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))$$`
  3732. *
  3733. * where values of pixels with non-integer coordinates are computed using one of available
  3734. * interpolation methods. `$$map_x$$` and `$$map_y$$` can be encoded as separate floating-point maps
  3735. * in `$$map_1$$` and `$$map_2$$` respectively, or interleaved floating-point maps of `$$(x,y)$$` in
  3736. * `$$map_1$$`, or fixed-point maps created by using #convertMaps. The reason you might want to
  3737. * convert from floating to fixed-point representations of a map is that they can yield much faster
  3738. * (\~2x) remapping operations. In the converted case, `$$map_1$$` contains pairs (cvFloor(x),
  3739. * cvFloor(y)) and `$$map_2$$` contains indices in a table of interpolation coefficients.
  3740. *
  3741. * This function cannot operate in-place.
  3742. *
  3743. * @param src Source image.
  3744. * @param dst Destination image. It has the same size as map1 and the same type as src .
  3745. * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
  3746. * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
  3747. * representation to fixed-point for speed.
  3748. * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
  3749. * if map1 is (x,y) points), respectively.
  3750. * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
  3751. * and #INTER_LINEAR_EXACT are not supported by this function.
  3752. * @param borderMode Pixel extrapolation method (see #BorderTypes). When
  3753. * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
  3754. * corresponds to the "outliers" in the source image are not modified by the function.
  3755. * @param borderValue Value used in case of a constant border. By default, it is 0.
  3756. * NOTE:
  3757. * Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
  3758. */
  3759. + (void)remap:(Mat*)src dst:(Mat*)dst map1:(Mat*)map1 map2:(Mat*)map2 interpolation:(int)interpolation borderMode:(BorderTypes)borderMode borderValue:(Scalar*)borderValue NS_SWIFT_NAME(remap(src:dst:map1:map2:interpolation:borderMode:borderValue:));
  3760. /**
  3761. * Applies a generic geometrical transformation to an image.
  3762. *
  3763. * The function remap transforms the source image using the specified map:
  3764. *
  3765. * `$$\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))$$`
  3766. *
  3767. * where values of pixels with non-integer coordinates are computed using one of available
  3768. * interpolation methods. `$$map_x$$` and `$$map_y$$` can be encoded as separate floating-point maps
  3769. * in `$$map_1$$` and `$$map_2$$` respectively, or interleaved floating-point maps of `$$(x,y)$$` in
  3770. * `$$map_1$$`, or fixed-point maps created by using #convertMaps. The reason you might want to
  3771. * convert from floating to fixed-point representations of a map is that they can yield much faster
  3772. * (\~2x) remapping operations. In the converted case, `$$map_1$$` contains pairs (cvFloor(x),
  3773. * cvFloor(y)) and `$$map_2$$` contains indices in a table of interpolation coefficients.
  3774. *
  3775. * This function cannot operate in-place.
  3776. *
  3777. * @param src Source image.
  3778. * @param dst Destination image. It has the same size as map1 and the same type as src .
  3779. * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
  3780. * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
  3781. * representation to fixed-point for speed.
  3782. * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
  3783. * if map1 is (x,y) points), respectively.
  3784. * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
  3785. * and #INTER_LINEAR_EXACT are not supported by this function.
  3786. * @param borderMode Pixel extrapolation method (see #BorderTypes). When
  3787. * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
  3788. * corresponds to the "outliers" in the source image are not modified by the function.
  3789. * NOTE:
  3790. * Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
  3791. */
  3792. + (void)remap:(Mat*)src dst:(Mat*)dst map1:(Mat*)map1 map2:(Mat*)map2 interpolation:(int)interpolation borderMode:(BorderTypes)borderMode NS_SWIFT_NAME(remap(src:dst:map1:map2:interpolation:borderMode:));
  3793. /**
  3794. * Applies a generic geometrical transformation to an image.
  3795. *
  3796. * The function remap transforms the source image using the specified map:
  3797. *
  3798. * `$$\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))$$`
  3799. *
  3800. * where values of pixels with non-integer coordinates are computed using one of available
  3801. * interpolation methods. `$$map_x$$` and `$$map_y$$` can be encoded as separate floating-point maps
  3802. * in `$$map_1$$` and `$$map_2$$` respectively, or interleaved floating-point maps of `$$(x,y)$$` in
  3803. * `$$map_1$$`, or fixed-point maps created by using #convertMaps. The reason you might want to
  3804. * convert from floating to fixed-point representations of a map is that they can yield much faster
  3805. * (\~2x) remapping operations. In the converted case, `$$map_1$$` contains pairs (cvFloor(x),
  3806. * cvFloor(y)) and `$$map_2$$` contains indices in a table of interpolation coefficients.
  3807. *
  3808. * This function cannot operate in-place.
  3809. *
  3810. * @param src Source image.
  3811. * @param dst Destination image. It has the same size as map1 and the same type as src .
  3812. * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
  3813. * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
  3814. * representation to fixed-point for speed.
  3815. * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
  3816. * if map1 is (x,y) points), respectively.
  3817. * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
  3818. * and #INTER_LINEAR_EXACT are not supported by this function.
  3821. * NOTE:
  3822. * Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
  3823. */
  3824. + (void)remap:(Mat*)src dst:(Mat*)dst map1:(Mat*)map1 map2:(Mat*)map2 interpolation:(int)interpolation NS_SWIFT_NAME(remap(src:dst:map1:map2:interpolation:));
  3825. //
  3826. // void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
  3827. //
  3828. /**
  3829. * Converts image transformation maps from one representation to another.
  3830. *
  3831. * The function converts a pair of maps for remap from one representation to another. The following
  3832. * options ( (map1.type(), map2.type()) `$$\rightarrow$$` (dstmap1.type(), dstmap2.type()) ) are
  3833. * supported:
  3834. *
  3835. * - `$$\texttt{(CV\_32FC1, CV\_32FC1)} \rightarrow \texttt{(CV\_16SC2, CV\_16UC1)}$$`. This is the
  3836. * most frequently used conversion operation, in which the original floating-point maps (see #remap)
  3837. * are converted to a more compact and much faster fixed-point representation. The first output array
  3838. * contains the rounded coordinates and the second array (created only when nninterpolation=false )
  3839. * contains indices in the interpolation tables.
  3840. *
  3841. * - `$$\texttt{(CV\_32FC2)} \rightarrow \texttt{(CV\_16SC2, CV\_16UC1)}$$`. The same as above but
  3842. * the original maps are stored in one 2-channel matrix.
  3843. *
  3844. * - Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same
  3845. * as the originals.
  3846. *
  3847. * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
  3848. * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),
  3849. * respectively.
  3850. * @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
  3851. * @param dstmap2 The second output map.
  3852. * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or
  3853. * CV_32FC2 .
  3854. * @param nninterpolation Flag indicating whether the fixed-point maps are used for the
  3855. * nearest-neighbor or for a more complex interpolation.
  3856. *
  3857. * @see `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `undistort`, `initUndistortRectifyMap`
  3858. */
  3859. + (void)convertMaps:(Mat*)map1 map2:(Mat*)map2 dstmap1:(Mat*)dstmap1 dstmap2:(Mat*)dstmap2 dstmap1type:(int)dstmap1type nninterpolation:(BOOL)nninterpolation NS_SWIFT_NAME(convertMaps(map1:map2:dstmap1:dstmap2:dstmap1type:nninterpolation:));
  3860. /**
  3861. * Converts image transformation maps from one representation to another.
  3862. *
  3863. * The function converts a pair of maps for remap from one representation to another. The following
  3864. * options ( (map1.type(), map2.type()) `$$\rightarrow$$` (dstmap1.type(), dstmap2.type()) ) are
  3865. * supported:
  3866. *
  3867. * - `$$\texttt{(CV\_32FC1, CV\_32FC1)} \rightarrow \texttt{(CV\_16SC2, CV\_16UC1)}$$`. This is the
  3868. * most frequently used conversion operation, in which the original floating-point maps (see #remap)
  3869. * are converted to a more compact and much faster fixed-point representation. The first output array
  3870. * contains the rounded coordinates and the second array (created only when nninterpolation=false )
  3871. * contains indices in the interpolation tables.
  3872. *
  3873. * - `$$\texttt{(CV\_32FC2)} \rightarrow \texttt{(CV\_16SC2, CV\_16UC1)}$$`. The same as above but
  3874. * the original maps are stored in one 2-channel matrix.
  3875. *
  3876. * - Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same
  3877. * as the originals.
  3878. *
  3879. * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
  3880. * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),
  3881. * respectively.
  3882. * @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
  3883. * @param dstmap2 The second output map.
  3884. * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or
  3885. * CV_32FC2 .
  3887. *
  3888. * @see `+remap:dst:map1:map2:interpolation:borderMode:borderValue:`, `undistort`, `initUndistortRectifyMap`
  3889. */
  3890. + (void)convertMaps:(Mat*)map1 map2:(Mat*)map2 dstmap1:(Mat*)dstmap1 dstmap2:(Mat*)dstmap2 dstmap1type:(int)dstmap1type NS_SWIFT_NAME(convertMaps(map1:map2:dstmap1:dstmap2:dstmap1type:));
  3891. //
  3892. // Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale)
  3893. //
  3894. /**
  3895. * Calculates an affine matrix of 2D rotation.
  3896. *
  3897. * The function calculates the following matrix:
  3898. *
  3899. * `$$\begin{bmatrix} \alpha & \beta & (1- \alpha ) \cdot \texttt{center.x} - \beta \cdot \texttt{center.y} \\ - \beta & \alpha & \beta \cdot \texttt{center.x} + (1- \alpha ) \cdot \texttt{center.y} \end{bmatrix}$$`
  3900. *
  3901. * where
  3902. *
  3903. * `$$\begin{array}{l} \alpha = \texttt{scale} \cdot \cos \texttt{angle} , \\ \beta = \texttt{scale} \cdot \sin \texttt{angle} \end{array}$$`
  3904. *
  3905. * The transformation maps the rotation center to itself. If this is not the target, adjust the shift.
  3906. *
  3907. * @param center Center of the rotation in the source image.
  3908. * @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the
  3909. * coordinate origin is assumed to be the top-left corner).
  3910. * @param scale Isotropic scale factor.
  3911. *
  3912. * @see `+getAffineTransform:dst:`, `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `transform`
  3913. */
  3914. + (Mat*)getRotationMatrix2D:(Point2f*)center angle:(double)angle scale:(double)scale NS_SWIFT_NAME(getRotationMatrix2D(center:angle:scale:));
  3915. //
  3916. // void cv::invertAffineTransform(Mat M, Mat& iM)
  3917. //
  3918. /**
  3919. * Inverts an affine transformation.
  3920. *
  3921. * The function computes an inverse affine transformation represented by `$$2 \times 3$$` matrix M:
  3922. *
  3923. * `$$\begin{bmatrix} a_{11} & a_{12} & b_1 \\ a_{21} & a_{22} & b_2 \end{bmatrix}$$`
  3924. *
  3925. * The result is also a `$$2 \times 3$$` matrix of the same type as M.
  3926. *
  3927. * @param M Original affine transformation.
  3928. * @param iM Output reverse affine transformation.
  3929. */
  3930. + (void)invertAffineTransform:(Mat*)M iM:(Mat*)iM NS_SWIFT_NAME(invertAffineTransform(M:iM:));
  3931. //
  3932. // Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU)
  3933. //
  3934. /**
  3935. * Calculates a perspective transform from four pairs of the corresponding points.
  3936. *
  3937. * The function calculates the `$$3 \times 3$$` matrix of a perspective transform so that:
  3938. *
  3939. * `$$\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map\_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}$$`
  3940. *
  3941. * where
  3942. *
  3943. * `$$dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3$$`
  3944. *
  3945. * @param src Coordinates of quadrangle vertices in the source image.
  3946. * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
  3947. * @param solveMethod method passed to cv::solve (#DecompTypes)
  3948. *
  3949. * @see `findHomography`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `perspectiveTransform`
  3950. */
  3951. + (Mat*)getPerspectiveTransform:(Mat*)src dst:(Mat*)dst solveMethod:(int)solveMethod NS_SWIFT_NAME(getPerspectiveTransform(src:dst:solveMethod:));
  3952. /**
  3953. * Calculates a perspective transform from four pairs of the corresponding points.
  3954. *
  3955. * The function calculates the `$$3 \times 3$$` matrix of a perspective transform so that:
  3956. *
  3957. * `$$\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map\_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}$$`
  3958. *
  3959. * where
  3960. *
  3961. * `$$dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3$$`
  3962. *
  3963. * @param src Coordinates of quadrangle vertices in the source image.
  3964. * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
  3965. *
  3966. * @see `findHomography`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`, `perspectiveTransform`
  3967. */
  3968. + (Mat*)getPerspectiveTransform:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(getPerspectiveTransform(src:dst:));
  3969. //
  3970. // Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst)
  3971. //
  3972. + (Mat*)getAffineTransform:(NSArray<Point2f*>*)src dst:(NSArray<Point2f*>*)dst NS_SWIFT_NAME(getAffineTransform(src:dst:));
  3973. //
  3974. // void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
  3975. //
  3976. /**
  3977. * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
  3978. *
  3979. * The function getRectSubPix extracts pixels from src:
  3980. *
  3981. * `$$patch(x, y) = src(x + \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y + \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)$$`
  3982. *
  3983. * where the values of the pixels at non-integer coordinates are retrieved using bilinear
  3984. * interpolation. Every channel of multi-channel images is processed independently. Also
  3985. * the image should be a single channel or three channel image. While the center of the
  3986. * rectangle must be inside the image, parts of the rectangle may be outside.
  3987. *
  3988. * @param image Source image.
  3989. * @param patchSize Size of the extracted patch.
  3990. * @param center Floating point coordinates of the center of the extracted rectangle within the
  3991. * source image. The center must be inside the image.
  3992. * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
  3993. * @param patchType Depth of the extracted pixels. By default, they have the same depth as src .
  3994. *
  3995. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`
  3996. */
  3997. + (void)getRectSubPix:(Mat*)image patchSize:(Size2i*)patchSize center:(Point2f*)center patch:(Mat*)patch patchType:(int)patchType NS_SWIFT_NAME(getRectSubPix(image:patchSize:center:patch:patchType:));
  3998. /**
  3999. * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
  4000. *
  4001. * The function getRectSubPix extracts pixels from src:
  4002. *
  4003. * `$$patch(x, y) = src(x + \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y + \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)$$`
  4004. *
  4005. * where the values of the pixels at non-integer coordinates are retrieved using bilinear
  4006. * interpolation. Every channel of multi-channel images is processed independently. Also
  4007. * the image should be a single channel or three channel image. While the center of the
  4008. * rectangle must be inside the image, parts of the rectangle may be outside.
  4009. *
  4010. * @param image Source image.
  4011. * @param patchSize Size of the extracted patch.
  4012. * @param center Floating point coordinates of the center of the extracted rectangle within the
  4013. * source image. The center must be inside the image.
  4014. * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
  4015. *
  4016. * @see `+warpAffine:dst:M:dsize:flags:borderMode:borderValue:`, `+warpPerspective:dst:M:dsize:flags:borderMode:borderValue:`
  4017. */
  4018. + (void)getRectSubPix:(Mat*)image patchSize:(Size2i*)patchSize center:(Point2f*)center patch:(Mat*)patch NS_SWIFT_NAME(getRectSubPix(image:patchSize:center:patch:));
  4019. //
  4020. // void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags)
  4021. //
  4022. /**
  4023. * Remaps an image to semilog-polar coordinates space.
  4024. *
  4025. * @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG);
  4026. *
  4027. *
  4028. * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image d)"):
  4029. * `$$\begin{array}{l}
  4030. * dst( \rho , \phi ) = src(x,y) \\
  4031. * dst.size() \leftarrow src.size()
  4032. * \end{array}$$`
  4033. *
  4034. * where
  4035. * `$$\begin{array}{l}
  4036. * I = (dx,dy) = (x - center.x,y - center.y) \\
  4037. * \rho = M \cdot log_e(\texttt{magnitude} (I)) ,\\
  4038. * \phi = Kangle \cdot \texttt{angle} (I) \\
  4039. * \end{array}$$`
  4040. *
  4041. * and
  4042. * `$$\begin{array}{l}
  4043. * M = src.cols / log_e(maxRadius) \\
  4044. * Kangle = src.rows / 2\Pi \\
  4045. * \end{array}$$`
  4046. *
  4047. * The function emulates the human "foveal" vision and can be used for fast scale and
  4048. * rotation-invariant template matching, for object tracking and so forth.
  4049. * @param src Source image
  4050. * @param dst Destination image. It will have same size and type as src.
  4051. * @param center The transformation center; where the output precision is maximal
  4052. * @param M Magnitude scale parameter. It determines the radius of the bounding circle to transform too.
  4053. * @param flags A combination of interpolation methods, see #InterpolationFlags
  4054. *
  4055. * NOTE:
  4056. * - The function can not operate in-place.
  4057. * - To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.
  4058. *
  4059. * @see `cv::linearPolar`
  4060. */
  4061. + (void)logPolar:(Mat*)src dst:(Mat*)dst center:(Point2f*)center M:(double)M flags:(int)flags NS_SWIFT_NAME(logPolar(src:dst:center:M:flags:)) DEPRECATED_ATTRIBUTE;
  4062. //
  4063. // void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags)
  4064. //
  4065. /**
  4066. * Remaps an image to polar coordinates space.
  4067. *
  4068. * @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags)
  4069. *
  4070. *
  4071. * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image c)"):
  4072. * `$$\begin{array}{l}
  4073. * dst( \rho , \phi ) = src(x,y) \\
  4074. * dst.size() \leftarrow src.size()
  4075. * \end{array}$$`
  4076. *
  4077. * where
  4078. * `$$\begin{array}{l}
  4079. * I = (dx,dy) = (x - center.x,y - center.y) \\
  4080. * \rho = Kmag \cdot \texttt{magnitude} (I) ,\\
  4081. * \phi = Kangle \cdot \texttt{angle} (I)
  4082. * \end{array}$$`
  4083. *
  4084. * and
  4085. * `$$\begin{array}{l}
  4086. * Kmag = src.cols / maxRadius \\
  4087. * Kangle = src.rows / 2\Pi
  4088. * \end{array}$$`
  4089. *
  4090. *
  4091. * @param src Source image
  4092. * @param dst Destination image. It will have same size and type as src.
  4093. * @param center The transformation center;
  4094. * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.
  4095. * @param flags A combination of interpolation methods, see #InterpolationFlags
  4096. *
  4097. * NOTE:
  4098. * - The function can not operate in-place.
  4099. * - To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.
  4100. *
  4101. * @see `cv::logPolar`
  4102. */
  4103. + (void)linearPolar:(Mat*)src dst:(Mat*)dst center:(Point2f*)center maxRadius:(double)maxRadius flags:(int)flags NS_SWIFT_NAME(linearPolar(src:dst:center:maxRadius:flags:)) DEPRECATED_ATTRIBUTE;
  4104. //
  4105. // void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags)
  4106. //
  4107. /**
  4108. * Remaps an image to polar or semilog-polar coordinates space
  4109. *
  4110. * polar_remaps_reference_image
  4111. * ![Polar remaps reference](pics/polar_remap_doc.png)
  4112. *
  4113. * Transform the source image using the following transformation:
  4114. * `$$
  4115. * dst(\rho , \phi ) = src(x,y)
  4116. * $$`
  4117. *
  4118. * where
  4119. * `$$
  4120. * \begin{array}{l}
  4121. * \vec{I} = (x - center.x, \;y - center.y) \\
  4122. * \phi = Kangle \cdot \texttt{angle} (\vec{I}) \\
  4123. * \rho = \left\{\begin{matrix}
  4124. * Klin \cdot \texttt{magnitude} (\vec{I}) & default \\
  4125. * Klog \cdot log_e(\texttt{magnitude} (\vec{I})) & if \; semilog \\
  4126. * \end{matrix}\right.
  4127. * \end{array}
  4128. * $$`
  4129. *
  4130. * and
  4131. * `$$
  4132. * \begin{array}{l}
  4133. * Kangle = dsize.height / 2\Pi \\
  4134. * Klin = dsize.width / maxRadius \\
  4135. * Klog = dsize.width / log_e(maxRadius) \\
  4136. * \end{array}
  4137. * $$`
  4138. *
  4139. *
  4140. * \par Linear vs semilog mapping
  4141. *
  4142. * Polar mapping can be linear or semi-log. Add one of #WarpPolarMode to `flags` to specify the polar mapping mode.
  4143. *
  4144. * Linear is the default mode.
  4145. *
  4146. * The semilog mapping emulates the human "foveal" vision that permit very high acuity on the line of sight (central vision)
  4147. * in contrast to peripheral vision where acuity is minor.
  4148. *
  4149. * \par Option on `dsize`:
  4150. *
  4151. * - if both values in `dsize <=0 ` (default),
  4152. * the destination image will have (almost) same area of source bounding circle:
  4153. * `$$\begin{array}{l}
  4154. * dsize.area \leftarrow (maxRadius^2 \cdot \Pi) \\
  4155. * dsize.width = \texttt{cvRound}(maxRadius) \\
  4156. * dsize.height = \texttt{cvRound}(maxRadius \cdot \Pi) \\
  4157. * \end{array}$$`
  4158. *
  4159. *
  4160. * - if only `dsize.height <= 0`,
  4161. * the destination image area will be proportional to the bounding circle area but scaled by `Klin * Klin`:
  4162. * `$$\begin{array}{l}
  4163. * dsize.height = \texttt{cvRound}(dsize.width \cdot \Pi) \\
  4164. * \end{array}
  4165. * $$`
  4166. *
  4167. * - if both values in `dsize > 0 `,
  4168. * the destination image will have the given size therefore the area of the bounding circle will be scaled to `dsize`.
  4169. *
  4170. *
  4171. * \par Reverse mapping
  4172. *
  4173. * You can get reverse mapping adding #WARP_INVERSE_MAP to `flags`
  4174. * \snippet polar_transforms.cpp InverseMap
  4175. *
  4176. * In addition, to calculate the original coordinate from a polar mapped coordinate `$$(rho, phi)->(x, y)$$`:
  4177. * \snippet polar_transforms.cpp InverseCoordinate
  4178. *
  4179. * @param src Source image.
  4180. * @param dst Destination image. It will have same type as src.
  4181. * @param dsize The destination image size (see description for valid options).
  4182. * @param center The transformation center.
  4183. * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.
  4184. * @param flags A combination of interpolation methods, #InterpolationFlags + #WarpPolarMode.
  4185. * - Add #WARP_POLAR_LINEAR to select linear polar mapping (default)
  4186. * - Add #WARP_POLAR_LOG to select semilog polar mapping
  4187. * - Add #WARP_INVERSE_MAP for reverse mapping.
  4188. * NOTE:
  4189. * - The function can not operate in-place.
  4190. * - To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.
  4191. * - This function uses #remap. Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
  4192. *
  4193. * @see `cv::remap`
  4194. */
  4195. + (void)warpPolar:(Mat*)src dst:(Mat*)dst dsize:(Size2i*)dsize center:(Point2f*)center maxRadius:(double)maxRadius flags:(int)flags NS_SWIFT_NAME(warpPolar(src:dst:dsize:center:maxRadius:flags:));
  4196. //
  4197. // void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1)
  4198. //
  4199. /**
  4200. * Calculates the integral of an image.
  4201. *
  4202. * The function calculates one or more integral images for the source image as follows:
  4203. *
  4204. * `$$\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)$$`
  4205. *
  4206. * `$$\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2$$`
  4207. *
  4208. * `$$\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)$$`
  4209. *
  4210. * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
  4211. * up-right or rotated rectangular region of the image in a constant time, for example:
  4212. *
  4213. * `$$\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)$$`
  4214. *
  4215. * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
  4216. * example. In case of multi-channel images, sums for each channel are accumulated independently.
  4217. *
  4218. * As a practical example, the next figure shows the calculation of the integral of a straight
  4219. * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
  4220. * original image are shown, as well as the relative pixels in the integral images sum and tilted .
  4221. *
  4222. * ![integral calculation example](pics/integral.png)
  4223. *
  4224. * @param src input image as `$$W \times H$$`, 8-bit or floating-point (32f or 64f).
  4225. * @param sum integral image as `$$(W+1)\times (H+1)$$` , 32-bit integer or floating-point (32f or 64f).
  4226. * @param sqsum integral image for squared pixel values; it is `$$(W+1)\times (H+1)$$`, double-precision
  4227. * floating-point (64f) array.
  4228. * @param tilted integral for the image rotated by 45 degrees; it is `$$(W+1)\times (H+1)$$` array with
  4229. * the same data type as sum.
  4230. * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
  4231. * CV_64F.
  4232. * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
  4233. */
  4234. + (void)integral3:(Mat*)src sum:(Mat*)sum sqsum:(Mat*)sqsum tilted:(Mat*)tilted sdepth:(int)sdepth sqdepth:(int)sqdepth NS_SWIFT_NAME(integral(src:sum:sqsum:tilted:sdepth:sqdepth:));
  4235. /**
  4236. * Calculates the integral of an image.
  4237. *
  4238. * The function calculates one or more integral images for the source image as follows:
  4239. *
  4240. * `$$\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)$$`
  4241. *
  4242. * `$$\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2$$`
  4243. *
  4244. * `$$\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)$$`
  4245. *
  4246. * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
  4247. * up-right or rotated rectangular region of the image in a constant time, for example:
  4248. *
  4249. * `$$\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)$$`
  4250. *
  4251. * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
  4252. * example. In case of multi-channel images, sums for each channel are accumulated independently.
  4253. *
  4254. * As a practical example, the next figure shows the calculation of the integral of a straight
  4255. * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
  4256. * original image are shown, as well as the relative pixels in the integral images sum and tilted .
  4257. *
  4258. * ![integral calculation example](pics/integral.png)
  4259. *
  4260. * @param src input image as `$$W \times H$$`, 8-bit or floating-point (32f or 64f).
  4261. * @param sum integral image as `$$(W+1)\times (H+1)$$` , 32-bit integer or floating-point (32f or 64f).
  4262. * @param sqsum integral image for squared pixel values; it is `$$(W+1)\times (H+1)$$`, double-precision
  4263. * floating-point (64f) array.
  4264. * @param tilted integral for the image rotated by 45 degrees; it is `$$(W+1)\times (H+1)$$` array with
  4265. * the same data type as sum.
  4266. * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
  4267. * CV_64F.
  4268. */
  4269. + (void)integral3:(Mat*)src sum:(Mat*)sum sqsum:(Mat*)sqsum tilted:(Mat*)tilted sdepth:(int)sdepth NS_SWIFT_NAME(integral(src:sum:sqsum:tilted:sdepth:));
  4270. /**
  4271. * Calculates the integral of an image.
  4272. *
  4273. * The function calculates one or more integral images for the source image as follows:
  4274. *
  4275. * `$$\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)$$`
  4276. *
  4277. * `$$\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2$$`
  4278. *
  4279. * `$$\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)$$`
  4280. *
  4281. * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
  4282. * up-right or rotated rectangular region of the image in a constant time, for example:
  4283. *
  4284. * `$$\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)$$`
  4285. *
  4286. * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
  4287. * example. In case of multi-channel images, sums for each channel are accumulated independently.
  4288. *
  4289. * As a practical example, the next figure shows the calculation of the integral of a straight
  4290. * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
  4291. * original image are shown, as well as the relative pixels in the integral images sum and tilted .
  4292. *
  4293. * ![integral calculation example](pics/integral.png)
  4294. *
  4295. * @param src input image as `$$W \times H$$`, 8-bit or floating-point (32f or 64f).
  4296. * @param sum integral image as `$$(W+1)\times (H+1)$$` , 32-bit integer or floating-point (32f or 64f).
  4297. * @param sqsum integral image for squared pixel values; it is `$$(W+1)\times (H+1)$$`, double-precision
  4298. * floating-point (64f) array.
  4299. * @param tilted integral for the image rotated by 45 degrees; it is `$$(W+1)\times (H+1)$$` array with
  4300. * the same data type as sum.
  4302. */
  4303. + (void)integral3:(Mat*)src sum:(Mat*)sum sqsum:(Mat*)sqsum tilted:(Mat*)tilted NS_SWIFT_NAME(integral(src:sum:sqsum:tilted:));
  4304. //
  4305. // void cv::integral(Mat src, Mat& sum, int sdepth = -1)
  4306. //
  4307. + (void)integral:(Mat*)src sum:(Mat*)sum sdepth:(int)sdepth NS_SWIFT_NAME(integral(src:sum:sdepth:));
  4308. + (void)integral:(Mat*)src sum:(Mat*)sum NS_SWIFT_NAME(integral(src:sum:));
  4309. //
  4310. // void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1)
  4311. //
  4312. + (void)integral2:(Mat*)src sum:(Mat*)sum sqsum:(Mat*)sqsum sdepth:(int)sdepth sqdepth:(int)sqdepth NS_SWIFT_NAME(integral(src:sum:sqsum:sdepth:sqdepth:));
  4313. + (void)integral2:(Mat*)src sum:(Mat*)sum sqsum:(Mat*)sqsum sdepth:(int)sdepth NS_SWIFT_NAME(integral(src:sum:sqsum:sdepth:));
  4314. + (void)integral2:(Mat*)src sum:(Mat*)sum sqsum:(Mat*)sqsum NS_SWIFT_NAME(integral(src:sum:sqsum:));
  4315. //
  4316. // void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat())
  4317. //
  4318. /**
  4319. * Adds an image to the accumulator image.
  4320. *
  4321. * The function adds src or some of its elements to dst :
  4322. *
  4323. * `$$\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4324. *
  4325. * The function supports multi-channel images. Each channel is processed independently.
  4326. *
  4327. * The function cv::accumulate can be used, for example, to collect statistics of a scene background
  4328. * viewed by a still camera and for the further foreground-background segmentation.
  4329. *
  4330. * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.
  4331. * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.
  4332. * @param mask Optional operation mask.
  4333. *
  4334. * @see `+accumulateSquare:dst:mask:`, `+accumulateProduct:src2:dst:mask:`, `+accumulateWeighted:dst:alpha:mask:`
  4335. */
  4336. + (void)accumulate:(Mat*)src dst:(Mat*)dst mask:(Mat*)mask NS_SWIFT_NAME(accumulate(src:dst:mask:));
  4337. /**
  4338. * Adds an image to the accumulator image.
  4339. *
  4340. * The function adds src or some of its elements to dst :
  4341. *
  4342. * `$$\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4343. *
  4344. * The function supports multi-channel images. Each channel is processed independently.
  4345. *
  4346. * The function cv::accumulate can be used, for example, to collect statistics of a scene background
  4347. * viewed by a still camera and for the further foreground-background segmentation.
  4348. *
  4349. * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.
  4350. * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.
  4351. *
  4352. * @see `+accumulateSquare:dst:mask:`, `+accumulateProduct:src2:dst:mask:`, `+accumulateWeighted:dst:alpha:mask:`
  4353. */
  4354. + (void)accumulate:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(accumulate(src:dst:));
  4355. //
  4356. // void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
  4357. //
  4358. /**
  4359. * Adds the square of a source image to the accumulator image.
  4360. *
  4361. * The function adds the input image src or its selected region, raised to a power of 2, to the
  4362. * accumulator dst :
  4363. *
  4364. * `$$\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y)^2 \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4365. *
  4366. * The function supports multi-channel images. Each channel is processed independently.
  4367. *
  4368. * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
  4369. * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
  4370. * floating-point.
  4371. * @param mask Optional operation mask.
  4372. *
  4373. * @see `+accumulateSquare:dst:mask:`, `+accumulateProduct:src2:dst:mask:`, `+accumulateWeighted:dst:alpha:mask:`
  4374. */
  4375. + (void)accumulateSquare:(Mat*)src dst:(Mat*)dst mask:(Mat*)mask NS_SWIFT_NAME(accumulateSquare(src:dst:mask:));
  4376. /**
  4377. * Adds the square of a source image to the accumulator image.
  4378. *
  4379. * The function adds the input image src or its selected region, raised to a power of 2, to the
  4380. * accumulator dst :
  4381. *
  4382. * `$$\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y)^2 \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4383. *
  4384. * The function supports multi-channel images. Each channel is processed independently.
  4385. *
  4386. * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
  4387. * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
  4388. * floating-point.
  4389. *
  4390. * @see `+accumulateSquare:dst:mask:`, `+accumulateProduct:src2:dst:mask:`, `+accumulateWeighted:dst:alpha:mask:`
  4391. */
  4392. + (void)accumulateSquare:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(accumulateSquare(src:dst:));
  4393. //
  4394. // void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
  4395. //
  4396. /**
  4397. * Adds the per-element product of two input images to the accumulator image.
  4398. *
  4399. * The function adds the product of two images or their selected regions to the accumulator dst :
  4400. *
  4401. * `$$\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src1} (x,y) \cdot \texttt{src2} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4402. *
  4403. * The function supports multi-channel images. Each channel is processed independently.
  4404. *
  4405. * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
  4406. * @param src2 Second input image of the same type and the same size as src1 .
  4407. * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit
  4408. * floating-point.
  4409. * @param mask Optional operation mask.
  4410. *
  4411. * @see `+accumulate:dst:mask:`, `+accumulateSquare:dst:mask:`, `+accumulateWeighted:dst:alpha:mask:`
  4412. */
  4413. + (void)accumulateProduct:(Mat*)src1 src2:(Mat*)src2 dst:(Mat*)dst mask:(Mat*)mask NS_SWIFT_NAME(accumulateProduct(src1:src2:dst:mask:));
  4414. /**
  4415. * Adds the per-element product of two input images to the accumulator image.
  4416. *
  4417. * The function adds the product of two images or their selected regions to the accumulator dst :
  4418. *
  4419. * `$$\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src1} (x,y) \cdot \texttt{src2} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4420. *
  4421. * The function supports multi-channel images. Each channel is processed independently.
  4422. *
  4423. * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
  4424. * @param src2 Second input image of the same type and the same size as src1 .
  4425. * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit
  4426. * floating-point.
  4427. *
  4428. * @see `+accumulate:dst:mask:`, `+accumulateSquare:dst:mask:`, `+accumulateWeighted:dst:alpha:mask:`
  4429. */
  4430. + (void)accumulateProduct:(Mat*)src1 src2:(Mat*)src2 dst:(Mat*)dst NS_SWIFT_NAME(accumulateProduct(src1:src2:dst:));
  4431. //
  4432. // void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat())
  4433. //
  4434. /**
  4435. * Updates a running average.
  4436. *
  4437. * The function calculates the weighted sum of the input image src and the accumulator dst so that dst
  4438. * becomes a running average of a frame sequence:
  4439. *
  4440. * `$$\texttt{dst} (x,y) \leftarrow (1- \texttt{alpha} ) \cdot \texttt{dst} (x,y) + \texttt{alpha} \cdot \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4441. *
  4442. * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images).
  4443. * The function supports multi-channel images. Each channel is processed independently.
  4444. *
  4445. * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
  4446. * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
  4447. * floating-point.
  4448. * @param alpha Weight of the input image.
  4449. * @param mask Optional operation mask.
  4450. *
  4451. * @see `+accumulate:dst:mask:`, `+accumulateSquare:dst:mask:`, `+accumulateProduct:src2:dst:mask:`
  4452. */
  4453. + (void)accumulateWeighted:(Mat*)src dst:(Mat*)dst alpha:(double)alpha mask:(Mat*)mask NS_SWIFT_NAME(accumulateWeighted(src:dst:alpha:mask:));
  4454. /**
  4455. * Updates a running average.
  4456. *
  4457. * The function calculates the weighted sum of the input image src and the accumulator dst so that dst
  4458. * becomes a running average of a frame sequence:
  4459. *
  4460. * `$$\texttt{dst} (x,y) \leftarrow (1- \texttt{alpha} ) \cdot \texttt{dst} (x,y) + \texttt{alpha} \cdot \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0$$`
  4461. *
  4462. * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images).
  4463. * The function supports multi-channel images. Each channel is processed independently.
  4464. *
  4465. * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
  4466. * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
  4467. * floating-point.
  4468. * @param alpha Weight of the input image.
  4469. *
  4470. * @see `+accumulate:dst:mask:`, `+accumulateSquare:dst:mask:`, `+accumulateProduct:src2:dst:mask:`
  4471. */
  4472. + (void)accumulateWeighted:(Mat*)src dst:(Mat*)dst alpha:(double)alpha NS_SWIFT_NAME(accumulateWeighted(src:dst:alpha:));
  4473. //
  4474. // Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0)
  4475. //
  4476. /**
  4477. * The function is used to detect translational shifts that occur between two images.
  4478. *
  4479. * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
  4480. * the frequency domain. It can be used for fast image registration as well as motion estimation. For
  4481. * more information please see <http://en.wikipedia.org/wiki/Phase_correlation>
  4482. *
  4483. * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed
  4484. * with getOptimalDFTSize.
  4485. *
  4486. * The function performs the following equations:
  4487. * - First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each
  4488. * image to remove possible edge effects. This window is cached until the array size changes to speed
  4489. * up processing time.
  4490. * - Next it computes the forward DFTs of each source array:
  4491. * `$$\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}$$`
  4492. * where `$$\mathcal{F}$$` is the forward DFT.
  4493. * - It then computes the cross-power spectrum of each frequency domain array:
  4494. * `$$R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}$$`
  4495. * - Next the cross-correlation is converted back into the time domain via the inverse DFT:
  4496. * `$$r = \mathcal{F}^{-1}\{R\}$$`
  4497. * - Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
  4498. * achieve sub-pixel accuracy.
  4499. * `$$(\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}$$`
  4500. * - If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
  4501. * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
  4502. * peak) and will be smaller when there are multiple peaks.
  4503. *
  4504. * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
  4505. * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
  4506. * @param window Floating point array with windowing coefficients to reduce edge effects (optional).
  4507. * @param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional).
  4508. * @return detected phase shift (sub-pixel) between the two arrays.
  4509. *
  4510. * @see `dft`, `getOptimalDFTSize`, `idft`, `mulSpectrums`, `createHanningWindow`
  4511. */
  4512. + (Point2d*)phaseCorrelate:(Mat*)src1 src2:(Mat*)src2 window:(Mat*)window response:(double*)response NS_SWIFT_NAME(phaseCorrelate(src1:src2:window:response:));
  4513. /**
  4514. * The function is used to detect translational shifts that occur between two images.
  4515. *
  4516. * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
  4517. * the frequency domain. It can be used for fast image registration as well as motion estimation. For
  4518. * more information please see <http://en.wikipedia.org/wiki/Phase_correlation>
  4519. *
  4520. * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed
  4521. * with getOptimalDFTSize.
  4522. *
  4523. * The function performs the following equations:
  4524. * - First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each
  4525. * image to remove possible edge effects. This window is cached until the array size changes to speed
  4526. * up processing time.
  4527. * - Next it computes the forward DFTs of each source array:
  4528. * `$$\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}$$`
  4529. * where `$$\mathcal{F}$$` is the forward DFT.
  4530. * - It then computes the cross-power spectrum of each frequency domain array:
  4531. * `$$R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}$$`
  4532. * - Next the cross-correlation is converted back into the time domain via the inverse DFT:
  4533. * `$$r = \mathcal{F}^{-1}\{R\}$$`
  4534. * - Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
  4535. * achieve sub-pixel accuracy.
  4536. * `$$(\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}$$`
  4537. * - If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
  4538. * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
  4539. * peak) and will be smaller when there are multiple peaks.
  4540. *
  4541. * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
  4542. * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
  4543. * @param window Floating point array with windowing coefficients to reduce edge effects (optional).
  4544. * @return detected phase shift (sub-pixel) between the two arrays.
  4545. *
  4546. * @see `dft`, `getOptimalDFTSize`, `idft`, `mulSpectrums`, `createHanningWindow`
  4547. */
  4548. + (Point2d*)phaseCorrelate:(Mat*)src1 src2:(Mat*)src2 window:(Mat*)window NS_SWIFT_NAME(phaseCorrelate(src1:src2:window:));
  4549. /**
  4550. * The function is used to detect translational shifts that occur between two images.
  4551. *
  4552. * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
  4553. * the frequency domain. It can be used for fast image registration as well as motion estimation. For
  4554. * more information please see <http://en.wikipedia.org/wiki/Phase_correlation>
  4555. *
  4556. * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed
  4557. * with getOptimalDFTSize.
  4558. *
  4559. * The function performs the following equations:
  4560. * - First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each
  4561. * image to remove possible edge effects. This window is cached until the array size changes to speed
  4562. * up processing time.
  4563. * - Next it computes the forward DFTs of each source array:
  4564. * `$$\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}$$`
  4565. * where `$$\mathcal{F}$$` is the forward DFT.
  4566. * - It then computes the cross-power spectrum of each frequency domain array:
  4567. * `$$R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}$$`
  4568. * - Next the cross-correlation is converted back into the time domain via the inverse DFT:
  4569. * `$$r = \mathcal{F}^{-1}\{R\}$$`
  4570. * - Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
  4571. * achieve sub-pixel accuracy.
  4572. * `$$(\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}$$`
  4573. * - If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
  4574. * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
  4575. * peak) and will be smaller when there are multiple peaks.
  4576. *
  4577. * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
  4578. * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
  4579. * @return detected phase shift (sub-pixel) between the two arrays.
  4580. *
  4581. * @see `dft`, `getOptimalDFTSize`, `idft`, `mulSpectrums`, `createHanningWindow`
  4582. */
  4583. + (Point2d*)phaseCorrelate:(Mat*)src1 src2:(Mat*)src2 NS_SWIFT_NAME(phaseCorrelate(src1:src2:));
  4584. //
  4585. // void cv::createHanningWindow(Mat& dst, Size winSize, int type)
  4586. //
  4587. /**
  4588. * This function computes the Hanning window coefficients in two dimensions.
  4589. *
  4590. * See (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function)
  4591. * for more information.
  4592. *
  4593. * An example is shown below:
  4594. *
  4595. * // create hanning window of size 100x100 and type CV_32F
  4596. * Mat hann;
  4597. * createHanningWindow(hann, Size(100, 100), CV_32F);
  4598. *
  4599. * @param dst Destination array to place Hann coefficients in
  4600. * @param winSize The window size specifications (both width and height must be > 1)
  4601. * @param type Created array type
  4602. */
  4603. + (void)createHanningWindow:(Mat*)dst winSize:(Size2i*)winSize type:(int)type NS_SWIFT_NAME(createHanningWindow(dst:winSize:type:));
  4604. //
  4605. // void cv::divSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false)
  4606. //
  4607. /**
  4608. * Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum.
  4609. *
  4610. * The function cv::divSpectrums performs the per-element division of the first array by the second array.
  4611. * The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform.
  4612. *
  4613. * @param a first input array.
  4614. * @param b second input array of the same size and type as src1 .
  4615. * @param c output array of the same size and type as src1 .
  4616. * @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that
  4617. * each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value.
  4618. * @param conjB optional flag that conjugates the second input array before the multiplication (true)
  4619. * or not (false).
  4620. */
  4621. + (void)divSpectrums:(Mat*)a b:(Mat*)b c:(Mat*)c flags:(int)flags conjB:(BOOL)conjB NS_SWIFT_NAME(divSpectrums(a:b:c:flags:conjB:));
  4622. /**
  4623. * Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum.
  4624. *
  4625. * The function cv::divSpectrums performs the per-element division of the first array by the second array.
  4626. * The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform.
  4627. *
  4628. * @param a first input array.
  4629. * @param b second input array of the same size and type as src1 .
  4630. * @param c output array of the same size and type as src1 .
  4631. * @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that
  4632. * each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value.
  4633. * In this overload the second input array is not conjugated before the division (conjB = false).
  4634. */
  4635. + (void)divSpectrums:(Mat*)a b:(Mat*)b c:(Mat*)c flags:(int)flags NS_SWIFT_NAME(divSpectrums(a:b:c:flags:));
  4636. //
  4637. // double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, ThresholdTypes type)
  4638. //
  4639. /**
  4640. * Applies a fixed-level threshold to each array element.
  4641. *
  4642. * The function applies fixed-level thresholding to a multiple-channel array. The function is typically
  4643. * used to get a bi-level (binary) image out of a grayscale image ( #compare could also be used for
  4644. * this purpose) or for removing a noise, that is, filtering out pixels with too small or too large
  4645. * values. There are several types of thresholding supported by the function. They are determined by
  4646. * type parameter.
  4647. *
  4648. * Also, the special values #THRESH_OTSU or #THRESH_TRIANGLE may be combined with one of the
  4649. * above values. In these cases, the function determines the optimal threshold value using the Otsu's
  4650. * or Triangle algorithm and uses it instead of the specified thresh.
  4651. *
  4652. * NOTE: Currently, the Otsu's and Triangle methods are implemented only for 8-bit single-channel images.
  4653. *
  4654. * @param src input array (multiple-channel, 8-bit or 32-bit floating point).
  4655. * @param dst output array of the same size and type and the same number of channels as src.
  4656. * @param thresh threshold value.
  4657. * @param maxval maximum value to use with the #THRESH_BINARY and #THRESH_BINARY_INV thresholding
  4658. * types.
  4659. * @param type thresholding type (see #ThresholdTypes).
  4660. * @return the computed threshold value if Otsu's or Triangle methods used.
  4661. *
  4662. * @see `+adaptiveThreshold:dst:maxValue:adaptiveMethod:thresholdType:blockSize:C:`, `+findContours:contours:hierarchy:mode:method:offset:`, `compare`, `min`, `max`
  4663. */
  4664. + (double)threshold:(Mat*)src dst:(Mat*)dst thresh:(double)thresh maxval:(double)maxval type:(ThresholdTypes)type NS_SWIFT_NAME(threshold(src:dst:thresh:maxval:type:));
  4665. //
  4666. // void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, AdaptiveThresholdTypes adaptiveMethod, ThresholdTypes thresholdType, int blockSize, double C)
  4667. //
  4668. /**
  4669. * Applies an adaptive threshold to an array.
  4670. *
  4671. * The function transforms a grayscale image to a binary image according to the formulae:
  4672. * - **THRESH_BINARY**
  4673. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}$$`
  4674. * - **THRESH_BINARY_INV**
  4675. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}$$`
  4676. * where `$$T(x,y)$$` is a threshold calculated individually for each pixel (see adaptiveMethod parameter).
  4677. *
  4678. * The function can process the image in-place.
  4679. *
  4680. * @param src Source 8-bit single-channel image.
  4681. * @param dst Destination image of the same size and the same type as src.
  4682. * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied
  4683. * @param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes.
  4684. * The #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries.
  4685. * @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV,
  4686. * see #ThresholdTypes.
  4687. * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the
  4688. * pixel: 3, 5, 7, and so on.
  4689. * @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it
  4690. * is positive but may be zero or negative as well.
  4691. *
  4692. * @see `+threshold:dst:thresh:maxval:type:`, `+blur:dst:ksize:anchor:borderType:`, `+GaussianBlur:dst:ksize:sigmaX:sigmaY:borderType:`
  4693. */
  4694. + (void)adaptiveThreshold:(Mat*)src dst:(Mat*)dst maxValue:(double)maxValue adaptiveMethod:(AdaptiveThresholdTypes)adaptiveMethod thresholdType:(ThresholdTypes)thresholdType blockSize:(int)blockSize C:(double)C NS_SWIFT_NAME(adaptiveThreshold(src:dst:maxValue:adaptiveMethod:thresholdType:blockSize:C:));
  4695. //
  4696. // void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), BorderTypes borderType = BORDER_DEFAULT)
  4697. //
  4698. /**
  4699. * Blurs an image and downsamples it.
  4700. *
  4701. * By default, size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in
  4702. * any case, the following conditions should be satisfied:
  4703. *
  4704. * `$$\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}$$`
  4705. *
  4706. * The function performs the downsampling step of the Gaussian pyramid construction. First, it
  4707. * convolves the source image with the kernel:
  4708. *
  4709. * `$$\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}$$`
  4710. *
  4711. * Then, it downsamples the image by rejecting even rows and columns.
  4712. *
  4713. * @param src input image.
  4714. * @param dst output image; it has the specified size and the same type as src.
  4715. * @param dstsize size of the output image.
  4716. * @param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported)
  4717. */
  4718. + (void)pyrDown:(Mat*)src dst:(Mat*)dst dstsize:(Size2i*)dstsize borderType:(BorderTypes)borderType NS_SWIFT_NAME(pyrDown(src:dst:dstsize:borderType:));
  4719. /**
  4720. * Blurs an image and downsamples it.
  4721. *
  4722. * By default, size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in
  4723. * any case, the following conditions should be satisfied:
  4724. *
  4725. * `$$\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}$$`
  4726. *
  4727. * The function performs the downsampling step of the Gaussian pyramid construction. First, it
  4728. * convolves the source image with the kernel:
  4729. *
  4730. * `$$\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}$$`
  4731. *
  4732. * Then, it downsamples the image by rejecting even rows and columns.
  4733. *
  4734. * @param src input image.
  4735. * @param dst output image; it has the specified size and the same type as src.
  4736. * @param dstsize size of the output image.
  4737. */
  4738. + (void)pyrDown:(Mat*)src dst:(Mat*)dst dstsize:(Size2i*)dstsize NS_SWIFT_NAME(pyrDown(src:dst:dstsize:));
  4739. /**
  4740. * Blurs an image and downsamples it.
  4741. *
  4742. * By default, size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in
  4743. * any case, the following conditions should be satisfied:
  4744. *
  4745. * `$$\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}$$`
  4746. *
  4747. * The function performs the downsampling step of the Gaussian pyramid construction. First, it
  4748. * convolves the source image with the kernel:
  4749. *
  4750. * `$$\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}$$`
  4751. *
  4752. * Then, it downsamples the image by rejecting even rows and columns.
  4753. *
  4754. * @param src input image.
  4755. * @param dst output image; it has the specified size and the same type as src.
  4756. */
  4757. + (void)pyrDown:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(pyrDown(src:dst:));
  4758. //
  4759. // void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), BorderTypes borderType = BORDER_DEFAULT)
  4760. //
  4761. /**
  4762. * Upsamples an image and then blurs it.
  4763. *
  4764. * By default, size of the output image is computed as `Size(src.cols\*2, src.rows\*2)`, but in any
  4765. * case, the following conditions should be satisfied:
  4766. *
  4767. * `$$\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}$$`
  4768. *
  4769. * The function performs the upsampling step of the Gaussian pyramid construction, though it can
  4770. * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
  4771. * injecting even zero rows and columns and then convolves the result with the same kernel as in
  4772. * pyrDown multiplied by 4.
  4773. *
  4774. * @param src input image.
  4775. * @param dst output image. It has the specified size and the same type as src .
  4776. * @param dstsize size of the output image.
  4777. * @param borderType Pixel extrapolation method, see #BorderTypes (only #BORDER_DEFAULT is supported)
  4778. */
  4779. + (void)pyrUp:(Mat*)src dst:(Mat*)dst dstsize:(Size2i*)dstsize borderType:(BorderTypes)borderType NS_SWIFT_NAME(pyrUp(src:dst:dstsize:borderType:));
  4780. /**
  4781. * Upsamples an image and then blurs it.
  4782. *
  4783. * By default, size of the output image is computed as `Size(src.cols\*2, src.rows\*2)`, but in any
  4784. * case, the following conditions should be satisfied:
  4785. *
  4786. * `$$\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}$$`
  4787. *
  4788. * The function performs the upsampling step of the Gaussian pyramid construction, though it can
  4789. * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
  4790. * injecting even zero rows and columns and then convolves the result with the same kernel as in
  4791. * pyrDown multiplied by 4.
  4792. *
  4793. * @param src input image.
  4794. * @param dst output image. It has the specified size and the same type as src .
  4795. * @param dstsize size of the output image.
  4796. */
  4797. + (void)pyrUp:(Mat*)src dst:(Mat*)dst dstsize:(Size2i*)dstsize NS_SWIFT_NAME(pyrUp(src:dst:dstsize:));
  4798. /**
  4799. * Upsamples an image and then blurs it.
  4800. *
  4801. * By default, size of the output image is computed as `Size(src.cols\*2, src.rows\*2)`, but in any
  4802. * case, the following conditions should be satisfied:
  4803. *
  4804. * `$$\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}$$`
  4805. *
  4806. * The function performs the upsampling step of the Gaussian pyramid construction, though it can
  4807. * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
  4808. * injecting even zero rows and columns and then convolves the result with the same kernel as in
  4809. * pyrDown multiplied by 4.
  4810. *
  4811. * @param src input image.
  4812. * @param dst output image. It has the specified size and the same type as src .
  4813. */
  4814. + (void)pyrUp:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(pyrUp(src:dst:));
  4815. //
  4816. // void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false)
  4817. //
  4818. + (void)calcHist:(NSArray<Mat*>*)images channels:(IntVector*)channels mask:(Mat*)mask hist:(Mat*)hist histSize:(IntVector*)histSize ranges:(FloatVector*)ranges accumulate:(BOOL)accumulate NS_SWIFT_NAME(calcHist(images:channels:mask:hist:histSize:ranges:accumulate:));
  4819. + (void)calcHist:(NSArray<Mat*>*)images channels:(IntVector*)channels mask:(Mat*)mask hist:(Mat*)hist histSize:(IntVector*)histSize ranges:(FloatVector*)ranges NS_SWIFT_NAME(calcHist(images:channels:mask:hist:histSize:ranges:));
  4820. //
  4821. // void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
  4822. //
  4823. + (void)calcBackProject:(NSArray<Mat*>*)images channels:(IntVector*)channels hist:(Mat*)hist dst:(Mat*)dst ranges:(FloatVector*)ranges scale:(double)scale NS_SWIFT_NAME(calcBackProject(images:channels:hist:dst:ranges:scale:));
  4824. //
  4825. // double cv::compareHist(Mat H1, Mat H2, HistCompMethods method)
  4826. //
  4827. /**
  4828. * Compares two histograms.
  4829. *
  4830. * The function cv::compareHist compares two dense or two sparse histograms using the specified method.
  4831. *
  4832. * The function returns `$$d(H_1, H_2)$$` .
  4833. *
  4834. * While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable
  4835. * for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling
  4836. * problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms
  4837. * or more general sparse configurations of weighted points, consider using the #EMD function.
  4838. *
  4839. * @param H1 First compared histogram.
  4840. * @param H2 Second compared histogram of the same size as H1 .
  4841. * @param method Comparison method, see #HistCompMethods
  4842. */
  4843. + (double)compareHist:(Mat*)H1 H2:(Mat*)H2 method:(HistCompMethods)method NS_SWIFT_NAME(compareHist(H1:H2:method:));
  4844. //
  4845. // void cv::equalizeHist(Mat src, Mat& dst)
  4846. //
  4847. /**
  4848. * Equalizes the histogram of a grayscale image.
  4849. *
  4850. * The function equalizes the histogram of the input image using the following algorithm:
  4851. *
  4852. * - Calculate the histogram `$$H$$` for src .
  4853. * - Normalize the histogram so that the sum of histogram bins is 255.
  4854. * - Compute the integral of the histogram:
  4855. * `$$H'_i = \sum _{0 \le j < i} H(j)$$`
  4856. * - Transform the image using `$$H'$$` as a look-up table: `$$\texttt{dst}(x,y) = H'(\texttt{src}(x,y))$$`
  4857. *
  4858. * The algorithm normalizes the brightness and increases the contrast of the image.
  4859. *
  4860. * @param src Source 8-bit single channel image.
  4861. * @param dst Destination image of the same size and type as src .
  4862. */
  4863. + (void)equalizeHist:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(equalizeHist(src:dst:));
  4864. //
  4865. // Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8))
  4866. //
  4867. /**
  4868. * Creates a smart pointer to a cv::CLAHE class and initializes it.
  4869. *
  4870. * @param clipLimit Threshold for contrast limiting.
  4871. * @param tileGridSize Size of grid for histogram equalization. Input image will be divided into
  4872. * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
  4873. */
  4874. + (CLAHE*)createCLAHE:(double)clipLimit tileGridSize:(Size2i*)tileGridSize NS_SWIFT_NAME(createCLAHE(clipLimit:tileGridSize:));
  4875. /**
  4876. * Creates a smart pointer to a cv::CLAHE class and initializes it.
  4877. *
  4878. * @param clipLimit Threshold for contrast limiting.
  4879. * The input image will be divided into equally sized rectangular tiles using the default 8x8 tile grid.
  4880. */
  4881. + (CLAHE*)createCLAHE:(double)clipLimit NS_SWIFT_NAME(createCLAHE(clipLimit:));
  4882. /**
  4883. * Creates a smart pointer to a cv::CLAHE class and initializes it.
  4884. *
  4885. * The default clip limit of 40.0 and 8x8 tile grid are used; the input image will be divided into equally sized rectangular tiles.
  4886. */
  4887. + (CLAHE*)createCLAHE NS_SWIFT_NAME(createCLAHE());
  4888. //
  4889. // float cv::wrapperEMD(Mat signature1, Mat signature2, DistanceTypes distType, Mat cost = Mat(), _hidden_ & lowerBound = cv::Ptr<float>(), Mat& flow = Mat())
  4890. //
  4891. /**
  4892. * Computes the "minimal work" distance between two weighted point configurations.
  4893. *
  4894. * The function computes the earth mover distance and/or a lower boundary of the distance between the
  4895. * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
  4896. * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
  4897. * problem that is solved using some modification of a simplex algorithm, thus the complexity is
  4898. * exponential in the worst case, though, on average it is much faster. In the case of a real metric
  4899. * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
  4900. * to determine roughly whether the two signatures are far enough so that they cannot relate to the
  4901. * same object.
  4902. *
  4903. * @param signature1 First signature, a `$$\texttt{size1}\times \texttt{dims}+1$$` floating-point matrix.
  4904. * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
  4905. * a single column (weights only) if the user-defined cost matrix is used. The weights must be
  4906. * non-negative and have at least one non-zero value.
  4907. * @param signature2 Second signature of the same format as signature1 , though the number of rows
  4908. * may be different. The total weights may be different. In this case an extra "dummy" point is added
  4909. * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
  4910. * value.
  4911. * @param distType Used metric. See #DistanceTypes.
  4912. * @param cost User-defined `$$\texttt{size1}\times \texttt{size2}$$` cost matrix. Also, if a cost matrix
  4913. * is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
  4914. * @param lowerBound Optional input/output parameter: lower boundary of a distance between the two
  4915. * signatures that is a distance between mass centers. The lower boundary may not be calculated if
  4916. * the user-defined cost matrix is used, the total weights of point configurations are not equal, or
  4917. * if the signatures consist of weights only (the signature matrices have a single column). You
  4918. * **must** initialize \*lowerBound . If the calculated distance between mass centers is greater or
  4919. * equal to \*lowerBound (it means that the signatures are far enough), the function does not
  4920. * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
  4921. * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
  4922. * should be set to 0.
  4923. * @param flow Resultant `$$\texttt{size1} \times \texttt{size2}$$` flow matrix: `$$\texttt{flow}_{i,j}$$` is
  4924. * a flow from `$$i$$` -th point of signature1 to `$$j$$` -th point of signature2 .
  4925. */
  4926. + (float)EMD:(Mat*)signature1 signature2:(Mat*)signature2 distType:(DistanceTypes)distType cost:(Mat*)cost flow:(Mat*)flow NS_SWIFT_NAME(wrapperEMD(signature1:signature2:distType:cost:flow:));
  4927. /**
  4928. * Computes the "minimal work" distance between two weighted point configurations.
  4929. *
  4930. * The function computes the earth mover distance and/or a lower boundary of the distance between the
  4931. * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
  4932. * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
  4933. * problem that is solved using some modification of a simplex algorithm, thus the complexity is
  4934. * exponential in the worst case, though, on average it is much faster. In the case of a real metric
  4935. * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
  4936. * to determine roughly whether the two signatures are far enough so that they cannot relate to the
  4937. * same object.
  4938. *
  4939. * @param signature1 First signature, a `$$\texttt{size1}\times \texttt{dims}+1$$` floating-point matrix.
  4940. * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
  4941. * a single column (weights only) if the user-defined cost matrix is used. The weights must be
  4942. * non-negative and have at least one non-zero value.
  4943. * @param signature2 Second signature of the same format as signature1 , though the number of rows
  4944. * may be different. The total weights may be different. In this case an extra "dummy" point is added
  4945. * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
  4946. * value.
  4947. * @param distType Used metric. See #DistanceTypes.
  4948. * @param cost User-defined `$$\texttt{size1}\times \texttt{size2}$$` cost matrix. Also, if a cost matrix
  4949. * is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
  4950. * @param lowerBound Optional input/output parameter: lower boundary of a distance between the two
  4951. * signatures that is a distance between mass centers. The lower boundary may not be calculated if
  4952. * the user-defined cost matrix is used, the total weights of point configurations are not equal, or
  4953. * if the signatures consist of weights only (the signature matrices have a single column). You
  4954. * **must** initialize \*lowerBound . If the calculated distance between mass centers is greater or
  4955. * equal to \*lowerBound (it means that the signatures are far enough), the function does not
  4956. * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
  4957. * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
  4958. * should be set to 0.
  4959. * NOTE: This variant does not return the flow matrix, i.e. the flow from the `$$i$$` -th point of signature1 to the `$$j$$` -th point of signature2 ; use the overload with the `flow` parameter to obtain it.
  4960. */
  4961. + (float)EMD:(Mat*)signature1 signature2:(Mat*)signature2 distType:(DistanceTypes)distType cost:(Mat*)cost NS_SWIFT_NAME(wrapperEMD(signature1:signature2:distType:cost:));
  4962. /**
  4963. * Computes the "minimal work" distance between two weighted point configurations.
  4964. *
  4965. * The function computes the earth mover distance and/or a lower boundary of the distance between the
  4966. * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
  4967. * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
  4968. * problem that is solved using some modification of a simplex algorithm, thus the complexity is
  4969. * exponential in the worst case, though, on average it is much faster. In the case of a real metric
  4970. * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
  4971. * to determine roughly whether the two signatures are far enough so that they cannot relate to the
  4972. * same object.
  4973. *
  4974. * @param signature1 First signature, a `$$\texttt{size1}\times \texttt{dims}+1$$` floating-point matrix.
  4975. * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
  4976. * a single column (weights only) if the user-defined cost matrix is used. The weights must be
  4977. * non-negative and have at least one non-zero value.
  4978. * @param signature2 Second signature of the same format as signature1 , though the number of rows
  4979. * may be different. The total weights may be different. In this case an extra "dummy" point is added
  4980. * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
  4981. * value.
  4982. * @param distType Used metric. See #DistanceTypes.
  4983. * NOTE: This variant does not accept a user-defined cost matrix; the cost is derived from distType.
  4984. * The lower boundary of a distance between the two signatures is a distance between mass centers. It may not be calculated if
  4985. * the user-defined cost matrix is used, the total weights of point configurations are not equal, or
  4986. * if the signatures consist of weights only (the signature matrices have a single column). You
  4987. * **must** initialize \*lowerBound . If the calculated distance between mass centers is greater or
  4988. * equal to \*lowerBound (it means that the signatures are far enough), the function does not
  4989. * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
  4990. * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
  4991. * should be set to 0.
  4992. * NOTE: This variant does not return the flow matrix, i.e. the flow from the `$$i$$` -th point of signature1 to the `$$j$$` -th point of signature2 ; use the overload with the `flow` parameter to obtain it.
  4993. */
  4994. + (float)EMD:(Mat*)signature1 signature2:(Mat*)signature2 distType:(DistanceTypes)distType NS_SWIFT_NAME(wrapperEMD(signature1:signature2:distType:));
  4995. //
  4996. // void cv::watershed(Mat image, Mat& markers)
  4997. //
  4998. /**
  4999. * Performs a marker-based image segmentation using the watershed algorithm.
  5000. *
  5001. * The function implements one of the variants of watershed, non-parametric marker-based segmentation
  5002. * algorithm, described in CITE: Meyer92 .
  5003. *
  5004. * Before passing the image to the function, you have to roughly outline the desired regions in the
  5005. * image markers with positive (\>0) indices. So, every region is represented as one or more connected
  5006. * components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary
  5007. * mask using #findContours and #drawContours (see the watershed.cpp demo). The markers are "seeds" of
  5008. * the future image regions. All the other pixels in markers , whose relation to the outlined regions
  5009. * is not known and should be defined by the algorithm, should be set to 0's. In the function output,
  5010. * each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the
  5011. * regions.
  5012. *
  5013. * NOTE: Any two neighbor connected components are not necessarily separated by a watershed boundary
  5014. * (-1's pixels); for example, they can touch each other in the initial marker image passed to the
  5015. * function.
  5016. *
  5017. * @param image Input 8-bit 3-channel image.
  5018. * @param markers Input/output 32-bit single-channel image (map) of markers. It should have the same
  5019. * size as image .
  5020. *
  5021. * @see `+findContours:contours:hierarchy:mode:method:offset:`
  5022. */
  5023. + (void)watershed:(Mat*)image markers:(Mat*)markers NS_SWIFT_NAME(watershed(image:markers:));
  5024. //
  5025. // void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
  5026. //
  5027. /**
  5028. * Performs initial step of meanshift segmentation of an image.
  5029. *
  5030. * The function implements the filtering stage of meanshift segmentation, that is, the output of the
  5031. * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
  5032. * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
  5033. * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
  5034. * considered:
  5035. *
  5036. * `$$(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}$$`
  5037. *
  5038. * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
  5039. * (though, the algorithm does not depend on the color space used, so any 3-component color space can
  5040. * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
  5041. * (R',G',B') are found and they act as the neighborhood center on the next iteration:
  5042. *
  5043. * `$$(X,Y)~(X',Y'), (R,G,B)~(R',G',B').$$`
  5044. *
  5045. * After the iterations are over, the color components of the initial pixel (that is, the pixel from where
  5046. * the iterations started) are set to the final value (average color at the last iteration):
  5047. *
  5048. * `$$I(X,Y) <- (R*,G*,B*)$$`
  5049. *
  5050. * When maxLevel \> 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
  5051. * run on the smallest layer first. After that, the results are propagated to the larger layer and the
  5052. * iterations are run again only on those pixels where the layer colors differ by more than sr from the
  5053. * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
  5054. * results will be actually different from the ones obtained by running the meanshift procedure on the
  5055. * whole original image (i.e. when maxLevel==0).
  5056. *
  5057. * @param src The source 8-bit, 3-channel image.
  5058. * @param dst The destination image of the same format and the same size as the source.
  5059. * @param sp The spatial window radius.
  5060. * @param sr The color window radius.
  5061. * @param maxLevel Maximum level of the pyramid for the segmentation.
  5062. * @param termcrit Termination criteria: when to stop meanshift iterations.
  5063. */
  5064. + (void)pyrMeanShiftFiltering:(Mat*)src dst:(Mat*)dst sp:(double)sp sr:(double)sr maxLevel:(int)maxLevel termcrit:(TermCriteria*)termcrit NS_SWIFT_NAME(pyrMeanShiftFiltering(src:dst:sp:sr:maxLevel:termcrit:));
  5065. /**
  5066. * Performs initial step of meanshift segmentation of an image.
  5067. *
  5068. * The function implements the filtering stage of meanshift segmentation, that is, the output of the
  5069. * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
  5070. * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
  5071. * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
  5072. * considered:
  5073. *
  5074. * `$$(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}$$`
  5075. *
  5076. * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
  5077. * (though, the algorithm does not depend on the color space used, so any 3-component color space can
  5078. * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
  5079. * (R',G',B') are found and they act as the neighborhood center on the next iteration:
  5080. *
  5081. * `$$(X,Y)~(X',Y'), (R,G,B)~(R',G',B').$$`
  5082. *
  5083. * After the iterations are over, the color components of the initial pixel (that is, the pixel from where
  5084. * the iterations started) are set to the final value (average color at the last iteration):
  5085. *
  5086. * `$$I(X,Y) <- (R*,G*,B*)$$`
  5087. *
  5088. * When maxLevel \> 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
  5089. * run on the smallest layer first. After that, the results are propagated to the larger layer and the
  5090. * iterations are run again only on those pixels where the layer colors differ by more than sr from the
  5091. * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
  5092. * results will be actually different from the ones obtained by running the meanshift procedure on the
  5093. * whole original image (i.e. when maxLevel==0).
  5094. *
  5095. * @param src The source 8-bit, 3-channel image.
  5096. * @param dst The destination image of the same format and the same size as the source.
  5097. * @param sp The spatial window radius.
  5098. * @param sr The color window radius.
  5099. * @param maxLevel Maximum level of the pyramid for the segmentation.
  5100. */
  5101. + (void)pyrMeanShiftFiltering:(Mat*)src dst:(Mat*)dst sp:(double)sp sr:(double)sr maxLevel:(int)maxLevel NS_SWIFT_NAME(pyrMeanShiftFiltering(src:dst:sp:sr:maxLevel:));
  5102. /**
  5103. * Performs initial step of meanshift segmentation of an image.
  5104. *
  5105. * The function implements the filtering stage of meanshift segmentation, that is, the output of the
  5106. * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
  5107. * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
  5108. * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
  5109. * considered:
  5110. *
  5111. * `$$(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}$$`
  5112. *
  5113. * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
  5114. * (though, the algorithm does not depend on the color space used, so any 3-component color space can
  5115. * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
  5116. * (R',G',B') are found and they act as the neighborhood center on the next iteration:
  5117. *
  5118. * `$$(X,Y)~(X',Y'), (R,G,B)~(R',G',B').$$`
  5119. *
  5120. * After the iterations are over, the color components of the initial pixel (that is, the pixel from where
  5121. * the iterations started) are set to the final value (average color at the last iteration):
  5122. *
  5123. * `$$I(X,Y) <- (R*,G*,B*)$$`
  5124. *
  5125. * When maxLevel \> 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
  5126. * run on the smallest layer first. After that, the results are propagated to the larger layer and the
  5127. * iterations are run again only on those pixels where the layer colors differ by more than sr from the
  5128. * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
  5129. * results will be actually different from the ones obtained by running the meanshift procedure on the
  5130. * whole original image (i.e. when maxLevel==0).
  5131. *
  5132. * @param src The source 8-bit, 3-channel image.
  5133. * @param dst The destination image of the same format and the same size as the source.
  5134. * @param sp The spatial window radius.
  5135. * @param sr The color window radius.
  5136. */
  5137. + (void)pyrMeanShiftFiltering:(Mat*)src dst:(Mat*)dst sp:(double)sp sr:(double)sr NS_SWIFT_NAME(pyrMeanShiftFiltering(src:dst:sp:sr:));
  5138. //
  5139. // void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
  5140. //
  5141. /**
  5142. * Runs the GrabCut algorithm.
  5143. *
  5144. * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).
  5145. *
  5146. * @param img Input 8-bit 3-channel image.
  5147. * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
  5148. * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.
  5149. * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
  5150. * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT .
  5151. * @param bgdModel Temporary array for the background model. Do not modify it while you are
  5152. * processing the same image.
  5153. * @param fgdModel Temporary array for the foreground model. Do not modify it while you are
  5154. * processing the same image.
  5155. * @param iterCount Number of iterations the algorithm should make before returning the result. Note
  5156. * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or
  5157. * mode==GC_EVAL .
  5158. * @param mode Operation mode that could be one of the #GrabCutModes
  5159. */
  5160. + (void)grabCut:(Mat*)img mask:(Mat*)mask rect:(Rect2i*)rect bgdModel:(Mat*)bgdModel fgdModel:(Mat*)fgdModel iterCount:(int)iterCount mode:(int)mode NS_SWIFT_NAME(grabCut(img:mask:rect:bgdModel:fgdModel:iterCount:mode:));
  5161. /**
  5162. * Runs the GrabCut algorithm.
  5163. *
  5164. * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).
  5165. *
  5166. * @param img Input 8-bit 3-channel image.
  5167. * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
  5168. * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.
  5169. * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
  5170. * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT .
  5171. * @param bgdModel Temporary array for the background model. Do not modify it while you are
  5172. * processing the same image.
  5173. * @param fgdModel Temporary array for the foreground model. Do not modify it while you are
  5174. * processing the same image.
  5175. * @param iterCount Number of iterations the algorithm should make before returning the result. Note
  5176. * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or
  5177. * mode==GC_EVAL .
  5178. */
  5179. + (void)grabCut:(Mat*)img mask:(Mat*)mask rect:(Rect2i*)rect bgdModel:(Mat*)bgdModel fgdModel:(Mat*)fgdModel iterCount:(int)iterCount NS_SWIFT_NAME(grabCut(img:mask:rect:bgdModel:fgdModel:iterCount:));
  5180. //
  5181. // void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, DistanceTypes distanceType, DistanceTransformMasks maskSize, DistanceTransformLabelTypes labelType = DIST_LABEL_CCOMP)
  5182. //
  5183. /**
  5184. * Calculates the distance to the closest zero pixel for each pixel of the source image.
  5185. *
  5186. * The function cv::distanceTransform calculates the approximate or precise distance from every binary
  5187. * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.
  5188. *
  5189. * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the
  5190. * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library.
  5191. *
  5192. * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function
  5193. * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
  5194. * diagonal, or knight's move (the latter is available for a `$$5\times 5$$` mask). The overall
  5195. * distance is calculated as a sum of these basic distances. Since the distance function should be
  5196. * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all
  5197. * the diagonal shifts must have the same cost (denoted as `b`), and all knight's moves must have the
  5198. * same cost (denoted as `c`). For the #DIST_C and #DIST_L1 types, the distance is calculated
  5199. * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a
  5200. * relative error (a `$$5\times 5$$` mask gives more accurate results). For `a`,`b`, and `c`, OpenCV
  5201. * uses the values suggested in the original paper:
  5202. * - DIST_L1: `a = 1, b = 2`
  5203. * - DIST_L2:
  5204. * - `3 x 3`: `a=0.955, b=1.3693`
  5205. * - `5 x 5`: `a=1, b=1.4, c=2.1969`
  5206. * - DIST_C: `a = 1, b = 1`
  5207. *
  5208. * Typically, for a fast, coarse distance estimation #DIST_L2, a `$$3\times 3$$` mask is used. For a
  5209. * more accurate distance estimation #DIST_L2, a `$$5\times 5$$` mask or the precise algorithm is used.
  5210. * Note that both the precise and the approximate algorithms are linear on the number of pixels.
  5211. *
  5212. * This variant of the function does not only compute the minimum distance for each pixel `$$(x, y)$$`
  5213. * but also identifies the nearest connected component consisting of zero pixels
  5214. * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
  5215. * component/pixel is stored in `labels(x, y)`. When labelType==#DIST_LABEL_CCOMP, the function
  5216. * automatically finds connected components of zero pixels in the input image and marks them with
  5217. * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
  5218. * marks all the zero pixels with distinct labels.
  5219. *
  5220. * In this mode, the complexity is still linear. That is, the function provides a very fast way to
  5221. * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
  5222. * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported
  5223. * yet.
  5224. *
  5225. * @param src 8-bit, single-channel (binary) source image.
  5226. * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
  5227. * single-channel image of the same size as src.
  5228. * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
  5229. * CV_32SC1 and the same size as src.
  5230. * @param distanceType Type of distance, see #DistanceTypes
  5231. * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.
  5232. * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,
  5233. * the parameter is forced to 3 because a `$$3\times 3$$` mask gives the same result as `$$5\times
  5234. * 5$$` or any larger aperture.
  5235. * @param labelType Type of the label array to build, see #DistanceTransformLabelTypes.
  5236. */
  5237. + (void)distanceTransformWithLabels:(Mat*)src dst:(Mat*)dst labels:(Mat*)labels distanceType:(DistanceTypes)distanceType maskSize:(DistanceTransformMasks)maskSize labelType:(DistanceTransformLabelTypes)labelType NS_SWIFT_NAME(distanceTransform(src:dst:labels:distanceType:maskSize:labelType:));
  5238. /**
  5239. * Calculates the distance to the closest zero pixel for each pixel of the source image.
  5240. *
  5241. * The function cv::distanceTransform calculates the approximate or precise distance from every binary
  5242. * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.
  5243. *
  5244. * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the
  5245. * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library.
  5246. *
  5247. * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function
  5248. * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
  5249. * diagonal, or knight's move (the latter is available for a `$$5\times 5$$` mask). The overall
  5250. * distance is calculated as a sum of these basic distances. Since the distance function should be
  5251. * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all
  5252. * the diagonal shifts must have the same cost (denoted as `b`), and all knight's moves must have the
  5253. * same cost (denoted as `c`). For the #DIST_C and #DIST_L1 types, the distance is calculated
  5254. * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a
  5255. * relative error (a `$$5\times 5$$` mask gives more accurate results). For `a`,`b`, and `c`, OpenCV
  5256. * uses the values suggested in the original paper:
  5257. * - DIST_L1: `a = 1, b = 2`
  5258. * - DIST_L2:
  5259. * - `3 x 3`: `a=0.955, b=1.3693`
  5260. * - `5 x 5`: `a=1, b=1.4, c=2.1969`
  5261. * - DIST_C: `a = 1, b = 1`
  5262. *
  5263. * Typically, for a fast, coarse distance estimation #DIST_L2, a `$$3\times 3$$` mask is used. For a
  5264. * more accurate distance estimation #DIST_L2, a `$$5\times 5$$` mask or the precise algorithm is used.
  5265. * Note that both the precise and the approximate algorithms are linear on the number of pixels.
  5266. *
  5267. * This variant of the function does not only compute the minimum distance for each pixel `$$(x, y)$$`
  5268. * but also identifies the nearest connected component consisting of zero pixels
  5269. * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
  5270. * component/pixel is stored in `labels(x, y)`. When labelType==#DIST_LABEL_CCOMP, the function
  5271. * automatically finds connected components of zero pixels in the input image and marks them with
  5272. * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
  5273. * marks all the zero pixels with distinct labels.
  5274. *
  5275. * In this mode, the complexity is still linear. That is, the function provides a very fast way to
  5276. * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
  5277. * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported
  5278. * yet.
  5279. *
  5280. * @param src 8-bit, single-channel (binary) source image.
  5281. * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
  5282. * single-channel image of the same size as src.
  5283. * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
  5284. * CV_32SC1 and the same size as src.
  5285. * @param distanceType Type of distance, see #DistanceTypes
  5286. * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.
  5287. * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,
  5288. * the parameter is forced to 3 because a `$$3\times 3$$` mask gives the same result as `$$5\times
  5289. * 5$$` or any larger aperture.
  5290. */
  5291. + (void)distanceTransformWithLabels:(Mat*)src dst:(Mat*)dst labels:(Mat*)labels distanceType:(DistanceTypes)distanceType maskSize:(DistanceTransformMasks)maskSize NS_SWIFT_NAME(distanceTransform(src:dst:labels:distanceType:maskSize:));
  5292. //
  5293. // void cv::distanceTransform(Mat src, Mat& dst, DistanceTypes distanceType, DistanceTransformMasks maskSize, int dstType = CV_32F)
  5294. //
  5295. /**
  5296. *
  5297. * @param src 8-bit, single-channel (binary) source image.
  5298. * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
  5299. * single-channel image of the same size as src .
  5300. * @param distanceType Type of distance, see #DistanceTypes
  5301. * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the
  5302. * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a `$$3\times 3$$` mask gives
  5303. * the same result as `$$5\times 5$$` or any larger aperture.
  5304. * @param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for
  5305. * the first variant of the function and distanceType == #DIST_L1.
  5306. */
  5307. + (void)distanceTransform:(Mat*)src dst:(Mat*)dst distanceType:(DistanceTypes)distanceType maskSize:(DistanceTransformMasks)maskSize dstType:(int)dstType NS_SWIFT_NAME(distanceTransform(src:dst:distanceType:maskSize:dstType:));
  5308. /**
  5309. *
  5310. * @param src 8-bit, single-channel (binary) source image.
  5311. * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
  5312. * single-channel image of the same size as src .
  5313. * @param distanceType Type of distance, see #DistanceTypes
  5314. * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the
  5315. * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a `$$3\times 3$$` mask gives
  5316. * the same result as `$$5\times 5$$` or any larger aperture.
  5317. * NOTE: In this variant the output has type CV_32F; CV_8U output is available only in the overload that takes an explicit dstType, and only with distanceType == #DIST_L1.
  5318. */
  5319. + (void)distanceTransform:(Mat*)src dst:(Mat*)dst distanceType:(DistanceTypes)distanceType maskSize:(DistanceTransformMasks)maskSize NS_SWIFT_NAME(distanceTransform(src:dst:distanceType:maskSize:));
  5320. //
  5321. // int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
  5322. //
  5323. /**
  5324. * Fills a connected component with the given color.
  5325. *
  5326. * The function cv::floodFill fills a connected component starting from the seed point with the specified
  5327. * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
  5328. * pixel at `$$(x,y)$$` is considered to belong to the repainted domain if:
  5329. *
  5330. * - in case of a grayscale image and floating range
  5331. * `$$\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}$$`
  5332. *
  5333. *
  5334. * - in case of a grayscale image and fixed range
  5335. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}$$`
  5336. *
  5337. *
  5338. * - in case of a color image and floating range
  5339. * `$$\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,$$`
  5340. * `$$\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g$$`
  5341. * and
  5342. * `$$\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b$$`
  5343. *
  5344. *
  5345. * - in case of a color image and fixed range
  5346. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,$$`
  5347. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g$$`
  5348. * and
  5349. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b$$`
  5350. *
  5351. *
  5352. * where `$$src(x',y')$$` is the value of one of pixel neighbors that is already known to belong to the
  5353. * component. That is, to be added to the connected component, a color/brightness of the pixel should
  5354. * be close enough to:
  5355. * - Color/brightness of one of its neighbors that already belong to the connected component in case
  5356. * of a floating range.
  5357. * - Color/brightness of the seed point in case of a fixed range.
  5358. *
  5359. * Use these functions to either mark a connected component with the specified color in-place, or build
  5360. * a mask and then extract the contour, or copy the region to another image, and so on.
  5361. *
  5362. * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
  5363. * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
  5364. * the details below.
  5365. * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
  5366. * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
  5367. * input and output parameter, you must take responsibility of initializing it.
  5368. * Flood-filling cannot go across non-zero pixels in the input mask. For example,
  5369. * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
  5370. * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
  5371. * as described below. Additionally, the function fills the border of the mask with ones to simplify
  5372. * internal processing. It is therefore possible to use the same mask in multiple calls to the function
  5373. * to make sure the filled areas do not overlap.
  5374. * @param seedPoint Starting point.
  5375. * @param newVal New value of the repainted domain pixels.
  5376. * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
  5377. * one of its neighbors belonging to the component, or a seed pixel being added to the component.
  5378. * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and
  5379. * one of its neighbors belonging to the component, or a seed pixel being added to the component.
  5380. * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
  5381. * repainted domain.
  5382. * @param flags Operation flags. The first 8 bits contain a connectivity value. The default value of
  5383. * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
  5384. * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
  5385. * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
  5386. * the mask (the default value is 1). For example, 4 | ( 255 \<\< 8 ) will consider 4 nearest
  5387. * neighbours and fill the mask with a value of 255. The following additional options occupy higher
  5388. * bits and therefore may be further combined with the connectivity and mask fill values using
  5389. * bit-wise or (|), see #FloodFillFlags.
  5390. *
  5391. * NOTE: Since the mask is larger than the filled image, a pixel `$$(x, y)$$` in image corresponds to the
  5392. * pixel `$$(x+1, y+1)$$` in the mask .
  5393. *
  5394. * @see `+findContours:contours:hierarchy:mode:method:offset:`
  5395. */
  5396. + (int)floodFill:(Mat*)image mask:(Mat*)mask seedPoint:(Point2i*)seedPoint newVal:(Scalar*)newVal rect:(Rect2i*)rect loDiff:(Scalar*)loDiff upDiff:(Scalar*)upDiff flags:(int)flags NS_SWIFT_NAME(floodFill(image:mask:seedPoint:newVal:rect:loDiff:upDiff:flags:));
  5397. /**
  5398. * Fills a connected component with the given color.
  5399. *
  5400. * The function cv::floodFill fills a connected component starting from the seed point with the specified
  5401. * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
  5402. * pixel at `$$(x,y)$$` is considered to belong to the repainted domain if:
  5403. *
  5404. * - in case of a grayscale image and floating range
  5405. * `$$\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}$$`
  5406. *
  5407. *
  5408. * - in case of a grayscale image and fixed range
  5409. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}$$`
  5410. *
  5411. *
  5412. * - in case of a color image and floating range
  5413. * `$$\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,$$`
  5414. * `$$\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g$$`
  5415. * and
  5416. * `$$\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b$$`
  5417. *
  5418. *
  5419. * - in case of a color image and fixed range
  5420. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,$$`
  5421. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g$$`
  5422. * and
  5423. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b$$`
  5424. *
  5425. *
  5426. * where `$$src(x',y')$$` is the value of one of pixel neighbors that is already known to belong to the
  5427. * component. That is, to be added to the connected component, a color/brightness of the pixel should
  5428. * be close enough to:
  5429. * - Color/brightness of one of its neighbors that already belong to the connected component in case
  5430. * of a floating range.
  5431. * - Color/brightness of the seed point in case of a fixed range.
  5432. *
  5433. * Use these functions to either mark a connected component with the specified color in-place, or build
  5434. * a mask and then extract the contour, or copy the region to another image, and so on.
  5435. *
  5436. * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
  5437. * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
  5438. * the details below.
  5439. * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
  5440. * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
  5441. * input and output parameter, you must take responsibility of initializing it.
  5442. * Flood-filling cannot go across non-zero pixels in the input mask. For example,
  5443. * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
  5444. * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
  5445. * as described below. Additionally, the function fills the border of the mask with ones to simplify
  5446. * internal processing. It is therefore possible to use the same mask in multiple calls to the function
  5447. * to make sure the filled areas do not overlap.
  5448. * @param seedPoint Starting point.
  5449. * @param newVal New value of the repainted domain pixels.
  5450. * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
  5451. * one of its neighbors belonging to the component, or a seed pixel being added to the component.
  5452. * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and
  5453. * one of its neighbors belonging to the component, or a seed pixel being added to the component.
  5454. * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
  5455. * repainted domain.
  5456. * This variant uses the default flags value of 4, i.e. a connectivity of 4 meaning that only the
  5457. * four nearest neighbor pixels (those that share an edge) are considered, and the mask is filled
  5458. * with the default value of 1. See the first variant of the function and #FloodFillFlags for how
  5459. * connectivity, the mask fill value, and additional options can be combined in the flags argument.
  5463. *
  5464. * NOTE: Since the mask is larger than the filled image, a pixel `$$(x, y)$$` in image corresponds to the
  5465. * pixel `$$(x+1, y+1)$$` in the mask .
  5466. *
  5467. * @see `+findContours:contours:hierarchy:mode:method:offset:`
  5468. */
  5469. + (int)floodFill:(Mat*)image mask:(Mat*)mask seedPoint:(Point2i*)seedPoint newVal:(Scalar*)newVal rect:(Rect2i*)rect loDiff:(Scalar*)loDiff upDiff:(Scalar*)upDiff NS_SWIFT_NAME(floodFill(image:mask:seedPoint:newVal:rect:loDiff:upDiff:));
  5470. /**
  5471. * Fills a connected component with the given color.
  5472. *
  5473. * The function cv::floodFill fills a connected component starting from the seed point with the specified
  5474. * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
  5475. * pixel at `$$(x,y)$$` is considered to belong to the repainted domain if:
  5476. *
  5477. * - in case of a grayscale image and floating range
  5478. * `$$\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}$$`
  5479. *
  5480. *
  5481. * - in case of a grayscale image and fixed range
  5482. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}$$`
  5483. *
  5484. *
  5485. * - in case of a color image and floating range
  5486. * `$$\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,$$`
  5487. * `$$\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g$$`
  5488. * and
  5489. * `$$\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b$$`
  5490. *
  5491. *
  5492. * - in case of a color image and fixed range
  5493. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,$$`
  5494. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g$$`
  5495. * and
  5496. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b$$`
  5497. *
  5498. *
  5499. * where `$$src(x',y')$$` is the value of one of pixel neighbors that is already known to belong to the
  5500. * component. That is, to be added to the connected component, a color/brightness of the pixel should
  5501. * be close enough to:
  5502. * - Color/brightness of one of its neighbors that already belong to the connected component in case
  5503. * of a floating range.
  5504. * - Color/brightness of the seed point in case of a fixed range.
  5505. *
  5506. * Use these functions to either mark a connected component with the specified color in-place, or build
  5507. * a mask and then extract the contour, or copy the region to another image, and so on.
  5508. *
  5509. * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
  5510. * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
  5511. * the details below.
  5512. * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
  5513. * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
  5514. * input and output parameter, you must take responsibility of initializing it.
  5515. * Flood-filling cannot go across non-zero pixels in the input mask. For example,
  5516. * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
  5517. * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
  5518. * as described below. Additionally, the function fills the border of the mask with ones to simplify
  5519. * internal processing. It is therefore possible to use the same mask in multiple calls to the function
  5520. * to make sure the filled areas do not overlap.
  5521. * @param seedPoint Starting point.
  5522. * @param newVal New value of the repainted domain pixels.
  5523. * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
  5524. * one of its neighbors belonging to the component, or a seed pixel being added to the component.
  5525. * This variant uses a zero upDiff threshold; see the first variant of the function for its meaning.
  5526. * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
  5527. * repainted domain.
  5528. * This variant uses the default flags value of 4, i.e. a connectivity of 4 meaning that only the
  5529. * four nearest neighbor pixels (those that share an edge) are considered, and the mask is filled
  5530. * with the default value of 1. See the first variant of the function and #FloodFillFlags for how
  5531. * connectivity, the mask fill value, and additional options can be combined in the flags argument.
  5535. *
  5536. * NOTE: Since the mask is larger than the filled image, a pixel `$$(x, y)$$` in image corresponds to the
  5537. * pixel `$$(x+1, y+1)$$` in the mask .
  5538. *
  5539. * @see `+findContours:contours:hierarchy:mode:method:offset:`
  5540. */
  5541. + (int)floodFill:(Mat*)image mask:(Mat*)mask seedPoint:(Point2i*)seedPoint newVal:(Scalar*)newVal rect:(Rect2i*)rect loDiff:(Scalar*)loDiff NS_SWIFT_NAME(floodFill(image:mask:seedPoint:newVal:rect:loDiff:));
  5542. /**
  5543. * Fills a connected component with the given color.
  5544. *
  5545. * The function cv::floodFill fills a connected component starting from the seed point with the specified
  5546. * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
  5547. * pixel at `$$(x,y)$$` is considered to belong to the repainted domain if:
  5548. *
  5549. * - in case of a grayscale image and floating range
  5550. * `$$\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}$$`
  5551. *
  5552. *
  5553. * - in case of a grayscale image and fixed range
  5554. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}$$`
  5555. *
  5556. *
  5557. * - in case of a color image and floating range
  5558. * `$$\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,$$`
  5559. * `$$\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g$$`
  5560. * and
  5561. * `$$\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b$$`
  5562. *
  5563. *
  5564. * - in case of a color image and fixed range
  5565. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,$$`
  5566. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g$$`
  5567. * and
  5568. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b$$`
  5569. *
  5570. *
  5571. * where `$$src(x',y')$$` is the value of one of pixel neighbors that is already known to belong to the
  5572. * component. That is, to be added to the connected component, a color/brightness of the pixel should
  5573. * be close enough to:
  5574. * - Color/brightness of one of its neighbors that already belong to the connected component in case
  5575. * of a floating range.
  5576. * - Color/brightness of the seed point in case of a fixed range.
  5577. *
  5578. * Use these functions to either mark a connected component with the specified color in-place, or build
  5579. * a mask and then extract the contour, or copy the region to another image, and so on.
  5580. *
  5581. * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
  5582. * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
  5583. * the details below.
  5584. * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
  5585. * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
  5586. * input and output parameter, you must take responsibility of initializing it.
  5587. * Flood-filling cannot go across non-zero pixels in the input mask. For example,
  5588. * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
  5589. * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
  5590. * as described below. Additionally, the function fills the border of the mask with ones to simplify
  5591. * internal processing. It is therefore possible to use the same mask in multiple calls to the function
  5592. * to make sure the filled areas do not overlap.
  5593. * @param seedPoint Starting point.
  5594. * @param newVal New value of the repainted domain pixels.
  5595. * This variant uses zero loDiff and upDiff thresholds; see the first variant of the function
  5596. * for the meaning of these parameters.
  5597. * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
  5598. * repainted domain.
  5599. * This variant uses the default flags value of 4, i.e. a connectivity of 4 meaning that only the
  5600. * four nearest neighbor pixels (those that share an edge) are considered, and the mask is filled
  5601. * with the default value of 1. See the first variant of the function and #FloodFillFlags for how
  5602. * connectivity, the mask fill value, and additional options can be combined in the flags argument.
  5606. *
  5607. * NOTE: Since the mask is larger than the filled image, a pixel `$$(x, y)$$` in image corresponds to the
  5608. * pixel `$$(x+1, y+1)$$` in the mask .
  5609. *
  5610. * @see `+findContours:contours:hierarchy:mode:method:offset:`
  5611. */
  5612. + (int)floodFill:(Mat*)image mask:(Mat*)mask seedPoint:(Point2i*)seedPoint newVal:(Scalar*)newVal rect:(Rect2i*)rect NS_SWIFT_NAME(floodFill(image:mask:seedPoint:newVal:rect:));
  5613. /**
  5614. * Fills a connected component with the given color.
  5615. *
  5616. * The function cv::floodFill fills a connected component starting from the seed point with the specified
  5617. * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
  5618. * pixel at `$$(x,y)$$` is considered to belong to the repainted domain if:
  5619. *
  5620. * - in case of a grayscale image and floating range
  5621. * `$$\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}$$`
  5622. *
  5623. *
  5624. * - in case of a grayscale image and fixed range
  5625. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}$$`
  5626. *
  5627. *
  5628. * - in case of a color image and floating range
  5629. * `$$\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,$$`
  5630. * `$$\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g$$`
  5631. * and
  5632. * `$$\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b$$`
  5633. *
  5634. *
  5635. * - in case of a color image and fixed range
  5636. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,$$`
  5637. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g$$`
  5638. * and
  5639. * `$$\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b$$`
  5640. *
  5641. *
  5642. * where `$$src(x',y')$$` is the value of one of pixel neighbors that is already known to belong to the
  5643. * component. That is, to be added to the connected component, a color/brightness of the pixel should
  5644. * be close enough to:
  5645. * - Color/brightness of one of its neighbors that already belong to the connected component in case
  5646. * of a floating range.
  5647. * - Color/brightness of the seed point in case of a fixed range.
  5648. *
  5649. * Use these functions to either mark a connected component with the specified color in-place, or build
  5650. * a mask and then extract the contour, or copy the region to another image, and so on.
  5651. *
  5652. * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
  5653. * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
  5654. * the details below.
  5655. * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
  5656. * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
  5657. * input and output parameter, you must take responsibility of initializing it.
  5658. * Flood-filling cannot go across non-zero pixels in the input mask. For example,
  5659. * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
  5660. * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
  5661. * as described below. Additionally, the function fills the border of the mask with ones to simplify
  5662. * internal processing. It is therefore possible to use the same mask in multiple calls to the function
  5663. * to make sure the filled areas do not overlap.
  5664. * @param seedPoint Starting point.
  5665. * @param newVal New value of the repainted domain pixels.
  5666. * This variant uses zero loDiff and upDiff thresholds and does not report the minimum bounding
  5667. * rectangle of the repainted domain; see the first variant of the function for the meaning of
  5668. * these parameters.
  5669. * It also uses the default flags value of 4, i.e. a connectivity of 4 meaning that only the
  5670. * four nearest neighbor pixels (those that share an edge) are considered, and the mask is filled
  5671. * with the default value of 1. See the first variant of the function and #FloodFillFlags for how
  5672. * connectivity, the mask fill value, and additional options can be combined in the flags argument.
  5676. *
  5677. * NOTE: Since the mask is larger than the filled image, a pixel `$$(x, y)$$` in image corresponds to the
  5678. * pixel `$$(x+1, y+1)$$` in the mask .
  5679. *
  5680. * @see `+findContours:contours:hierarchy:mode:method:offset:`
  5681. */
  5682. + (int)floodFill:(Mat*)image mask:(Mat*)mask seedPoint:(Point2i*)seedPoint newVal:(Scalar*)newVal NS_SWIFT_NAME(floodFill(image:mask:seedPoint:newVal:));
  5683. //
  5684. // void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst)
  5685. //
  5686. /**
  5687. *
  5688. *
  5689. * variant without `mask` parameter
  5690. */
  5691. + (void)blendLinear:(Mat*)src1 src2:(Mat*)src2 weights1:(Mat*)weights1 weights2:(Mat*)weights2 dst:(Mat*)dst NS_SWIFT_NAME(blendLinear(src1:src2:weights1:weights2:dst:));
  5692. //
  5693. // void cv::cvtColor(Mat src, Mat& dst, ColorConversionCodes code, int dstCn = 0)
  5694. //
  5695. /**
  5696. * Converts an image from one color space to another.
  5697. *
  5698. * The function converts an input image from one color space to another. In case of a transformation
  5699. * to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
  5700. * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
  5701. * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
  5702. * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
  5703. * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
  5704. *
  5705. * The conventional ranges for R, G, and B channel values are:
  5706. * - 0 to 255 for CV_8U images
  5707. * - 0 to 65535 for CV_16U images
  5708. * - 0 to 1 for CV_32F images
  5709. *
  5710. * In case of linear transformations, the range does not matter. But in case of a non-linear
  5711. * transformation, an input RGB image should be normalized to the proper value range to get the correct
  5712. * results, for example, for RGB `$$\rightarrow$$` L\*u\*v\* transformation. For example, if you have a
  5713. * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
  5714. * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,
  5715. * you need first to scale the image down:
  5716. *
  5717. * img *= 1./255;
  5718. * cvtColor(img, img, COLOR_BGR2Luv);
  5719. *
  5720. * If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many
  5721. * applications, this will not be noticeable but it is recommended to use 32-bit images in applications
  5722. * that need the full range of colors or that convert an image before an operation and then convert
  5723. * back.
  5724. *
  5725. * If conversion adds the alpha channel, its value will set to the maximum of corresponding channel
  5726. * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
  5727. *
  5728. * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
  5729. * floating-point.
  5730. * @param dst output image of the same size and depth as src.
  5731. * @param code color space conversion code (see #ColorConversionCodes).
  5732. * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
  5733. * channels is derived automatically from src and code.
  5734. *
  5735. * @see `REF: imgproc_color_conversions`
  5736. */
  5737. + (void)cvtColor:(Mat*)src dst:(Mat*)dst code:(ColorConversionCodes)code dstCn:(int)dstCn NS_SWIFT_NAME(cvtColor(src:dst:code:dstCn:));
  5738. /**
  5739. * Converts an image from one color space to another.
  5740. *
  5741. * The function converts an input image from one color space to another. In case of a transformation
  5742. * to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
  5743. * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
  5744. * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
  5745. * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
  5746. * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
  5747. *
  5748. * The conventional ranges for R, G, and B channel values are:
  5749. * - 0 to 255 for CV_8U images
  5750. * - 0 to 65535 for CV_16U images
  5751. * - 0 to 1 for CV_32F images
  5752. *
  5753. * In case of linear transformations, the range does not matter. But in case of a non-linear
  5754. * transformation, an input RGB image should be normalized to the proper value range to get the correct
  5755. * results, for example, for RGB `$$\rightarrow$$` L\*u\*v\* transformation. For example, if you have a
  5756. * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
  5757. * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,
  5758. * you need first to scale the image down:
  5759. *
  5760. * img *= 1./255;
  5761. * cvtColor(img, img, COLOR_BGR2Luv);
  5762. *
  5763. * If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many
  5764. * applications, this will not be noticeable but it is recommended to use 32-bit images in applications
  5765. * that need the full range of colors or that convert an image before an operation and then convert
  5766. * back.
  5767. *
  5768. * If conversion adds the alpha channel, its value will be set to the maximum of the corresponding channel
  5769. * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
  5770. *
  5771. * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
  5772. * floating-point.
  5773. * @param dst output image of the same size and depth as src.
  5774. * @param code color space conversion code (see #ColorConversionCodes).
  5775. * The number of destination channels is derived automatically from src and code.
  5776. *
  5777. * @see `REF: imgproc_color_conversions`
  5778. */
  5779. + (void)cvtColor:(Mat*)src dst:(Mat*)dst code:(ColorConversionCodes)code NS_SWIFT_NAME(cvtColor(src:dst:code:));
  5780. //
  5781. // void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code)
  5782. //
  5783. /**
  5784. * Converts an image from one color space to another where the source image is
  5785. * stored in two planes.
  5786. *
  5787. * This function only supports YUV420 to RGB conversion as of now.
  5788. *
  5789. * @param src1 8-bit image (#CV_8U) of the Y plane.
  5790. * @param src2 image containing interleaved U/V plane.
  5791. * @param dst output image.
  5792. * @param code Specifies the type of conversion. It can take any of the following values:
  5793. * - #COLOR_YUV2BGR_NV12
  5794. * - #COLOR_YUV2RGB_NV12
  5795. * - #COLOR_YUV2BGRA_NV12
  5796. * - #COLOR_YUV2RGBA_NV12
  5797. * - #COLOR_YUV2BGR_NV21
  5798. * - #COLOR_YUV2RGB_NV21
  5799. * - #COLOR_YUV2BGRA_NV21
  5800. * - #COLOR_YUV2RGBA_NV21
  5798. + (void)cvtColorTwoPlane:(Mat*)src1 src2:(Mat*)src2 dst:(Mat*)dst code:(int)code NS_SWIFT_NAME(cvtColorTwoPlane(src1:src2:dst:code:));
  5799. //
  5800. // void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0)
  5801. //
  5802. /**
  5803. * main function for all demosaicing processes
  5804. *
  5805. * @param src input image: 8-bit unsigned or 16-bit unsigned.
  5806. * @param dst output image of the same size and depth as src.
  5807. * @param code Color space conversion code (see the description below).
  5808. * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
  5809. * channels is derived automatically from src and code.
  5810. *
  5811. * The function can do the following transformations:
  5812. *
  5813. * - Demosaicing using bilinear interpolation
  5814. *
  5815. * #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR
  5816. *
  5817. * #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY
  5818. *
  5819. * - Demosaicing using Variable Number of Gradients.
  5820. *
  5821. * #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG
  5822. *
  5823. * - Edge-Aware Demosaicing.
  5824. *
  5825. * #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA
  5826. *
  5827. * - Demosaicing with alpha channel
  5828. *
  5829. * #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA
  5830. *
  5831. * @see `+cvtColor:dst:code:dstCn:`
  5832. */
  5833. + (void)demosaicing:(Mat*)src dst:(Mat*)dst code:(int)code dstCn:(int)dstCn NS_SWIFT_NAME(demosaicing(src:dst:code:dstCn:));
  5834. /**
  5835. * main function for all demosaicing processes
  5836. *
  5837. * @param src input image: 8-bit unsigned or 16-bit unsigned.
  5838. * @param dst output image of the same size and depth as src.
  5839. * @param code Color space conversion code (see the description below).
  5840. * The number of destination channels is derived automatically from src and code.
  5841. *
  5842. * The function can do the following transformations:
  5843. *
  5844. * - Demosaicing using bilinear interpolation
  5845. *
  5846. * #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR
  5847. *
  5848. * #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY
  5849. *
  5850. * - Demosaicing using Variable Number of Gradients.
  5851. *
  5852. * #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG
  5853. *
  5854. * - Edge-Aware Demosaicing.
  5855. *
  5856. * #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA
  5857. *
  5858. * - Demosaicing with alpha channel
  5859. *
  5860. * #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA
  5861. *
  5862. * @see `+cvtColor:dst:code:dstCn:`
  5863. */
  5864. + (void)demosaicing:(Mat*)src dst:(Mat*)dst code:(int)code NS_SWIFT_NAME(demosaicing(src:dst:code:));
  5865. //
  5866. // Moments cv::moments(Mat array, bool binaryImage = false)
  5867. //
  5868. /**
  5869. * Calculates all of the moments up to the third order of a polygon or rasterized shape.
  5870. *
  5871. * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
  5872. * results are returned in the structure cv::Moments.
  5873. *
  5874. * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
  5875. * `$$1 \times N$$` or `$$N \times 1$$` ) of 2D points (Point or Point2f ).
  5876. * @param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is
  5877. * used for images only.
  5878. * @return moments.
  5879. *
  5880. * NOTE: Only applicable to contour moments calculations from Python bindings: Note that the numpy
  5881. * type for the input array should be either np.int32 or np.float32.
  5882. *
  5883. * @see `+contourArea:oriented:`, `+arcLength:closed:`
  5884. */
  5885. + (Moments*)moments:(Mat*)array binaryImage:(BOOL)binaryImage NS_SWIFT_NAME(moments(array:binaryImage:));
  5886. /**
  5887. * Calculates all of the moments up to the third order of a polygon or rasterized shape.
  5888. *
  5889. * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
  5890. * results are returned in the structure cv::Moments.
  5891. *
  5892. * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
  5893. * `$$1 \times N$$` or `$$N \times 1$$` ) of 2D points (Point or Point2f ).
  5894. * This variant uses the default binaryImage = false, so image pixel values are used as-is.
  5895. * @return moments.
  5896. *
  5897. * NOTE: Only applicable to contour moments calculations from Python bindings: Note that the numpy
  5898. * type for the input array should be either np.int32 or np.float32.
  5899. *
  5900. * @see `+contourArea:oriented:`, `+arcLength:closed:`
  5901. */
  5902. + (Moments*)moments:(Mat*)array NS_SWIFT_NAME(moments(array:));
  5903. //
  5904. // void cv::HuMoments(Moments m, Mat& hu)
  5905. //
  5906. + (void)HuMoments:(Moments*)m hu:(Mat*)hu NS_SWIFT_NAME(HuMoments(m:hu:));
  5907. //
  5908. // void cv::matchTemplate(Mat image, Mat templ, Mat& result, TemplateMatchModes method, Mat mask = Mat())
  5909. //
  5910. /**
  5911. * Compares a template against overlapped image regions.
  5912. *
  5913. * The function slides through image , compares the overlapped patches of size `$$w \times h$$` against
  5914. * templ using the specified method and stores the comparison results in result . #TemplateMatchModes
  5915. * describes the formulae for the available comparison methods ( `$$I$$` denotes image, `$$T$$`
  5916. * template, `$$R$$` result, `$$M$$` the optional mask ). The summation is done over template and/or
  5917. * the image patch: `$$x' = 0...w-1, y' = 0...h-1$$`
  5918. *
  5919. * After the function finishes the comparison, the best matches can be found as global minimums (when
  5920. * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the
  5921. * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
  5922. * the denominator is done over all of the channels and separate mean values are used for each channel.
  5923. * That is, the function can take a color template and a color image. The result will still be a
  5924. * single-channel image, which is easier to analyze.
  5925. *
  5926. * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
  5927. * @param templ Searched template. It must be not greater than the source image and have the same
  5928. * data type.
  5929. * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
  5930. * is `$$W \times H$$` and templ is `$$w \times h$$` , then result is `$$(W-w+1) \times (H-h+1)$$` .
  5931. * @param method Parameter specifying the comparison method, see #TemplateMatchModes
  5932. * @param mask Optional mask. It must have the same size as templ. It must either have the same number
  5933. * of channels as template or only one channel, which is then used for all template and
  5934. * image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask,
  5935. * meaning only elements where mask is nonzero are used and are kept unchanged independent
  5936. * of the actual mask value (weight equals 1). For data type #CV_32F, the mask values are
  5937. * used as weights. The exact formulas are documented in #TemplateMatchModes.
  5938. */
  5939. + (void)matchTemplate:(Mat*)image templ:(Mat*)templ result:(Mat*)result method:(TemplateMatchModes)method mask:(Mat*)mask NS_SWIFT_NAME(matchTemplate(image:templ:result:method:mask:));
  5940. /**
  5941. * Compares a template against overlapped image regions.
  5942. *
  5943. * The function slides through image , compares the overlapped patches of size `$$w \times h$$` against
  5944. * templ using the specified method and stores the comparison results in result . #TemplateMatchModes
  5945. * describes the formulae for the available comparison methods ( `$$I$$` denotes image, `$$T$$`
  5946. * template, `$$R$$` result, `$$M$$` the optional mask ). The summation is done over template and/or
  5947. * the image patch: `$$x' = 0...w-1, y' = 0...h-1$$`
  5948. *
  5949. * After the function finishes the comparison, the best matches can be found as global minimums (when
  5950. * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the
  5951. * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
  5952. * the denominator is done over all of the channels and separate mean values are used for each channel.
  5953. * That is, the function can take a color template and a color image. The result will still be a
  5954. * single-channel image, which is easier to analyze.
  5955. *
  5956. * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
  5957. * @param templ Searched template. It must be not greater than the source image and have the same
  5958. * data type.
  5959. * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
  5960. * is `$$W \times H$$` and templ is `$$w \times h$$` , then result is `$$(W-w+1) \times (H-h+1)$$` .
  5961. * @param method Parameter specifying the comparison method, see #TemplateMatchModes
  5962. * This variant performs the comparison without a mask; all template pixels contribute
  5963. * equally. See the overload taking a mask parameter for masked template matching.
  5967. */
  5968. + (void)matchTemplate:(Mat*)image templ:(Mat*)templ result:(Mat*)result method:(TemplateMatchModes)method NS_SWIFT_NAME(matchTemplate(image:templ:result:method:));
  5969. //
  5970. // int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype)
  5971. //
  5972. /**
  5973. * computes the connected components labeled image of boolean image
  5974. *
  5975. * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
  5976. * represents the background label. ltype specifies the output label image type, an important
  5977. * consideration based on the total number of labels or alternatively the total number of pixels in
  5978. * the source image. ccltype specifies the connected components labeling algorithm to use, currently
  5979. * Bolelli (Spaghetti) CITE: Bolelli2019, Grana (BBDT) CITE: Grana2010 and Wu's (SAUF) CITE: Wu2009 algorithms
  5980. * are supported, see the #ConnectedComponentsAlgorithmsTypes for details. Note that SAUF algorithm forces
  5981. * a row major ordering of labels while Spaghetti and BBDT do not.
  5982. * This function uses parallel version of the algorithms if at least one allowed
  5983. * parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.
  5984. *
  5985. * @param image the 8-bit single-channel image to be labeled
  5986. * @param labels destination labeled image
  5987. * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
  5988. * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
  5989. * @param ccltype connected components algorithm type (see the #ConnectedComponentsAlgorithmsTypes).
  5990. */
  5991. + (int)connectedComponentsWithAlgorithm:(Mat*)image labels:(Mat*)labels connectivity:(int)connectivity ltype:(int)ltype ccltype:(int)ccltype NS_SWIFT_NAME(connectedComponents(image:labels:connectivity:ltype:ccltype:));
  5992. //
  5993. // int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S)
  5994. //
  5995. /**
  5996. *
  5997. *
  5998. * @param image the 8-bit single-channel image to be labeled
  5999. * @param labels destination labeled image
  6000. * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
  6001. * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
  6002. */
  6003. + (int)connectedComponents:(Mat*)image labels:(Mat*)labels connectivity:(int)connectivity ltype:(int)ltype NS_SWIFT_NAME(connectedComponents(image:labels:connectivity:ltype:));
  6004. /**
  6005. *
  6006. *
  6007. * @param image the 8-bit single-channel image to be labeled
  6008. * @param labels destination labeled image
  6009. * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
  6010. */
  6011. + (int)connectedComponents:(Mat*)image labels:(Mat*)labels connectivity:(int)connectivity NS_SWIFT_NAME(connectedComponents(image:labels:connectivity:));
  6012. /**
  6013. *
  6014. *
  6015. * @param image the 8-bit single-channel image to be labeled
  6016. * @param labels destination labeled image
  6017. */
  6018. + (int)connectedComponents:(Mat*)image labels:(Mat*)labels NS_SWIFT_NAME(connectedComponents(image:labels:));
  6019. //
  6020. // int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, ConnectedComponentsAlgorithmsTypes ccltype)
  6021. //
  6022. /**
  6023. * computes the connected components labeled image of boolean image and also produces a statistics output for each label
  6024. *
  6025. * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
  6026. * represents the background label. ltype specifies the output label image type, an important
  6027. * consideration based on the total number of labels or alternatively the total number of pixels in
  6028. * the source image. ccltype specifies the connected components labeling algorithm to use, currently
  6029. * Bolelli (Spaghetti) CITE: Bolelli2019, Grana (BBDT) CITE: Grana2010 and Wu's (SAUF) CITE: Wu2009 algorithms
  6030. * are supported, see the #ConnectedComponentsAlgorithmsTypes for details. Note that SAUF algorithm forces
  6031. * a row major ordering of labels while Spaghetti and BBDT do not.
  6032. * This function uses parallel version of the algorithms (statistics included) if at least one allowed
  6033. * parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.
  6034. *
  6035. * @param image the 8-bit single-channel image to be labeled
  6036. * @param labels destination labeled image
  6037. * @param stats statistics output for each label, including the background label.
  6038. * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
  6039. * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
  6040. * @param centroids centroid output for each label, including the background label. Centroids are
  6041. * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
  6042. * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
  6043. * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
  6044. * @param ccltype connected components algorithm type (see #ConnectedComponentsAlgorithmsTypes).
  6045. */
  6046. + (int)connectedComponentsWithStatsWithAlgorithm:(Mat*)image labels:(Mat*)labels stats:(Mat*)stats centroids:(Mat*)centroids connectivity:(int)connectivity ltype:(int)ltype ccltype:(ConnectedComponentsAlgorithmsTypes)ccltype NS_SWIFT_NAME(connectedComponentsWithStats(image:labels:stats:centroids:connectivity:ltype:ccltype:));
  6047. //
  6048. // int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S)
  6049. //
  6050. /**
  6051. *
  6052. * @param image the 8-bit single-channel image to be labeled
  6053. * @param labels destination labeled image
  6054. * @param stats statistics output for each label, including the background label.
  6055. * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
  6056. * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
  6057. * @param centroids centroid output for each label, including the background label. Centroids are
  6058. * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
  6059. * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
  6060. * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
  6061. */
  6062. + (int)connectedComponentsWithStats:(Mat*)image labels:(Mat*)labels stats:(Mat*)stats centroids:(Mat*)centroids connectivity:(int)connectivity ltype:(int)ltype NS_SWIFT_NAME(connectedComponentsWithStats(image:labels:stats:centroids:connectivity:ltype:));
  6063. /**
  6064. *
  6065. * @param image the 8-bit single-channel image to be labeled
  6066. * @param labels destination labeled image
  6067. * @param stats statistics output for each label, including the background label.
  6068. * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
  6069. * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
  6070. * @param centroids centroid output for each label, including the background label. Centroids are
  6071. * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
  6072. * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
  6073. */
  6074. + (int)connectedComponentsWithStats:(Mat*)image labels:(Mat*)labels stats:(Mat*)stats centroids:(Mat*)centroids connectivity:(int)connectivity NS_SWIFT_NAME(connectedComponentsWithStats(image:labels:stats:centroids:connectivity:));
  6075. /**
  6076. *
  6077. * @param image the 8-bit single-channel image to be labeled
  6078. * @param labels destination labeled image
  6079. * @param stats statistics output for each label, including the background label.
  6080. * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
  6081. * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
  6082. * @param centroids centroid output for each label, including the background label. Centroids are
  6083. * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
  6084. */
  6085. + (int)connectedComponentsWithStats:(Mat*)image labels:(Mat*)labels stats:(Mat*)stats centroids:(Mat*)centroids NS_SWIFT_NAME(connectedComponentsWithStats(image:labels:stats:centroids:));
  6086. //
  6087. // void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, RetrievalModes mode, ContourApproximationModes method, Point offset = Point())
  6088. //
  6089. /**
  6090. * Finds contours in a binary image.
  6091. *
  6092. * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours
  6093. * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
  6094. * OpenCV sample directory.
  6095. * NOTE: Since opencv 3.2 source image is not modified by this function.
  6096. *
  6097. * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
  6098. * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
  6099. * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
  6100. * If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
  6101. * @param contours Detected contours. Each contour is stored as a vector of points (e.g.
  6102. * std::vector<std::vector<cv::Point> >).
  6103. * @param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about the image topology. It has
  6104. * as many elements as the number of contours. For each i-th contour contours[i], the elements
  6105. * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
  6106. * in contours of the next and previous contours at the same hierarchical level, the first child
  6107. * contour and the parent contour, respectively. If for the contour i there are no next, previous,
  6108. * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
  6109. * NOTE: In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour.
  6110. * @param mode Contour retrieval mode, see #RetrievalModes
  6111. * @param method Contour approximation method, see #ContourApproximationModes
  6112. * @param offset Optional offset by which every contour point is shifted. This is useful if the
  6113. * contours are extracted from the image ROI and then they should be analyzed in the whole image
  6114. * context.
  6115. */
  6116. + (void)findContours:(Mat*)image contours:(NSMutableArray<NSMutableArray<Point2i*>*>*)contours hierarchy:(Mat*)hierarchy mode:(RetrievalModes)mode method:(ContourApproximationModes)method offset:(Point2i*)offset NS_SWIFT_NAME(findContours(image:contours:hierarchy:mode:method:offset:));
  6117. /**
  6118. * Finds contours in a binary image.
  6119. *
  6120. * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours
  6121. * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
  6122. * OpenCV sample directory.
  6123. * NOTE: Since opencv 3.2 source image is not modified by this function.
  6124. *
  6125. * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
  6126. * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
  6127. * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
  6128. * If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
  6129. * @param contours Detected contours. Each contour is stored as a vector of points (e.g.
  6130. * std::vector<std::vector<cv::Point> >).
  6131. * @param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about the image topology. It has
  6132. * as many elements as the number of contours. For each i-th contour contours[i], the elements
  6133. * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
  6134. * in contours of the next and previous contours at the same hierarchical level, the first child
  6135. * contour and the parent contour, respectively. If for the contour i there are no next, previous,
  6136. * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
  6137. * NOTE: In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour.
  6138. * @param mode Contour retrieval mode, see #RetrievalModes
  6139. * @param method Contour approximation method, see #ContourApproximationModes
  6140. * This variant applies a zero offset to every contour point. Use the overload taking an offset when
  6141. * contours are extracted from an image ROI and should be analyzed in the whole image context.
  6142. */
  6143. + (void)findContours:(Mat*)image contours:(NSMutableArray<NSMutableArray<Point2i*>*>*)contours hierarchy:(Mat*)hierarchy mode:(RetrievalModes)mode method:(ContourApproximationModes)method NS_SWIFT_NAME(findContours(image:contours:hierarchy:mode:method:));
  6144. //
  6145. // void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
  6146. //
  6147. /**
  6148. * Approximates a polygonal curve(s) with the specified precision.
  6149. *
  6150. * The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less
  6151. * vertices so that the distance between them is less or equal to the specified precision. It uses the
  6152. * Douglas-Peucker algorithm <http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm>
  6153. *
  6154. * @param curve Input vector of a 2D point stored in std::vector or Mat
  6155. * @param approxCurve Result of the approximation. The type should match the type of the input curve.
  6156. * @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
  6157. * between the original curve and its approximation.
  6158. * @param closed If true, the approximated curve is closed (its first and last vertices are
  6159. * connected). Otherwise, it is not closed.
  6160. */
  6161. + (void)approxPolyDP:(NSArray<Point2f*>*)curve approxCurve:(NSMutableArray<Point2f*>*)approxCurve epsilon:(double)epsilon closed:(BOOL)closed NS_SWIFT_NAME(approxPolyDP(curve:approxCurve:epsilon:closed:));
  6162. //
  6163. // double cv::arcLength(vector_Point2f curve, bool closed)
  6164. //
  6165. /**
  6166. * Calculates a contour perimeter or a curve length.
  6167. *
  6168. * The function computes a curve length or a closed contour perimeter.
  6169. *
  6170. * @param curve Input vector of 2D points, stored in std::vector or Mat.
  6171. * @param closed Flag indicating whether the curve is closed or not.
  6172. */
  6173. + (double)arcLength:(NSArray<Point2f*>*)curve closed:(BOOL)closed NS_SWIFT_NAME(arcLength(curve:closed:));
  6174. //
  6175. // Rect cv::boundingRect(Mat array)
  6176. //
  6177. /**
  6178. * Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
  6179. *
  6180. * The function calculates and returns the minimal up-right bounding rectangle for the specified point set or
  6181. * non-zero pixels of gray-scale image.
  6182. *
  6183. * @param array Input gray-scale image or 2D point set, stored in std::vector or Mat.
  6184. */
  6185. + (Rect2i*)boundingRect:(Mat*)array NS_SWIFT_NAME(boundingRect(array:));
  6186. //
  6187. // double cv::contourArea(Mat contour, bool oriented = false)
  6188. //
  6189. /**
  6190. * Calculates a contour area.
  6191. *
  6192. * The function computes a contour area. Similarly to moments , the area is computed using the Green
  6193. * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
  6194. * #drawContours or #fillPoly , can be different. Also, the function will most certainly give a wrong
  6195. * results for contours with self-intersections.
  6196. *
  6197. * Example:
  6198. *
  6199. * vector<Point> contour;
  6200. * contour.push_back(Point2f(0, 0));
  6201. * contour.push_back(Point2f(10, 0));
  6202. * contour.push_back(Point2f(10, 10));
  6203. * contour.push_back(Point2f(5, 4));
  6204. *
  6205. * double area0 = contourArea(contour);
  6206. * vector<Point> approx;
  6207. * approxPolyDP(contour, approx, 5, true);
  6208. * double area1 = contourArea(approx);
  6209. *
  6210. * cout << "area0 =" << area0 << endl <<
  6211. * "area1 =" << area1 << endl <<
  6212. * "approx poly vertices" << approx.size() << endl;
  6213. *
  6214. * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
  6215. * @param oriented Oriented area flag. If it is true, the function returns a signed area value,
  6216. * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
  6217. * determine orientation of a contour by taking the sign of an area. By default, the parameter is
  6218. * false, which means that the absolute value is returned.
  6219. */
  6220. + (double)contourArea:(Mat*)contour oriented:(BOOL)oriented NS_SWIFT_NAME(contourArea(contour:oriented:));
  6221. /**
  6222. * Calculates a contour area.
  6223. *
  6224. * The function computes a contour area. Similarly to moments , the area is computed using the Green
  6225. * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
  6226. * #drawContours or #fillPoly , can be different. Also, the function will most certainly give a wrong
  6227. * results for contours with self-intersections.
  6228. *
  6229. * Example:
  6230. *
  6231. * vector<Point> contour;
  6232. * contour.push_back(Point2f(0, 0));
  6233. * contour.push_back(Point2f(10, 0));
  6234. * contour.push_back(Point2f(10, 10));
  6235. * contour.push_back(Point2f(5, 4));
  6236. *
  6237. * double area0 = contourArea(contour);
  6238. * vector<Point> approx;
  6239. * approxPolyDP(contour, approx, 5, true);
  6240. * double area1 = contourArea(approx);
  6241. *
  6242. * cout << "area0 =" << area0 << endl <<
  6243. * "area1 =" << area1 << endl <<
  6244. * "approx poly vertices" << approx.size() << endl;
  6245. *
  6246. * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
  6247. * This variant uses the default oriented = false, so the absolute area value is returned
  6248. * regardless of the contour orientation (clockwise or counter-clockwise).
  6250. */
  6251. + (double)contourArea:(Mat*)contour NS_SWIFT_NAME(contourArea(contour:));
  6252. //
  6253. // RotatedRect cv::minAreaRect(vector_Point2f points)
  6254. //
  6255. /**
  6256. * Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
  6257. *
  6258. * The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a
  6259. * specified point set. Developer should keep in mind that the returned RotatedRect can contain negative
  6260. * indices when data is close to the containing Mat element boundary.
  6261. *
  6262. * @param points Input vector of 2D points, stored in std::vector\<\> or Mat
  6263. */
  6264. + (RotatedRect*)minAreaRect:(NSArray<Point2f*>*)points NS_SWIFT_NAME(minAreaRect(points:));
  6265. //
  6266. // void cv::boxPoints(RotatedRect box, Mat& points)
  6267. //
  6268. /**
  6269. * Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
  6270. *
  6271. * The function finds the four vertices of a rotated rectangle. This function is useful to draw the
  6272. * rectangle. In C++, instead of using this function, you can directly use RotatedRect::points method. Please
  6273. * visit the REF: tutorial_bounding_rotated_ellipses "tutorial on Creating Bounding rotated boxes and ellipses for contours" for more information.
  6274. *
  6275. * @param box The input rotated rectangle. It may be the output of #minAreaRect.
  6276. * @param points The output array of four vertices of rectangles.
  6277. */
  6278. + (void)boxPoints:(RotatedRect*)box points:(Mat*)points NS_SWIFT_NAME(boxPoints(box:points:));
  6279. //
  6280. // void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius)
  6281. //
  6282. /**
  6283. * Finds a circle of the minimum area enclosing a 2D point set.
  6284. *
  6285. * The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm.
  6286. *
  6287. * @param points Input vector of 2D points, stored in std::vector\<\> or Mat
  6288. * @param center Output center of the circle.
  6289. * @param radius Output radius of the circle.
  6290. */
  6291. + (void)minEnclosingCircle:(NSArray<Point2f*>*)points center:(Point2f*)center radius:(float*)radius NS_SWIFT_NAME(minEnclosingCircle(points:center:radius:));
  6292. //
  6293. // double cv::minEnclosingTriangle(Mat points, Mat& triangle)
  6294. //
  6295. /**
  6296. * Finds a triangle of minimum area enclosing a 2D point set and returns its area.
  6297. *
  6298. * The function finds a triangle of minimum area enclosing the given set of 2D points and returns its
  6299. * area. The output for a given 2D point set is shown in the image below. 2D points are depicted in
  6300. * *red* and the enclosing triangle in *yellow*.
  6301. *
  6302. * ![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png)
  6303. *
  6304. * The implementation of the algorithm is based on O'Rourke's CITE: ORourke86 and Klee and Laskowski's
  6305. * CITE: KleeLaskowski85 papers. O'Rourke provides a `$$\theta(n)$$` algorithm for finding the minimal
  6306. * enclosing triangle of a 2D convex polygon with n vertices. Since the #minEnclosingTriangle function
  6307. * takes a 2D point set as input an additional preprocessing step of computing the convex hull of the
  6308. * 2D point set is required. The complexity of the #convexHull function is `$$O(n log(n))$$` which is higher
  6309. * than `$$\theta(n)$$`. Thus the overall complexity of the function is `$$O(n log(n))$$`.
  6310. *
  6311. * @param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector\<\> or Mat
  6312. * @param triangle Output vector of three 2D points defining the vertices of the triangle. The depth
  6313. * of the OutputArray must be CV_32F.
  6314. */
  6315. + (double)minEnclosingTriangle:(Mat*)points triangle:(Mat*)triangle NS_SWIFT_NAME(minEnclosingTriangle(points:triangle:));
  6316. //
  6317. // double cv::matchShapes(Mat contour1, Mat contour2, ShapeMatchModes method, double parameter)
  6318. //
  6319. /**
  6320. * Compares two shapes.
  6321. *
  6322. * The function compares two shapes. All three implemented methods use the Hu invariants (see #HuMoments)
  6323. *
  6324. * @param contour1 First contour or grayscale image.
  6325. * @param contour2 Second contour or grayscale image.
  6326. * @param method Comparison method, see #ShapeMatchModes
  6327. * @param parameter Method-specific parameter (not supported now).
  6328. */
  6329. + (double)matchShapes:(Mat*)contour1 contour2:(Mat*)contour2 method:(ShapeMatchModes)method parameter:(double)parameter NS_SWIFT_NAME(matchShapes(contour1:contour2:method:parameter:));
  6330. //
  6331. // void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true)
  6332. //
  6333. /**
  6334. * Finds the convex hull of a point set.
  6335. *
  6336. * The function cv::convexHull finds the convex hull of a 2D point set using the Sklansky's algorithm CITE: Sklansky82
  6337. * that has *O(N logN)* complexity in the current implementation.
  6338. *
  6339. * @param points Input 2D point set, stored in std::vector or Mat.
  6340. * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
  6341. * the first case, the hull elements are 0-based indices of the convex hull points in the original
  6342. * array (since the set of convex hull points is a subset of the original point set). In the second
  6343. * case, hull elements are the convex hull points themselves.
  6344. * @param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise.
  6345. * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
  6346. * to the right, and its Y axis pointing upwards.
  6347. * @param returnPoints Operation flag (hidden in this binding). In case of a matrix, when the flag is true, the function
  6348. * returns convex hull points. Otherwise, it returns indices of the convex hull points. When the
  6349. * output array is std::vector, the flag is ignored, and the output depends on the type of the
  6350. * vector: std::vector\<int\> implies returnPoints=false, std::vector\<Point\> implies
  6351. * returnPoints=true.
  6352. *
  6353. * NOTE: `points` and `hull` should be different arrays, inplace processing isn't supported.
  6354. *
  6355. * Check REF: tutorial_hull "the corresponding tutorial" for more details.
  6356. *
  6357. * useful links:
  6358. *
  6359. * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
  6360. */
  6361. + (void)convexHull:(NSArray<Point2i*>*)points hull:(IntVector*)hull clockwise:(BOOL)clockwise NS_SWIFT_NAME(convexHull(points:hull:clockwise:));
  6362. /**
  6363. * Finds the convex hull of a point set.
  6364. *
  6365. * The function cv::convexHull finds the convex hull of a 2D point set using the Sklansky's algorithm CITE: Sklansky82
  6366. * that has *O(N logN)* complexity in the current implementation.
  6367. *
  6368. * @param points Input 2D point set, stored in std::vector or Mat.
  6369. * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
  6370. * the first case, the hull elements are 0-based indices of the convex hull points in the original
  6371. * array (since the set of convex hull points is a subset of the original point set). In the second
  6372. * case, hull elements are the convex hull points themselves.
  6373. * NOTE: this overload omits the `clockwise` flag, which therefore defaults to false: the output
  6374. * convex hull is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
  6375. * to the right, and its Y axis pointing upwards.
  6376. * Because `hull` is an integer vector in this binding, the hull is returned as 0-based indices of
  6377. * the convex hull points in the original array (the equivalent of returnPoints=false in the
  6378. * underlying C++ API).
  6379. *
  6380. * NOTE: `points` and `hull` should be different arrays, inplace processing isn't supported.
  6381. *
  6382. * Check REF: tutorial_hull "the corresponding tutorial" for more details.
  6383. *
  6384. * useful links:
  6385. *
  6386. * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
  6387. */
  6388. + (void)convexHull:(NSArray<Point2i*>*)points hull:(IntVector*)hull NS_SWIFT_NAME(convexHull(points:hull:));
  6389. //
  6390. // void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
  6391. //
  6392. /**
  6393. * Finds the convexity defects of a contour.
  6394. *
  6395. * The figure below displays convexity defects of a hand contour:
  6396. *
  6397. * ![image](pics/defects.png)
  6398. *
  6399. * @param contour Input contour (vector of 2D points).
  6400. * @param convexhull Convex hull obtained using convexHull that should contain indices of the contour
  6401. * points that make the hull.
  6402. * @param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java
  6403. * interface each convexity defect is represented as 4-element integer vector (a.k.a. #Vec4i):
  6404. * (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices
  6405. * in the original contour of the convexity defect beginning, end and the farthest point, and
  6406. * fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the
  6407. * farthest contour point and the hull. That is, to get the floating-point value of the depth will be
  6408. * fixpt_depth/256.0.
  6409. */
  6410. + (void)convexityDefects:(NSArray<Point2i*>*)contour convexhull:(IntVector*)convexhull convexityDefects:(NSMutableArray<Int4*>*)convexityDefects NS_SWIFT_NAME(convexityDefects(contour:convexhull:convexityDefects:));
  6411. //
  6412. // bool cv::isContourConvex(vector_Point contour)
  6413. //
  6414. /**
  6415. * Tests a contour convexity.
  6416. *
  6417. * The function tests whether the input contour is convex or not. The contour must be simple, that is,
  6418. * without self-intersections. Otherwise, the function output is undefined.
  6419. *
  6420. * @param contour Input vector of 2D points, stored in std::vector\<\> or Mat
  6421. */
  6422. + (BOOL)isContourConvex:(NSArray<Point2i*>*)contour NS_SWIFT_NAME(isContourConvex(contour:));
  6423. //
  6424. // float cv::intersectConvexConvex(Mat p1, Mat p2, Mat& p12, bool handleNested = true)
  6425. //
  6426. /**
  6427. * Finds intersection of two convex polygons
  6428. *
  6429. * @param p1 First polygon
  6430. * @param p2 Second polygon
  6431. * @param p12 Output polygon describing the intersecting area.
  6432. * @param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other.
  6433. * When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge
  6434. * of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested.
  6435. *
  6436. * @return Absolute value of area of intersecting polygon
  6437. *
  6438. * NOTE: intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
  6439. */
  6440. + (float)intersectConvexConvex:(Mat*)p1 p2:(Mat*)p2 p12:(Mat*)p12 handleNested:(BOOL)handleNested NS_SWIFT_NAME(intersectConvexConvex(p1:p2:p12:handleNested:));
  6441. /**
  6442. * Finds intersection of two convex polygons
  6443. *
  6444. * @param p1 First polygon
  6445. * @param p2 Second polygon
  6446. * @param p12 Output polygon describing the intersecting area
  6447. * NOTE: this overload omits `handleNested`, which therefore defaults to true: an intersection is
  6448. * also found when one of the polygons is fully enclosed in the other.
  6449. *
  6450. * @return Absolute value of area of intersecting polygon
  6451. *
  6452. * NOTE: intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
  6453. */
  6454. + (float)intersectConvexConvex:(Mat*)p1 p2:(Mat*)p2 p12:(Mat*)p12 NS_SWIFT_NAME(intersectConvexConvex(p1:p2:p12:));
  6455. //
  6456. // RotatedRect cv::fitEllipse(vector_Point2f points)
  6457. //
  6458. /**
  6459. * Fits an ellipse around a set of 2D points.
  6460. *
  6461. * The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of
  6462. * all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by CITE: Fitzgibbon95
  6463. * is used. Developer should keep in mind that it is possible that the returned
  6464. * ellipse/rotatedRect data contains negative indices, due to the data points being close to the
  6465. * border of the containing Mat element.
  6466. *
  6467. * @param points Input 2D point set, stored in std::vector\<\> or Mat
  6468. */
  6469. + (RotatedRect*)fitEllipse:(NSArray<Point2f*>*)points NS_SWIFT_NAME(fitEllipse(points:));
  6470. //
  6471. // RotatedRect cv::fitEllipseAMS(Mat points)
  6472. //
  6473. /**
  6474. * Fits an ellipse around a set of 2D points.
  6475. *
  6476. * The function calculates the ellipse that fits a set of 2D points.
  6477. * It returns the rotated rectangle in which the ellipse is inscribed.
  6478. * The Approximate Mean Square (AMS) proposed by CITE: Taubin1991 is used.
  6479. *
  6480. * For an ellipse, this basis set is `$$ \chi= \left(x^2, x y, y^2, x, y, 1\right) $$`,
  6481. * which is a set of six free coefficients `$$ A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} $$`.
  6482. * However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths `$$ (a,b) $$`,
  6483. * the position `$$ (x_0,y_0) $$`, and the orientation `$$ \theta $$`. This is because the basis set includes lines,
  6484. * quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.
  6485. * If the fit is found to be a parabolic or hyperbolic function then the standard #fitEllipse method is used.
  6486. * The AMS method restricts the fit to parabolic, hyperbolic and elliptical curves
  6487. * by imposing the condition that `$$ A^T ( D_x^T D_x + D_y^T D_y) A = 1 $$` where
  6488. * the matrices `$$ D_x $$` and `$$ D_y $$` are the partial derivatives of the design matrix `$$ D $$` with
  6489. * respect to x and y. The matrices are formed row by row applying the following to
  6490. * each of the points in the set:
  6491. * `$$\begin{aligned}
  6492. * D(i,:)&=\left\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\right\} &
  6493. * D_x(i,:)&=\left\{2 x_i,y_i,0,1,0,0\right\} &
  6494. * D_y(i,:)&=\left\{0,x_i,2 y_i,0,1,0\right\}
  6495. * \end{aligned}$$`
  6496. * The AMS method minimizes the cost function
  6497. * `$$\begin{aligned}
  6498. * \epsilon ^2=\frac{ A^T D^T D A }{ A^T (D_x^T D_x + D_y^T D_y) A }
  6499. * \end{aligned}$$`
  6500. *
  6501. * The minimum cost is found by solving the generalized eigenvalue problem.
  6502. *
  6503. * `$$\begin{aligned}
  6504. * D^T D A = \lambda \left( D_x^T D_x + D_y^T D_y\right) A
  6505. * \end{aligned}$$`
  6506. *
  6507. * @param points Input 2D point set, stored in std::vector\<\> or Mat
  6508. */
  6509. + (RotatedRect*)fitEllipseAMS:(Mat*)points NS_SWIFT_NAME(fitEllipseAMS(points:));
  6510. //
  6511. // RotatedRect cv::fitEllipseDirect(Mat points)
  6512. //
  6513. /**
  6514. * Fits an ellipse around a set of 2D points.
  6515. *
  6516. * The function calculates the ellipse that fits a set of 2D points.
  6517. * It returns the rotated rectangle in which the ellipse is inscribed.
  6518. * The Direct least square (Direct) method by CITE: Fitzgibbon1999 is used.
  6519. *
  6520. * For an ellipse, this basis set is `$$ \chi= \left(x^2, x y, y^2, x, y, 1\right) $$`,
  6521. * which is a set of six free coefficients `$$ A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} $$`.
  6522. * However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths `$$ (a,b) $$`,
  6523. * the position `$$ (x_0,y_0) $$`, and the orientation `$$ \theta $$`. This is because the basis set includes lines,
  6524. * quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.
  6525. * The Direct method confines the fit to ellipses by ensuring that `$$ 4 A_{xx} A_{yy}- A_{xy}^2 > 0 $$`.
  6526. * The condition imposed is that `$$ 4 A_{xx} A_{yy}- A_{xy}^2=1 $$` which satisfies the inequality
  6527. * and as the coefficients can be arbitrarily scaled is not overly restrictive.
  6528. *
  6529. * `$$\begin{aligned}
  6530. * \epsilon ^2= A^T D^T D A \quad \text{with} \quad A^T C A =1 \quad \text{and} \quad C=\left(\begin{matrix}
  6531. * 0 & 0 & 2 & 0 & 0 & 0 \\
  6532. * 0 & -1 & 0 & 0 & 0 & 0 \\
  6533. * 2 & 0 & 0 & 0 & 0 & 0 \\
  6534. * 0 & 0 & 0 & 0 & 0 & 0 \\
  6535. * 0 & 0 & 0 & 0 & 0 & 0 \\
  6536. * 0 & 0 & 0 & 0 & 0 & 0
  6537. * \end{matrix} \right)
  6538. * \end{aligned}$$`
  6539. *
  6540. * The minimum cost is found by solving the generalized eigenvalue problem.
  6541. *
  6542. * `$$\begin{aligned}
  6543. * D^T D A = \lambda \left( C\right) A
  6544. * \end{aligned}$$`
  6545. *
  6546. * The system produces only one positive eigenvalue `$$ \lambda $$` which is chosen as the solution
  6547. * with its eigenvector `$$\mathbf{u}$$`. These are used to find the coefficients
  6548. *
  6549. * `$$\begin{aligned}
  6550. * A = \sqrt{\frac{1}{\mathbf{u}^T C \mathbf{u}}} \mathbf{u}
  6551. * \end{aligned}$$`
  6552. * The scaling factor guarantees that `$$A^T C A =1$$`.
  6553. *
  6554. * @param points Input 2D point set, stored in std::vector\<\> or Mat
  6555. */
  6556. + (RotatedRect*)fitEllipseDirect:(Mat*)points NS_SWIFT_NAME(fitEllipseDirect(points:));
  6557. //
  6558. // void cv::fitLine(Mat points, Mat& line, DistanceTypes distType, double param, double reps, double aeps)
  6559. //
  6560. /**
  6561. * Fits a line to a 2D or 3D point set.
  6562. *
  6563. * The function fitLine fits a line to a 2D or 3D point set by minimizing `$$\sum_i \rho(r_i)$$` where
  6564. * `$$r_i$$` is the distance between the `$$i^{th}$$` point and the line, and `$$\rho(r)$$` is a distance function, one
  6565. * of the following:
  6566. * - DIST_L2
  6567. * `$$\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}$$`
  6568. * - DIST_L1
  6569. * `$$\rho (r) = r$$`
  6570. * - DIST_L12
  6571. * `$$\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)$$`
  6572. * - DIST_FAIR
  6573. * `$$\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998$$`
  6574. * - DIST_WELSCH
  6575. * `$$\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846$$`
  6576. * - DIST_HUBER
  6577. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} \rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345$$`
  6578. *
  6579. * The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
  6580. * that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
  6581. * weights `$$w_i$$` are adjusted to be inversely proportional to `$$\rho(r_i)$$` .
  6582. *
  6583. * @param points Input vector of 2D or 3D points, stored in std::vector\<\> or Mat.
  6584. * @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements
  6585. * (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and
  6586. * (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like
  6587. * Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line
  6588. * and (x0, y0, z0) is a point on the line.
  6589. * @param distType Distance used by the M-estimator, see #DistanceTypes
  6590. * @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
  6591. * is chosen.
  6592. * @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line).
  6593. * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
  6594. */
  6595. + (void)fitLine:(Mat*)points line:(Mat*)line distType:(DistanceTypes)distType param:(double)param reps:(double)reps aeps:(double)aeps NS_SWIFT_NAME(fitLine(points:line:distType:param:reps:aeps:));
  6596. //
  6597. // double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist)
  6598. //
  6599. /**
  6600. * Performs a point-in-contour test.
  6601. *
  6602. * The function determines whether the point is inside a contour, outside, or lies on an edge (or
  6603. * coincides with a vertex). It returns positive (inside), negative (outside), or zero (on an edge)
  6604. * value, respectively. When measureDist=false , the return value is +1, -1, and 0, respectively.
  6605. * Otherwise, the return value is a signed distance between the point and the nearest contour edge.
  6606. *
  6607. * See below a sample output of the function where each image pixel is tested against the contour:
  6608. *
  6609. * ![sample output](pics/pointpolygon.png)
  6610. *
  6611. * @param contour Input contour.
  6612. * @param pt Point tested against the contour.
  6613. * @param measureDist If true, the function estimates the signed distance from the point to the
  6614. * nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
  6615. */
  6616. + (double)pointPolygonTest:(NSArray<Point2f*>*)contour pt:(Point2f*)pt measureDist:(BOOL)measureDist NS_SWIFT_NAME(pointPolygonTest(contour:pt:measureDist:));
  6617. //
  6618. // int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion)
  6619. //
  6620. /**
  6621. * Finds out if there is any intersection between two rotated rectangles.
  6622. *
  6623. * If there is then the vertices of the intersecting region are returned as well.
  6624. *
  6625. * Below are some examples of intersection configurations. The hatched pattern indicates the
  6626. * intersecting region and the red vertices are returned by the function.
  6627. *
  6628. * ![intersection examples](pics/intersection.png)
  6629. *
  6630. * @param rect1 First rectangle
  6631. * @param rect2 Second rectangle
  6632. * @param intersectingRegion The output array of the vertices of the intersecting region. It returns
  6633. * at most 8 vertices. Stored as std::vector\<cv::Point2f\> or cv::Mat as Mx1 of type CV_32FC2.
  6634. * @return One of #RectanglesIntersectTypes
  6635. */
  6636. + (int)rotatedRectangleIntersection:(RotatedRect*)rect1 rect2:(RotatedRect*)rect2 intersectingRegion:(Mat*)intersectingRegion NS_SWIFT_NAME(rotatedRectangleIntersection(rect1:rect2:intersectingRegion:));
  6637. //
  6638. // Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard()
  6639. //
  6640. /**
  6641. * Creates a smart pointer to a cv::GeneralizedHoughBallard class and initializes it.
  6642. */
  6643. + (GeneralizedHoughBallard*)createGeneralizedHoughBallard NS_SWIFT_NAME(createGeneralizedHoughBallard());
  6644. //
  6645. // Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil()
  6646. //
  6647. /**
  6648. * Creates a smart pointer to a cv::GeneralizedHoughGuil class and initializes it.
  6649. */
  6650. + (GeneralizedHoughGuil*)createGeneralizedHoughGuil NS_SWIFT_NAME(createGeneralizedHoughGuil());
  6651. //
  6652. // void cv::applyColorMap(Mat src, Mat& dst, ColormapTypes colormap)
  6653. //
  6654. /**
  6655. * Applies a GNU Octave/MATLAB equivalent colormap on a given image.
  6656. *
  6657. * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.
  6658. * @param dst The result is the colormapped source image. Note: Mat::create is called on dst.
  6659. * @param colormap The colormap to apply, see #ColormapTypes
  6660. */
  6661. + (void)applyColorMap:(Mat*)src dst:(Mat*)dst colormap:(ColormapTypes)colormap NS_SWIFT_NAME(applyColorMap(src:dst:colormap:));
  6662. //
  6663. // void cv::applyColorMap(Mat src, Mat& dst, Mat userColor)
  6664. //
  6665. /**
  6666. * Applies a user colormap on a given image.
  6667. *
  6668. * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.
  6669. * @param dst The result is the colormapped source image. Note: Mat::create is called on dst.
  6670. * @param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256.
  6671. */
  6672. + (void)applyColorMap:(Mat*)src dst:(Mat*)dst userColor:(Mat*)userColor NS_SWIFT_NAME(applyColorMap(src:dst:userColor:));
  6673. //
  6674. // void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, int shift = 0)
  6675. //
  6676. /**
  6677. * Draws a line segment connecting two points.
  6678. *
  6679. * The function line draws the line segment between pt1 and pt2 points in the image. The line is
  6680. * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
  6681. * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
  6682. * lines are drawn using Gaussian filtering.
  6683. *
  6684. * @param img Image.
  6685. * @param pt1 First point of the line segment.
  6686. * @param pt2 Second point of the line segment.
  6687. * @param color Line color.
  6688. * @param thickness Line thickness.
  6689. * @param lineType Type of the line. See #LineTypes.
  6690. * @param shift Number of fractional bits in the point coordinates.
  6691. */
  6692. + (void)line:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(line(img:pt1:pt2:color:thickness:lineType:shift:));
  6693. /**
  6694. * Draws a line segment connecting two points.
  6695. *
  6696. * The function line draws the line segment between pt1 and pt2 points in the image. The line is
  6697. * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
  6698. * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
  6699. * lines are drawn using Gaussian filtering.
  6700. *
  6701. * @param img Image.
  6702. * @param pt1 First point of the line segment.
  6703. * @param pt2 Second point of the line segment.
  6704. * @param color Line color.
  6705. * @param thickness Line thickness.
  6706. * @param lineType Type of the line. See #LineTypes. The omitted `shift` parameter defaults to 0.
  6707. */
  6708. + (void)line:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(line(img:pt1:pt2:color:thickness:lineType:));
  6709. /**
  6710. * Draws a line segment connecting two points.
  6711. *
  6712. * The function line draws the line segment between pt1 and pt2 points in the image. The line is
  6713. * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
  6714. * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
  6715. * lines are drawn using Gaussian filtering.
  6716. *
  6717. * @param img Image.
  6718. * @param pt1 First point of the line segment.
  6719. * @param pt2 Second point of the line segment.
  6720. * @param color Line color.
  6721. * @param thickness Line thickness. The omitted parameters default to lineType=LINE_8 and shift=0.
  6722. */
  6723. + (void)line:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(line(img:pt1:pt2:color:thickness:));
  6724. /**
  6725. * Draws a line segment connecting two points.
  6726. *
  6727. * The function line draws the line segment between pt1 and pt2 points in the image. The line is
  6728. * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
  6729. * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
  6730. * lines are drawn using Gaussian filtering.
  6731. *
  6732. * @param img Image.
  6733. * @param pt1 First point of the line segment.
  6734. * @param pt2 Second point of the line segment.
  6735. * @param color Line color. The omitted parameters default to thickness=1, lineType=LINE_8 and shift=0.
  6736. */
  6737. + (void)line:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color NS_SWIFT_NAME(line(img:pt1:pt2:color:));
  6738. //
  6739. // void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, LineTypes line_type = 8, int shift = 0, double tipLength = 0.1)
  6740. //
  6741. /**
  6742. * Draws an arrow segment pointing from the first point to the second one.
  6743. *
  6744. * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
  6745. *
  6746. * @param img Image.
  6747. * @param pt1 The point the arrow starts from.
  6748. * @param pt2 The point the arrow points to.
  6749. * @param color Line color.
  6750. * @param thickness Line thickness.
  6751. * @param line_type Type of the line. See #LineTypes
  6752. * @param shift Number of fractional bits in the point coordinates.
  6753. * @param tipLength The length of the arrow tip in relation to the arrow length
  6754. */
  6755. + (void)arrowedLine:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness line_type:(LineTypes)line_type shift:(int)shift tipLength:(double)tipLength NS_SWIFT_NAME(arrowedLine(img:pt1:pt2:color:thickness:line_type:shift:tipLength:));
  6756. /**
  6757. * Draws an arrow segment pointing from the first point to the second one.
  6758. *
  6759. * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
  6760. *
  6761. * @param img Image.
  6762. * @param pt1 The point the arrow starts from.
  6763. * @param pt2 The point the arrow points to.
  6764. * @param color Line color.
  6765. * @param thickness Line thickness.
  6766. * @param line_type Type of the line. See #LineTypes
  6767. * @param shift Number of fractional bits in the point coordinates. The omitted `tipLength` defaults to 0.1.
  6768. */
  6769. + (void)arrowedLine:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness line_type:(LineTypes)line_type shift:(int)shift NS_SWIFT_NAME(arrowedLine(img:pt1:pt2:color:thickness:line_type:shift:));
  6770. /**
  6771. * Draws an arrow segment pointing from the first point to the second one.
  6772. *
  6773. * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
  6774. *
  6775. * @param img Image.
  6776. * @param pt1 The point the arrow starts from.
  6777. * @param pt2 The point the arrow points to.
  6778. * @param color Line color.
  6779. * @param thickness Line thickness.
  6780. * @param line_type Type of the line. See #LineTypes. The omitted parameters default to shift=0 and tipLength=0.1.
  6781. */
  6782. + (void)arrowedLine:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness line_type:(LineTypes)line_type NS_SWIFT_NAME(arrowedLine(img:pt1:pt2:color:thickness:line_type:));
  6783. /**
  6784. * Draws an arrow segment pointing from the first point to the second one.
  6785. *
  6786. * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
  6787. *
  6788. * @param img Image.
  6789. * @param pt1 The point the arrow starts from.
  6790. * @param pt2 The point the arrow points to.
  6791. * @param color Line color.
  6792. * @param thickness Line thickness. The omitted parameters default to line_type=8, shift=0 and tipLength=0.1.
  6793. */
  6794. + (void)arrowedLine:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(arrowedLine(img:pt1:pt2:color:thickness:));
  6795. /**
  6796. * Draws an arrow segment pointing from the first point to the second one.
  6797. *
  6798. * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
  6799. *
  6800. * @param img Image.
  6801. * @param pt1 The point the arrow starts from.
  6802. * @param pt2 The point the arrow points to.
  6803. * @param color Line color. The omitted parameters default to thickness=1, line_type=8, shift=0 and tipLength=0.1.
  6804. */
  6805. + (void)arrowedLine:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color NS_SWIFT_NAME(arrowedLine(img:pt1:pt2:color:));
//
// void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, int shift = 0)
//
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
 * are pt1 and pt2.
 *
 * @param img Image.
 * @param pt1 Vertex of the rectangle.
 * @param pt2 Vertex of the rectangle opposite to pt1 .
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
 * mean that the function has to draw a filled rectangle.
 * @param lineType Type of the line. See #LineTypes
 * @param shift Number of fractional bits in the point coordinates.
 */
+ (void)rectangle:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(rectangle(img:pt1:pt2:color:thickness:lineType:shift:));
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
 * are pt1 and pt2. In this overload `shift` is omitted and defaults to 0.
 *
 * @param img Image.
 * @param pt1 Vertex of the rectangle.
 * @param pt2 Vertex of the rectangle opposite to pt1 .
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
 * mean that the function has to draw a filled rectangle.
 * @param lineType Type of the line. See #LineTypes
 */
+ (void)rectangle:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(rectangle(img:pt1:pt2:color:thickness:lineType:));
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
 * are pt1 and pt2. In this overload `lineType` defaults to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param pt1 Vertex of the rectangle.
 * @param pt2 Vertex of the rectangle opposite to pt1 .
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
 * mean that the function has to draw a filled rectangle.
 */
+ (void)rectangle:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(rectangle(img:pt1:pt2:color:thickness:));
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
 * are pt1 and pt2. In this overload `thickness` defaults to 1, `lineType` to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param pt1 Vertex of the rectangle.
 * @param pt2 Vertex of the rectangle opposite to pt1 .
 * @param color Rectangle color or brightness (grayscale image).
 */
+ (void)rectangle:(Mat*)img pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 color:(Scalar*)color NS_SWIFT_NAME(rectangle(img:pt1:pt2:color:));
//
// void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, int shift = 0)
//
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and
 * r.br()-Point(1,1)` are opposite corners
 *
 * @param img Image.
 * @param rec Rectangle to draw.
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
 * mean that the function has to draw a filled rectangle.
 * @param lineType Type of the line. See #LineTypes
 * @param shift Number of fractional bits in the point coordinates.
 */
+ (void)rectangle:(Mat*)img rec:(Rect2i*)rec color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(rectangle(img:rec:color:thickness:lineType:shift:));
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and
 * r.br()-Point(1,1)` are opposite corners
 *
 * In this overload `shift` is omitted and defaults to 0.
 *
 * @param img Image.
 * @param rec Rectangle to draw.
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
 * mean that the function has to draw a filled rectangle.
 * @param lineType Type of the line. See #LineTypes
 */
+ (void)rectangle:(Mat*)img rec:(Rect2i*)rec color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(rectangle(img:rec:color:thickness:lineType:));
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and
 * r.br()-Point(1,1)` are opposite corners
 *
 * In this overload `lineType` defaults to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param rec Rectangle to draw.
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
 * mean that the function has to draw a filled rectangle.
 */
+ (void)rectangle:(Mat*)img rec:(Rect2i*)rec color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(rectangle(img:rec:color:thickness:));
/**
 * Draws a simple, thick, or filled up-right rectangle.
 *
 * use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and
 * r.br()-Point(1,1)` are opposite corners
 *
 * In this overload `thickness` defaults to 1, `lineType` to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param rec Rectangle to draw.
 * @param color Rectangle color or brightness (grayscale image).
 */
+ (void)rectangle:(Mat*)img rec:(Rect2i*)rec color:(Scalar*)color NS_SWIFT_NAME(rectangle(img:rec:color:));
//
// void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, int shift = 0)
//
/**
 * Draws a circle.
 *
 * The function cv::circle draws a simple or filled circle with a given center and radius.
 * @param img Image where the circle is drawn.
 * @param center Center of the circle.
 * @param radius Radius of the circle.
 * @param color Circle color.
 * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
 * mean that a filled circle is to be drawn.
 * @param lineType Type of the circle boundary. See #LineTypes
 * @param shift Number of fractional bits in the coordinates of the center and in the radius value.
 */
+ (void)circle:(Mat*)img center:(Point2i*)center radius:(int)radius color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(circle(img:center:radius:color:thickness:lineType:shift:));
/**
 * Draws a circle.
 *
 * The function cv::circle draws a simple or filled circle with a given center and radius.
 * In this overload `shift` is omitted and defaults to 0.
 * @param img Image where the circle is drawn.
 * @param center Center of the circle.
 * @param radius Radius of the circle.
 * @param color Circle color.
 * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
 * mean that a filled circle is to be drawn.
 * @param lineType Type of the circle boundary. See #LineTypes
 */
+ (void)circle:(Mat*)img center:(Point2i*)center radius:(int)radius color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(circle(img:center:radius:color:thickness:lineType:));
/**
 * Draws a circle.
 *
 * The function cv::circle draws a simple or filled circle with a given center and radius.
 * In this overload `lineType` defaults to #LINE_8 and `shift` to 0.
 * @param img Image where the circle is drawn.
 * @param center Center of the circle.
 * @param radius Radius of the circle.
 * @param color Circle color.
 * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
 * mean that a filled circle is to be drawn.
 */
+ (void)circle:(Mat*)img center:(Point2i*)center radius:(int)radius color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(circle(img:center:radius:color:thickness:));
/**
 * Draws a circle.
 *
 * The function cv::circle draws a simple or filled circle with a given center and radius.
 * In this overload `thickness` defaults to 1, `lineType` to #LINE_8 and `shift` to 0.
 * @param img Image where the circle is drawn.
 * @param center Center of the circle.
 * @param radius Radius of the circle.
 * @param color Circle color.
 */
+ (void)circle:(Mat*)img center:(Point2i*)center radius:(int)radius color:(Scalar*)color NS_SWIFT_NAME(circle(img:center:radius:color:));
//
// void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, int shift = 0)
//
/**
 * Draws a simple or thick elliptic arc or fills an ellipse sector.
 *
 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
 * arc, or a filled ellipse sector. The drawing code uses general parametric form.
 * A piecewise-linear curve is used to approximate the elliptic arc
 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
 * variant of the function and want to draw the whole ellipse, not an arc, pass `startAngle=0` and
 * `endAngle=360`. If `startAngle` is greater than `endAngle`, they are swapped. The figure below explains
 * the meaning of the parameters to draw the blue arc.
 *
 * ![Parameters of Elliptic Arc](pics/ellipse.svg)
 *
 * @param img Image.
 * @param center Center of the ellipse.
 * @param axes Half of the size of the ellipse main axes.
 * @param angle Ellipse rotation angle in degrees.
 * @param startAngle Starting angle of the elliptic arc in degrees.
 * @param endAngle Ending angle of the elliptic arc in degrees.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
 * a filled ellipse sector is to be drawn.
 * @param lineType Type of the ellipse boundary. See #LineTypes
 * @param shift Number of fractional bits in the coordinates of the center and values of axes.
 */
+ (void)ellipse:(Mat*)img center:(Point2i*)center axes:(Size2i*)axes angle:(double)angle startAngle:(double)startAngle endAngle:(double)endAngle color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(ellipse(img:center:axes:angle:startAngle:endAngle:color:thickness:lineType:shift:));
/**
 * Draws a simple or thick elliptic arc or fills an ellipse sector.
 *
 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
 * arc, or a filled ellipse sector. The drawing code uses general parametric form.
 * A piecewise-linear curve is used to approximate the elliptic arc
 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
 * variant of the function and want to draw the whole ellipse, not an arc, pass `startAngle=0` and
 * `endAngle=360`. If `startAngle` is greater than `endAngle`, they are swapped. The figure below explains
 * the meaning of the parameters to draw the blue arc.
 *
 * ![Parameters of Elliptic Arc](pics/ellipse.svg)
 *
 * In this overload `shift` is omitted and defaults to 0.
 *
 * @param img Image.
 * @param center Center of the ellipse.
 * @param axes Half of the size of the ellipse main axes.
 * @param angle Ellipse rotation angle in degrees.
 * @param startAngle Starting angle of the elliptic arc in degrees.
 * @param endAngle Ending angle of the elliptic arc in degrees.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
 * a filled ellipse sector is to be drawn.
 * @param lineType Type of the ellipse boundary. See #LineTypes
 */
+ (void)ellipse:(Mat*)img center:(Point2i*)center axes:(Size2i*)axes angle:(double)angle startAngle:(double)startAngle endAngle:(double)endAngle color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(ellipse(img:center:axes:angle:startAngle:endAngle:color:thickness:lineType:));
/**
 * Draws a simple or thick elliptic arc or fills an ellipse sector.
 *
 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
 * arc, or a filled ellipse sector. The drawing code uses general parametric form.
 * A piecewise-linear curve is used to approximate the elliptic arc
 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
 * variant of the function and want to draw the whole ellipse, not an arc, pass `startAngle=0` and
 * `endAngle=360`. If `startAngle` is greater than `endAngle`, they are swapped. The figure below explains
 * the meaning of the parameters to draw the blue arc.
 *
 * ![Parameters of Elliptic Arc](pics/ellipse.svg)
 *
 * In this overload `lineType` defaults to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param center Center of the ellipse.
 * @param axes Half of the size of the ellipse main axes.
 * @param angle Ellipse rotation angle in degrees.
 * @param startAngle Starting angle of the elliptic arc in degrees.
 * @param endAngle Ending angle of the elliptic arc in degrees.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
 * a filled ellipse sector is to be drawn.
 */
+ (void)ellipse:(Mat*)img center:(Point2i*)center axes:(Size2i*)axes angle:(double)angle startAngle:(double)startAngle endAngle:(double)endAngle color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(ellipse(img:center:axes:angle:startAngle:endAngle:color:thickness:));
/**
 * Draws a simple or thick elliptic arc or fills an ellipse sector.
 *
 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
 * arc, or a filled ellipse sector. The drawing code uses general parametric form.
 * A piecewise-linear curve is used to approximate the elliptic arc
 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
 * variant of the function and want to draw the whole ellipse, not an arc, pass `startAngle=0` and
 * `endAngle=360`. If `startAngle` is greater than `endAngle`, they are swapped. The figure below explains
 * the meaning of the parameters to draw the blue arc.
 *
 * ![Parameters of Elliptic Arc](pics/ellipse.svg)
 *
 * In this overload `thickness` defaults to 1, `lineType` to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param center Center of the ellipse.
 * @param axes Half of the size of the ellipse main axes.
 * @param angle Ellipse rotation angle in degrees.
 * @param startAngle Starting angle of the elliptic arc in degrees.
 * @param endAngle Ending angle of the elliptic arc in degrees.
 * @param color Ellipse color.
 */
+ (void)ellipse:(Mat*)img center:(Point2i*)center axes:(Size2i*)axes angle:(double)angle startAngle:(double)startAngle endAngle:(double)endAngle color:(Scalar*)color NS_SWIFT_NAME(ellipse(img:center:axes:angle:startAngle:endAngle:color:));
//
// void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, LineTypes lineType = LINE_8)
//
/**
 * Draws the ellipse inscribed in a rotated rectangle.
 *
 * @param img Image.
 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
 * an ellipse inscribed in the rotated rectangle.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
 * a filled ellipse sector is to be drawn.
 * @param lineType Type of the ellipse boundary. See #LineTypes
 */
+ (void)ellipse:(Mat*)img box:(RotatedRect*)box color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(ellipse(img:box:color:thickness:lineType:));
/**
 * Draws the ellipse inscribed in a rotated rectangle.
 *
 * In this overload `lineType` is omitted and defaults to #LINE_8.
 *
 * @param img Image.
 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
 * an ellipse inscribed in the rotated rectangle.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
 * a filled ellipse sector is to be drawn.
 */
+ (void)ellipse:(Mat*)img box:(RotatedRect*)box color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(ellipse(img:box:color:thickness:));
/**
 * Draws the ellipse inscribed in a rotated rectangle.
 *
 * In this overload `thickness` defaults to 1 and `lineType` to #LINE_8.
 *
 * @param img Image.
 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
 * an ellipse inscribed in the rotated rectangle.
 * @param color Ellipse color.
 */
+ (void)ellipse:(Mat*)img box:(RotatedRect*)box color:(Scalar*)color NS_SWIFT_NAME(ellipse(img:box:color:));
//
// void cv::drawMarker(Mat& img, Point position, Scalar color, MarkerTypes markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, LineTypes line_type = 8)
//
/**
 * Draws a marker on a predefined position in an image.
 *
 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
 * marker types are supported, see #MarkerTypes for more information.
 *
 * @param img Image.
 * @param position The point where the crosshair is positioned.
 * @param color Line color.
 * @param markerType The specific type of marker you want to use, see #MarkerTypes
 * @param markerSize The length of the marker axis [default = 20 pixels]
 * @param thickness Line thickness.
 * @param line_type Type of the line, See #LineTypes
 */
+ (void)drawMarker:(Mat*)img position:(Point2i*)position color:(Scalar*)color markerType:(MarkerTypes)markerType markerSize:(int)markerSize thickness:(int)thickness line_type:(LineTypes)line_type NS_SWIFT_NAME(drawMarker(img:position:color:markerType:markerSize:thickness:line_type:));
/**
 * Draws a marker on a predefined position in an image.
 *
 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
 * marker types are supported, see #MarkerTypes for more information.
 *
 * @param img Image.
 * @param position The point where the crosshair is positioned.
 * @param color Line color.
 * @param markerType The specific type of marker you want to use, see #MarkerTypes
 * @param markerSize The length of the marker axis [default = 20 pixels]
 * @param thickness Line thickness.
 */
+ (void)drawMarker:(Mat*)img position:(Point2i*)position color:(Scalar*)color markerType:(MarkerTypes)markerType markerSize:(int)markerSize thickness:(int)thickness NS_SWIFT_NAME(drawMarker(img:position:color:markerType:markerSize:thickness:));
/**
 * Draws a marker on a predefined position in an image.
 *
 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
 * marker types are supported, see #MarkerTypes for more information.
 *
 * @param img Image.
 * @param position The point where the crosshair is positioned.
 * @param color Line color.
 * @param markerType The specific type of marker you want to use, see #MarkerTypes
 * @param markerSize The length of the marker axis [default = 20 pixels]
 */
+ (void)drawMarker:(Mat*)img position:(Point2i*)position color:(Scalar*)color markerType:(MarkerTypes)markerType markerSize:(int)markerSize NS_SWIFT_NAME(drawMarker(img:position:color:markerType:markerSize:));
/**
 * Draws a marker on a predefined position in an image.
 *
 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
 * marker types are supported, see #MarkerTypes for more information.
 *
 * @param img Image.
 * @param position The point where the crosshair is positioned.
 * @param color Line color.
 * @param markerType The specific type of marker you want to use, see #MarkerTypes
 */
+ (void)drawMarker:(Mat*)img position:(Point2i*)position color:(Scalar*)color markerType:(MarkerTypes)markerType NS_SWIFT_NAME(drawMarker(img:position:color:markerType:));
/**
 * Draws a marker on a predefined position in an image.
 *
 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
 * marker types are supported, see #MarkerTypes for more information.
 * In this overload `markerType` defaults to #MARKER_CROSS.
 *
 * @param img Image.
 * @param position The point where the crosshair is positioned.
 * @param color Line color.
 */
+ (void)drawMarker:(Mat*)img position:(Point2i*)position color:(Scalar*)color NS_SWIFT_NAME(drawMarker(img:position:color:));
//
// void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, LineTypes lineType = LINE_8, int shift = 0)
//
/**
 * Fills a convex polygon.
 *
 * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
 * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
 * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
 *
 * @param img Image.
 * @param points Polygon vertices.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See #LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 */
+ (void)fillConvexPoly:(Mat*)img points:(NSArray<Point2i*>*)points color:(Scalar*)color lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(fillConvexPoly(img:points:color:lineType:shift:));
/**
 * Fills a convex polygon.
 *
 * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
 * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
 * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
 *
 * In this overload `shift` is omitted and defaults to 0.
 *
 * @param img Image.
 * @param points Polygon vertices.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See #LineTypes
 */
+ (void)fillConvexPoly:(Mat*)img points:(NSArray<Point2i*>*)points color:(Scalar*)color lineType:(LineTypes)lineType NS_SWIFT_NAME(fillConvexPoly(img:points:color:lineType:));
/**
 * Fills a convex polygon.
 *
 * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
 * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
 * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
 *
 * In this overload `lineType` defaults to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param points Polygon vertices.
 * @param color Polygon color.
 */
+ (void)fillConvexPoly:(Mat*)img points:(NSArray<Point2i*>*)points color:(Scalar*)color NS_SWIFT_NAME(fillConvexPoly(img:points:color:));
//
// void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, LineTypes lineType = LINE_8, int shift = 0, Point offset = Point())
//
/**
 * Fills the area bounded by one or more polygons.
 *
 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
 * complex areas, for example, areas with holes, contours with self-intersections (some of their
 * parts), and so forth.
 *
 * @param img Image.
 * @param pts Array of polygons where each polygon is represented as an array of points.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See #LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 * @param offset Optional offset of all points of the contours.
 */
+ (void)fillPoly:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts color:(Scalar*)color lineType:(LineTypes)lineType shift:(int)shift offset:(Point2i*)offset NS_SWIFT_NAME(fillPoly(img:pts:color:lineType:shift:offset:));
/**
 * Fills the area bounded by one or more polygons.
 *
 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
 * complex areas, for example, areas with holes, contours with self-intersections (some of their
 * parts), and so forth.
 *
 * In this overload `offset` is omitted and defaults to Point().
 *
 * @param img Image.
 * @param pts Array of polygons where each polygon is represented as an array of points.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See #LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 */
+ (void)fillPoly:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts color:(Scalar*)color lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(fillPoly(img:pts:color:lineType:shift:));
/**
 * Fills the area bounded by one or more polygons.
 *
 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
 * complex areas, for example, areas with holes, contours with self-intersections (some of their
 * parts), and so forth.
 *
 * In this overload `shift` defaults to 0 and `offset` to Point().
 *
 * @param img Image.
 * @param pts Array of polygons where each polygon is represented as an array of points.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See #LineTypes
 */
+ (void)fillPoly:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts color:(Scalar*)color lineType:(LineTypes)lineType NS_SWIFT_NAME(fillPoly(img:pts:color:lineType:));
/**
 * Fills the area bounded by one or more polygons.
 *
 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
 * complex areas, for example, areas with holes, contours with self-intersections (some of their
 * parts), and so forth.
 *
 * In this overload `lineType` defaults to #LINE_8, `shift` to 0 and `offset` to Point().
 *
 * @param img Image.
 * @param pts Array of polygons where each polygon is represented as an array of points.
 * @param color Polygon color.
 */
+ (void)fillPoly:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts color:(Scalar*)color NS_SWIFT_NAME(fillPoly(img:pts:color:));
//
// void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, int shift = 0)
//
/**
 * Draws several polygonal curves.
 *
 * @param img Image.
 * @param pts Array of polygonal curves.
 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
 * the function draws a line from the last vertex of each curve to its first vertex.
 * @param color Polyline color.
 * @param thickness Thickness of the polyline edges.
 * @param lineType Type of the line segments. See #LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 *
 * The function cv::polylines draws one or more polygonal curves.
 */
+ (void)polylines:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts isClosed:(BOOL)isClosed color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType shift:(int)shift NS_SWIFT_NAME(polylines(img:pts:isClosed:color:thickness:lineType:shift:));
/**
 * Draws several polygonal curves.
 *
 * In this overload `shift` is omitted and defaults to 0.
 *
 * @param img Image.
 * @param pts Array of polygonal curves.
 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
 * the function draws a line from the last vertex of each curve to its first vertex.
 * @param color Polyline color.
 * @param thickness Thickness of the polyline edges.
 * @param lineType Type of the line segments. See #LineTypes
 *
 * The function cv::polylines draws one or more polygonal curves.
 */
+ (void)polylines:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts isClosed:(BOOL)isClosed color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(polylines(img:pts:isClosed:color:thickness:lineType:));
/**
 * Draws several polygonal curves.
 *
 * In this overload `lineType` defaults to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param pts Array of polygonal curves.
 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
 * the function draws a line from the last vertex of each curve to its first vertex.
 * @param color Polyline color.
 * @param thickness Thickness of the polyline edges.
 *
 * The function cv::polylines draws one or more polygonal curves.
 */
+ (void)polylines:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts isClosed:(BOOL)isClosed color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(polylines(img:pts:isClosed:color:thickness:));
/**
 * Draws several polygonal curves.
 *
 * In this overload `thickness` defaults to 1, `lineType` to #LINE_8 and `shift` to 0.
 *
 * @param img Image.
 * @param pts Array of polygonal curves.
 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
 * the function draws a line from the last vertex of each curve to its first vertex.
 * @param color Polyline color.
 *
 * The function cv::polylines draws one or more polygonal curves.
 */
+ (void)polylines:(Mat*)img pts:(NSArray<NSArray<Point2i*>*>*)pts isClosed:(BOOL)isClosed color:(Scalar*)color NS_SWIFT_NAME(polylines(img:pts:isClosed:color:));
//
// void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
//
/**
 * Draws contours outlines or filled contours.
 *
 * The function draws contour outlines in the image if `$$\texttt{thickness} \ge 0$$` or fills the area
 * bounded by the contours if `$$\texttt{thickness}<0$$` . The example below shows how to retrieve
 * connected components from the binary image and label them:
 * INCLUDE: snippets/imgproc_drawContours.cpp
 *
 * @param image Destination image.
 * @param contours All the input contours. Each contour is stored as a point vector.
 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
 * @param color Color of the contours.
 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
 * thickness=#FILLED ), the contour interiors are drawn.
 * @param lineType Line connectivity. See #LineTypes
 * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
 * some of the contours (see maxLevel ).
 * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
 * parameter is only taken into account when there is hierarchy available.
 * @param offset Optional contour shift parameter. Shift all the drawn contours by the specified
 * `$$\texttt{offset}=(dx,dy)$$` .
 * NOTE: When thickness=#FILLED, the function is designed to handle connected components with holes correctly
 * even when no hierarchy data is provided. This is done by analyzing all the outlines together
 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
 * of contours, or iterate over the collection using contourIdx parameter.
 */
+ (void)drawContours:(Mat*)image contours:(NSArray<NSArray<Point2i*>*>*)contours contourIdx:(int)contourIdx color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType hierarchy:(Mat*)hierarchy maxLevel:(int)maxLevel offset:(Point2i*)offset NS_SWIFT_NAME(drawContours(image:contours:contourIdx:color:thickness:lineType:hierarchy:maxLevel:offset:));
/**
 * Draws contours outlines or filled contours.
 *
 * The function draws contour outlines in the image if `$$\texttt{thickness} \ge 0$$` or fills the area
 * bounded by the contours if `$$\texttt{thickness}<0$$` . The example below shows how to retrieve
 * connected components from the binary image and label them:
 * INCLUDE: snippets/imgproc_drawContours.cpp
 *
 * @param image Destination image.
 * @param contours All the input contours. Each contour is stored as a point vector.
 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
 * @param color Color of the contours.
 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
 * thickness=#FILLED ), the contour interiors are drawn.
 * @param lineType Line connectivity. See #LineTypes
 * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
 * some of the contours (see maxLevel ).
 * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
 * parameter is only taken into account when there is hierarchy available.
 *
 * The omitted `offset` parameter defaults to Point(), i.e. the drawn contours are not shifted.
 *
 * NOTE: When thickness=#FILLED, the function is designed to handle connected components with holes correctly
 * even when no hierarchy data is provided. This is done by analyzing all the outlines together
 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
 * of contours, or iterate over the collection using contourIdx parameter.
 */
+ (void)drawContours:(Mat*)image contours:(NSArray<NSArray<Point2i*>*>*)contours contourIdx:(int)contourIdx color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType hierarchy:(Mat*)hierarchy maxLevel:(int)maxLevel NS_SWIFT_NAME(drawContours(image:contours:contourIdx:color:thickness:lineType:hierarchy:maxLevel:));
/**
 * Draws contours outlines or filled contours.
 *
 * The function draws contour outlines in the image if `$$\texttt{thickness} \ge 0$$` or fills the area
 * bounded by the contours if `$$\texttt{thickness}<0$$` . The example below shows how to retrieve
 * connected components from the binary image and label them:
 * INCLUDE: snippets/imgproc_drawContours.cpp
 *
 * @param image Destination image.
 * @param contours All the input contours. Each contour is stored as a point vector.
 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
 * @param color Color of the contours.
 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
 * thickness=#FILLED ), the contour interiors are drawn.
 * @param lineType Line connectivity. See #LineTypes
 * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
 * some of the contours.
 *
 * The omitted `maxLevel` parameter defaults to INT_MAX (all nesting levels are drawn) and the
 * omitted `offset` parameter defaults to Point(), i.e. the drawn contours are not shifted.
 *
 * NOTE: When thickness=#FILLED, the function is designed to handle connected components with holes correctly
 * even when no hierarchy data is provided. This is done by analyzing all the outlines together
 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
 * of contours, or iterate over the collection using contourIdx parameter.
 */
+ (void)drawContours:(Mat*)image contours:(NSArray<NSArray<Point2i*>*>*)contours contourIdx:(int)contourIdx color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType hierarchy:(Mat*)hierarchy NS_SWIFT_NAME(drawContours(image:contours:contourIdx:color:thickness:lineType:hierarchy:));
/**
 * Draws contours outlines or filled contours.
 *
 * The function draws contour outlines in the image if `$$\texttt{thickness} \ge 0$$` or fills the area
 * bounded by the contours if `$$\texttt{thickness}<0$$` . The example below shows how to retrieve
 * connected components from the binary image and label them:
 * INCLUDE: snippets/imgproc_drawContours.cpp
 *
 * @param image Destination image.
 * @param contours All the input contours. Each contour is stored as a point vector.
 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
 * @param color Color of the contours.
 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
 * thickness=#FILLED ), the contour interiors are drawn.
 * @param lineType Line connectivity. See #LineTypes
 *
 * The omitted parameters default to: `hierarchy` = Mat() (no hierarchy information),
 * `maxLevel` = INT_MAX (all nesting levels are drawn), `offset` = Point() (no shift).
 *
 * NOTE: When thickness=#FILLED, the function is designed to handle connected components with holes correctly
 * even when no hierarchy data is provided. This is done by analyzing all the outlines together
 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
 * of contours, or iterate over the collection using contourIdx parameter.
 */
+ (void)drawContours:(Mat*)image contours:(NSArray<NSArray<Point2i*>*>*)contours contourIdx:(int)contourIdx color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(drawContours(image:contours:contourIdx:color:thickness:lineType:));
/**
 * Draws contours outlines or filled contours.
 *
 * The function draws contour outlines in the image if `$$\texttt{thickness} \ge 0$$` or fills the area
 * bounded by the contours if `$$\texttt{thickness}<0$$` . The example below shows how to retrieve
 * connected components from the binary image and label them:
 * INCLUDE: snippets/imgproc_drawContours.cpp
 *
 * @param image Destination image.
 * @param contours All the input contours. Each contour is stored as a point vector.
 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
 * @param color Color of the contours.
 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
 * thickness=#FILLED ), the contour interiors are drawn.
 *
 * The omitted parameters default to: `lineType` = LINE_8, `hierarchy` = Mat() (no hierarchy
 * information), `maxLevel` = INT_MAX (all nesting levels are drawn), `offset` = Point() (no shift).
 *
 * NOTE: When thickness=#FILLED, the function is designed to handle connected components with holes correctly
 * even when no hierarchy data is provided. This is done by analyzing all the outlines together
 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
 * of contours, or iterate over the collection using contourIdx parameter.
 */
+ (void)drawContours:(Mat*)image contours:(NSArray<NSArray<Point2i*>*>*)contours contourIdx:(int)contourIdx color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(drawContours(image:contours:contourIdx:color:thickness:));
/**
 * Draws contours outlines or filled contours.
 *
 * The function draws contour outlines in the image if `$$\texttt{thickness} \ge 0$$` or fills the area
 * bounded by the contours if `$$\texttt{thickness}<0$$` . The example below shows how to retrieve
 * connected components from the binary image and label them:
 * INCLUDE: snippets/imgproc_drawContours.cpp
 *
 * @param image Destination image.
 * @param contours All the input contours. Each contour is stored as a point vector.
 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
 * @param color Color of the contours.
 *
 * The omitted parameters default to: `thickness` = 1, `lineType` = LINE_8, `hierarchy` = Mat()
 * (no hierarchy information), `maxLevel` = INT_MAX (all nesting levels are drawn),
 * `offset` = Point() (no shift).
 *
 * NOTE: When thickness=#FILLED, the function is designed to handle connected components with holes correctly
 * even when no hierarchy data is provided. This is done by analyzing all the outlines together
 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
 * of contours, or iterate over the collection using contourIdx parameter.
 */
+ (void)drawContours:(Mat*)image contours:(NSArray<NSArray<Point2i*>*>*)contours contourIdx:(int)contourIdx color:(Scalar*)color NS_SWIFT_NAME(drawContours(image:contours:contourIdx:color:));
//
// bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2)
//
/**
 * Clips the line against the image rectangle.
 *
 * @param imgRect Image rectangle.
 * @param pt1 First line point; updated in place to the clipped position (the underlying C++
 * parameter is passed by reference).
 * @param pt2 Second line point; updated in place to the clipped position.
 * @return NO if the line segment lies completely outside the rectangle, YES otherwise.
 */
+ (BOOL)clipLine:(Rect2i*)imgRect pt1:(Point2i*)pt1 pt2:(Point2i*)pt2 NS_SWIFT_NAME(clipLine(imgRect:pt1:pt2:));
//
// void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
//
/**
 * Approximates an elliptic arc with a polyline.
 *
 * The function ellipse2Poly computes the vertices of a polyline that approximates the specified
 * elliptic arc. It is used by #ellipse. If `arcStart` is greater than `arcEnd`, they are swapped.
 *
 * @param center Center of the arc.
 * @param axes Half of the size of the ellipse main axes. See #ellipse for details.
 * @param angle Rotation angle of the ellipse in degrees. See #ellipse for details.
 * @param arcStart Starting angle of the elliptic arc in degrees.
 * @param arcEnd Ending angle of the elliptic arc in degrees.
 * @param delta Angle between the subsequent polyline vertices. It defines the approximation
 * accuracy.
 * @param pts Output vector of polyline vertices; filled in place.
 */
+ (void)ellipse2Poly:(Point2i*)center axes:(Size2i*)axes angle:(int)angle arcStart:(int)arcStart arcEnd:(int)arcEnd delta:(int)delta pts:(NSMutableArray<Point2i*>*)pts NS_SWIFT_NAME(ellipse2Poly(center:axes:angle:arcStart:arcEnd:delta:pts:));
  7513. //
  7514. // void cv::putText(Mat& img, String text, Point org, HersheyFonts fontFace, double fontScale, Scalar color, int thickness = 1, LineTypes lineType = LINE_8, bool bottomLeftOrigin = false)
  7515. //
  7516. /**
  7517. * Draws a text string.
  7518. *
  7519. * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
  7520. * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
  7521. * example.
  7522. *
  7523. * @param img Image.
  7524. * @param text Text string to be drawn.
  7525. * @param org Bottom-left corner of the text string in the image.
  7526. * @param fontFace Font type, see #HersheyFonts.
  7527. * @param fontScale Font scale factor that is multiplied by the font-specific base size.
  7528. * @param color Text color.
  7529. * @param thickness Thickness of the lines used to draw a text.
  7530. * @param lineType Line type. See #LineTypes
  7531. * @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise,
  7532. * it is at the top-left corner.
  7533. */
  7534. + (void)putText:(Mat*)img text:(NSString*)text org:(Point2i*)org fontFace:(HersheyFonts)fontFace fontScale:(double)fontScale color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType bottomLeftOrigin:(BOOL)bottomLeftOrigin NS_SWIFT_NAME(putText(img:text:org:fontFace:fontScale:color:thickness:lineType:bottomLeftOrigin:));
  7535. /**
  7536. * Draws a text string.
  7537. *
  7538. * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
  7539. * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
  7540. * example.
  7541. *
  7542. * @param img Image.
  7543. * @param text Text string to be drawn.
  7544. * @param org Bottom-left corner of the text string in the image.
  7545. * @param fontFace Font type, see #HersheyFonts.
  7546. * @param fontScale Font scale factor that is multiplied by the font-specific base size.
  7547. * @param color Text color.
  7548. * @param thickness Thickness of the lines used to draw a text.
  7549. * @param lineType Line type. See #LineTypes
  7550. * it is at the top-left corner.
  7551. */
  7552. + (void)putText:(Mat*)img text:(NSString*)text org:(Point2i*)org fontFace:(HersheyFonts)fontFace fontScale:(double)fontScale color:(Scalar*)color thickness:(int)thickness lineType:(LineTypes)lineType NS_SWIFT_NAME(putText(img:text:org:fontFace:fontScale:color:thickness:lineType:));
  7553. /**
  7554. * Draws a text string.
  7555. *
  7556. * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
  7557. * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
  7558. * example.
  7559. *
  7560. * @param img Image.
  7561. * @param text Text string to be drawn.
  7562. * @param org Bottom-left corner of the text string in the image.
  7563. * @param fontFace Font type, see #HersheyFonts.
  7564. * @param fontScale Font scale factor that is multiplied by the font-specific base size.
  7565. * @param color Text color.
  7566. * @param thickness Thickness of the lines used to draw a text.
  7567. * it is at the top-left corner.
  7568. */
  7569. + (void)putText:(Mat*)img text:(NSString*)text org:(Point2i*)org fontFace:(HersheyFonts)fontFace fontScale:(double)fontScale color:(Scalar*)color thickness:(int)thickness NS_SWIFT_NAME(putText(img:text:org:fontFace:fontScale:color:thickness:));
/**
 * Draws a text string.
 *
 * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
 * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
 * example.
 *
 * @param img Image.
 * @param text Text string to be drawn.
 * @param org Bottom-left corner of the text string in the image.
 * @param fontFace Font type, see #HersheyFonts.
 * @param fontScale Font scale factor that is multiplied by the font-specific base size.
 * @param color Text color.
 *
 * The omitted parameters default to: `thickness` = 1, `lineType` = LINE_8,
 * `bottomLeftOrigin` = false (the image data origin is at the top-left corner).
 */
+ (void)putText:(Mat*)img text:(NSString*)text org:(Point2i*)org fontFace:(HersheyFonts)fontFace fontScale:(double)fontScale color:(Scalar*)color NS_SWIFT_NAME(putText(img:text:org:fontFace:fontScale:color:));
//
// Size cv::getTextSize(String text, HersheyFonts fontFace, double fontScale, int thickness, int* baseLine)
//
/**
 * Calculates the width and height of a text string.
 *
 * The function cv::getTextSize calculates and returns the size of a box that contains the specified text.
 * That is, the following code renders some text, the tight box surrounding it, and the baseline:
 *
 * String text = "Funny text inside the box";
 * int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
 * double fontScale = 2;
 * int thickness = 3;
 *
 * Mat img(600, 800, CV_8UC3, Scalar::all(0));
 *
 * int baseline=0;
 * Size textSize = getTextSize(text, fontFace,
 * fontScale, thickness, &baseline);
 * baseline += thickness;
 *
 * // center the text
 * Point textOrg((img.cols - textSize.width)/2,
 * (img.rows + textSize.height)/2);
 *
 * // draw the box
 * rectangle(img, textOrg + Point(0, baseline),
 * textOrg + Point(textSize.width, -textSize.height),
 * Scalar(0,0,255));
 * // ... and the baseline first
 * line(img, textOrg + Point(0, thickness),
 * textOrg + Point(textSize.width, thickness),
 * Scalar(0, 0, 255));
 *
 * // then put the text itself
 * putText(img, text, textOrg, fontFace, fontScale,
 * Scalar::all(255), thickness, 8);
 *
 *
 * @param text Input text string.
 * @param fontFace Font to use, see #HersheyFonts.
 * @param fontScale Font scale factor that is multiplied by the font-specific base size.
 * @param thickness Thickness of lines used to render the text. See #putText for details.
 * @param baseLine y-coordinate of the baseline relative to the bottom-most text
 * point; written through the pointer (the underlying C++ parameter is `int*`).
 * @return The size of a box that contains the specified text.
 *
 * @see `+putText:text:org:fontFace:fontScale:color:thickness:lineType:bottomLeftOrigin:`
 */
+ (Size2i*)getTextSize:(NSString*)text fontFace:(HersheyFonts)fontFace fontScale:(double)fontScale thickness:(int)thickness baseLine:(int*)baseLine NS_SWIFT_NAME(getTextSize(text:fontFace:fontScale:thickness:baseLine:));
//
// double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1)
//
/**
 * Calculates the font-specific size to use to achieve a given height in pixels.
 *
 * @param fontFace Font to use, see cv::HersheyFonts.
 * @param pixelHeight Pixel height to compute the fontScale for
 * @param thickness Thickness of lines used to render the text. See putText for details.
 * @return The fontSize to use for cv::putText
 *
 * @see `cv::putText`
 */
+ (double)getFontScaleFromHeight:(int)fontFace pixelHeight:(int)pixelHeight thickness:(int)thickness NS_SWIFT_NAME(getFontScaleFromHeight(fontFace:pixelHeight:thickness:));
/**
 * Calculates the font-specific size to use to achieve a given height in pixels.
 *
 * @param fontFace Font to use, see cv::HersheyFonts.
 * @param pixelHeight Pixel height to compute the fontScale for
 * @return The fontSize to use for cv::putText
 *
 * The omitted `thickness` parameter defaults to 1.
 *
 * @see `cv::putText`
 */
+ (double)getFontScaleFromHeight:(int)fontFace pixelHeight:(int)pixelHeight NS_SWIFT_NAME(getFontScaleFromHeight(fontFace:pixelHeight:));
//
// void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
//
/**
 * Finds lines in a binary image using the standard Hough transform and get accumulator.
 *
 * NOTE: This function is for bindings use only. Use original function in C++ code
 *
 * @see `+HoughLines:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:`
 */
+ (void)HoughLinesWithAccumulator:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn stn:(double)stn min_theta:(double)min_theta max_theta:(double)max_theta NS_SWIFT_NAME(HoughLinesWithAccumulator(image:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:));
/**
 * Finds lines in a binary image using the standard Hough transform and get accumulator.
 *
 * NOTE: This function is for bindings use only. Use original function in C++ code
 *
 * The omitted `max_theta` parameter defaults to CV_PI.
 *
 * @see `+HoughLines:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:`
 */
+ (void)HoughLinesWithAccumulator:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn stn:(double)stn min_theta:(double)min_theta NS_SWIFT_NAME(HoughLinesWithAccumulator(image:lines:rho:theta:threshold:srn:stn:min_theta:));
/**
 * Finds lines in a binary image using the standard Hough transform and get accumulator.
 *
 * NOTE: This function is for bindings use only. Use original function in C++ code
 *
 * The omitted parameters default to: `min_theta` = 0, `max_theta` = CV_PI.
 *
 * @see `+HoughLines:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:`
 */
+ (void)HoughLinesWithAccumulator:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn stn:(double)stn NS_SWIFT_NAME(HoughLinesWithAccumulator(image:lines:rho:theta:threshold:srn:stn:));
/**
 * Finds lines in a binary image using the standard Hough transform and get accumulator.
 *
 * NOTE: This function is for bindings use only. Use original function in C++ code
 *
 * The omitted parameters default to: `stn` = 0, `min_theta` = 0, `max_theta` = CV_PI.
 *
 * @see `+HoughLines:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:`
 */
+ (void)HoughLinesWithAccumulator:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold srn:(double)srn NS_SWIFT_NAME(HoughLinesWithAccumulator(image:lines:rho:theta:threshold:srn:));
/**
 * Finds lines in a binary image using the standard Hough transform and get accumulator.
 *
 * NOTE: This function is for bindings use only. Use original function in C++ code
 *
 * The omitted parameters default to: `srn` = 0, `stn` = 0, `min_theta` = 0, `max_theta` = CV_PI.
 *
 * @see `+HoughLines:lines:rho:theta:threshold:srn:stn:min_theta:max_theta:`
 */
+ (void)HoughLinesWithAccumulator:(Mat*)image lines:(Mat*)lines rho:(double)rho theta:(double)theta threshold:(int)threshold NS_SWIFT_NAME(HoughLinesWithAccumulator(image:lines:rho:theta:threshold:));
  7703. @end
  7704. NS_ASSUME_NONNULL_END