//
// This file is auto-generated. Please don't modify it!
//
#pragma once

#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/calib3d.hpp"
#else
#define CV_EXPORTS
#endif

#import <Foundation/Foundation.h>

@class CirclesGridFinderParameters;
@class Double3;
@class Mat;
@class Point2d;
@class Rect2i;
@class Scalar;
@class Size2i;
@class TermCriteria;
@class UsacParams;

// C++: enum HandEyeCalibrationMethod (cv.HandEyeCalibrationMethod)
typedef NS_ENUM(int, HandEyeCalibrationMethod) {
    CALIB_HAND_EYE_TSAI = 0,
    CALIB_HAND_EYE_PARK = 1,
    CALIB_HAND_EYE_HORAUD = 2,
    CALIB_HAND_EYE_ANDREFF = 3,
    CALIB_HAND_EYE_DANIILIDIS = 4
};

// C++: enum LocalOptimMethod (cv.LocalOptimMethod)
typedef NS_ENUM(int, LocalOptimMethod) {
    LOCAL_OPTIM_NULL = 0,
    LOCAL_OPTIM_INNER_LO = 1,
    LOCAL_OPTIM_INNER_AND_ITER_LO = 2,
    LOCAL_OPTIM_GC = 3,
    LOCAL_OPTIM_SIGMA = 4
};

// C++: enum NeighborSearchMethod (cv.NeighborSearchMethod)
typedef NS_ENUM(int, NeighborSearchMethod) {
    NEIGH_FLANN_KNN = 0,
    NEIGH_GRID = 1,
    NEIGH_FLANN_RADIUS = 2
};

// C++: enum RobotWorldHandEyeCalibrationMethod (cv.RobotWorldHandEyeCalibrationMethod)
typedef NS_ENUM(int, RobotWorldHandEyeCalibrationMethod) {
    CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0,
    CALIB_ROBOT_WORLD_HAND_EYE_LI = 1
};

// C++: enum SamplingMethod (cv.SamplingMethod)
typedef NS_ENUM(int, SamplingMethod) {
    SAMPLING_UNIFORM = 0,
    SAMPLING_PROGRESSIVE_NAPSAC = 1,
    SAMPLING_NAPSAC = 2,
    SAMPLING_PROSAC = 3
};

// C++: enum ScoreMethod (cv.ScoreMethod)
typedef NS_ENUM(int, ScoreMethod) {
    SCORE_METHOD_RANSAC = 0,
    SCORE_METHOD_MSAC = 1,
    SCORE_METHOD_MAGSAC = 2,
    SCORE_METHOD_LMEDS = 3
};

// C++: enum SolvePnPMethod (cv.SolvePnPMethod)
typedef NS_ENUM(int, SolvePnPMethod) {
    SOLVEPNP_ITERATIVE = 0,
    SOLVEPNP_EPNP = 1,
    SOLVEPNP_P3P = 2,
    SOLVEPNP_DLS = 3,
    SOLVEPNP_UPNP = 4,
    SOLVEPNP_AP3P = 5,
    SOLVEPNP_IPPE = 6,
    SOLVEPNP_IPPE_SQUARE = 7,
    SOLVEPNP_SQPNP = 8,
    SOLVEPNP_MAX_COUNT = 8+1
};

// C++: enum UndistortTypes (cv.UndistortTypes)
typedef NS_ENUM(int, UndistortTypes) {
    PROJ_SPHERICAL_ORTHO = 0,
    PROJ_SPHERICAL_EQRECT = 1
};
NS_ASSUME_NONNULL_BEGIN

// C++: class Calib3d
/**
 * The Calib3d module
 *
 * Member classes: `UsacParams`, `CirclesGridFinderParameters`, `StereoMatcher`, `StereoBM`, `StereoSGBM`
 *
 * Member enums: `SolvePnPMethod`, `HandEyeCalibrationMethod`, `RobotWorldHandEyeCalibrationMethod`, `SamplingMethod`, `LocalOptimMethod`, `ScoreMethod`, `NeighborSearchMethod`, `GridType`, `UndistortTypes`
 */
CV_EXPORTS @interface Calib3d : NSObject

#pragma mark - Class Constants

@property (class, readonly) int CV_ITERATIVE NS_SWIFT_NAME(CV_ITERATIVE);
@property (class, readonly) int CV_EPNP NS_SWIFT_NAME(CV_EPNP);
@property (class, readonly) int CV_P3P NS_SWIFT_NAME(CV_P3P);
@property (class, readonly) int CV_DLS NS_SWIFT_NAME(CV_DLS);
@property (class, readonly) int CvLevMarq_DONE NS_SWIFT_NAME(CvLevMarq_DONE);
@property (class, readonly) int CvLevMarq_STARTED NS_SWIFT_NAME(CvLevMarq_STARTED);
@property (class, readonly) int CvLevMarq_CALC_J NS_SWIFT_NAME(CvLevMarq_CALC_J);
@property (class, readonly) int CvLevMarq_CHECK_ERR NS_SWIFT_NAME(CvLevMarq_CHECK_ERR);
@property (class, readonly) int LMEDS NS_SWIFT_NAME(LMEDS);
@property (class, readonly) int RANSAC NS_SWIFT_NAME(RANSAC);
@property (class, readonly) int RHO NS_SWIFT_NAME(RHO);
@property (class, readonly) int USAC_DEFAULT NS_SWIFT_NAME(USAC_DEFAULT);
@property (class, readonly) int USAC_PARALLEL NS_SWIFT_NAME(USAC_PARALLEL);
@property (class, readonly) int USAC_FM_8PTS NS_SWIFT_NAME(USAC_FM_8PTS);
@property (class, readonly) int USAC_FAST NS_SWIFT_NAME(USAC_FAST);
@property (class, readonly) int USAC_ACCURATE NS_SWIFT_NAME(USAC_ACCURATE);
@property (class, readonly) int USAC_PROSAC NS_SWIFT_NAME(USAC_PROSAC);
@property (class, readonly) int USAC_MAGSAC NS_SWIFT_NAME(USAC_MAGSAC);
@property (class, readonly) int CALIB_CB_ADAPTIVE_THRESH NS_SWIFT_NAME(CALIB_CB_ADAPTIVE_THRESH);
@property (class, readonly) int CALIB_CB_NORMALIZE_IMAGE NS_SWIFT_NAME(CALIB_CB_NORMALIZE_IMAGE);
@property (class, readonly) int CALIB_CB_FILTER_QUADS NS_SWIFT_NAME(CALIB_CB_FILTER_QUADS);
@property (class, readonly) int CALIB_CB_FAST_CHECK NS_SWIFT_NAME(CALIB_CB_FAST_CHECK);
@property (class, readonly) int CALIB_CB_EXHAUSTIVE NS_SWIFT_NAME(CALIB_CB_EXHAUSTIVE);
@property (class, readonly) int CALIB_CB_ACCURACY NS_SWIFT_NAME(CALIB_CB_ACCURACY);
@property (class, readonly) int CALIB_CB_LARGER NS_SWIFT_NAME(CALIB_CB_LARGER);
@property (class, readonly) int CALIB_CB_MARKER NS_SWIFT_NAME(CALIB_CB_MARKER);
@property (class, readonly) int CALIB_CB_SYMMETRIC_GRID NS_SWIFT_NAME(CALIB_CB_SYMMETRIC_GRID);
@property (class, readonly) int CALIB_CB_ASYMMETRIC_GRID NS_SWIFT_NAME(CALIB_CB_ASYMMETRIC_GRID);
@property (class, readonly) int CALIB_CB_CLUSTERING NS_SWIFT_NAME(CALIB_CB_CLUSTERING);
@property (class, readonly) int CALIB_NINTRINSIC NS_SWIFT_NAME(CALIB_NINTRINSIC);
@property (class, readonly) int CALIB_USE_INTRINSIC_GUESS NS_SWIFT_NAME(CALIB_USE_INTRINSIC_GUESS);
@property (class, readonly) int CALIB_FIX_ASPECT_RATIO NS_SWIFT_NAME(CALIB_FIX_ASPECT_RATIO);
@property (class, readonly) int CALIB_FIX_PRINCIPAL_POINT NS_SWIFT_NAME(CALIB_FIX_PRINCIPAL_POINT);
@property (class, readonly) int CALIB_ZERO_TANGENT_DIST NS_SWIFT_NAME(CALIB_ZERO_TANGENT_DIST);
@property (class, readonly) int CALIB_FIX_FOCAL_LENGTH NS_SWIFT_NAME(CALIB_FIX_FOCAL_LENGTH);
@property (class, readonly) int CALIB_FIX_K1 NS_SWIFT_NAME(CALIB_FIX_K1);
@property (class, readonly) int CALIB_FIX_K2 NS_SWIFT_NAME(CALIB_FIX_K2);
@property (class, readonly) int CALIB_FIX_K3 NS_SWIFT_NAME(CALIB_FIX_K3);
@property (class, readonly) int CALIB_FIX_K4 NS_SWIFT_NAME(CALIB_FIX_K4);
@property (class, readonly) int CALIB_FIX_K5 NS_SWIFT_NAME(CALIB_FIX_K5);
@property (class, readonly) int CALIB_FIX_K6 NS_SWIFT_NAME(CALIB_FIX_K6);
@property (class, readonly) int CALIB_RATIONAL_MODEL NS_SWIFT_NAME(CALIB_RATIONAL_MODEL);
@property (class, readonly) int CALIB_THIN_PRISM_MODEL NS_SWIFT_NAME(CALIB_THIN_PRISM_MODEL);
@property (class, readonly) int CALIB_FIX_S1_S2_S3_S4 NS_SWIFT_NAME(CALIB_FIX_S1_S2_S3_S4);
@property (class, readonly) int CALIB_TILTED_MODEL NS_SWIFT_NAME(CALIB_TILTED_MODEL);
@property (class, readonly) int CALIB_FIX_TAUX_TAUY NS_SWIFT_NAME(CALIB_FIX_TAUX_TAUY);
@property (class, readonly) int CALIB_USE_QR NS_SWIFT_NAME(CALIB_USE_QR);
@property (class, readonly) int CALIB_FIX_TANGENT_DIST NS_SWIFT_NAME(CALIB_FIX_TANGENT_DIST);
@property (class, readonly) int CALIB_FIX_INTRINSIC NS_SWIFT_NAME(CALIB_FIX_INTRINSIC);
@property (class, readonly) int CALIB_SAME_FOCAL_LENGTH NS_SWIFT_NAME(CALIB_SAME_FOCAL_LENGTH);
@property (class, readonly) int CALIB_ZERO_DISPARITY NS_SWIFT_NAME(CALIB_ZERO_DISPARITY);
@property (class, readonly) int CALIB_USE_LU NS_SWIFT_NAME(CALIB_USE_LU);
@property (class, readonly) int CALIB_USE_EXTRINSIC_GUESS NS_SWIFT_NAME(CALIB_USE_EXTRINSIC_GUESS);
@property (class, readonly) int FM_7POINT NS_SWIFT_NAME(FM_7POINT);
@property (class, readonly) int FM_8POINT NS_SWIFT_NAME(FM_8POINT);
@property (class, readonly) int FM_LMEDS NS_SWIFT_NAME(FM_LMEDS);
@property (class, readonly) int FM_RANSAC NS_SWIFT_NAME(FM_RANSAC);
@property (class, readonly) int CALIB_RECOMPUTE_EXTRINSIC NS_SWIFT_NAME(CALIB_RECOMPUTE_EXTRINSIC);
@property (class, readonly) int CALIB_CHECK_COND NS_SWIFT_NAME(CALIB_CHECK_COND);
@property (class, readonly) int CALIB_FIX_SKEW NS_SWIFT_NAME(CALIB_FIX_SKEW);

#pragma mark - Methods

//
// void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
//
/**
 * Converts a rotation matrix to a rotation vector or vice versa.
 *
 * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
 * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
 * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
 * derivatives of the output array components with respect to the input array components.
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}$$`
 *
 * The inverse transformation can also be done easily, since
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}$$`
 *
 * A rotation vector is a convenient and the most compact representation of a rotation matrix (since any
 * rotation matrix has just 3 degrees of freedom). The representation is used in global 3D geometry
 * optimization procedures such as REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP.
 *
 * NOTE: More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinates
 * can be found in:
 * - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
 *
 * NOTE: Useful information on SE(3) and Lie groups can be found in:
 * - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
 * - Lie Groups for 2D and 3D Transformations, Ethan Eade CITE: Eade17
 * - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
 */
+ (void)Rodrigues:(Mat*)src dst:(Mat*)dst jacobian:(Mat*)jacobian NS_SWIFT_NAME(Rodrigues(src:dst:jacobian:));

/**
 * Converts a rotation matrix to a rotation vector or vice versa.
 *
 * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
 * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}$$`
 *
 * The inverse transformation can also be done easily, since
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}$$`
 *
 * A rotation vector is a convenient and the most compact representation of a rotation matrix (since any
 * rotation matrix has just 3 degrees of freedom). The representation is used in global 3D geometry
 * optimization procedures such as REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP.
 *
 * NOTE: More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinates
 * can be found in:
 * - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
 *
 * NOTE: Useful information on SE(3) and Lie groups can be found in:
 * - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
 * - Lie Groups for 2D and 3D Transformations, Ethan Eade CITE: Eade17
 * - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
 */
+ (void)Rodrigues:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(Rodrigues(src:dst:));
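
/*
 A minimal usage sketch (not part of the generated header): converting a 3x1
 rotation vector into a 3x3 rotation matrix and back. The Mat initializer,
 the CvType constants, and the put:col:data: element access are assumptions
 about the opencv2 Objective-C bindings; adjust to the binding version you
 build against.

     // 90-degree rotation about the z-axis, written as an axis-angle
     // (Rodrigues) vector whose norm is the rotation angle.
     Mat *rvec = [[Mat alloc] initWithRows:3 cols:1 type:CvType.CV_64F];
     [rvec put:0 col:0 data:@[@0.0, @0.0, @(M_PI_2)]];

     Mat *R = [Mat new];
     [Calib3d Rodrigues:rvec dst:R];       // rotation vector -> 3x3 matrix

     Mat *rvecBack = [Mat new];
     [Calib3d Rodrigues:R dst:rvecBack];   // 3x3 matrix -> rotation vector
*/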
  210. //
  211. // Mat cv::findHomography(Mat srcPoints, Mat dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
  212. //
  213. /**
  214. * Finds a perspective transformation between two planes.
  215. *
  216. * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
  217. * or vector\<Point2f\> .
  218. * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
  219. * a vector\<Point2f\> .
  220. * @param method Method used to compute a homography matrix. The following methods are possible:
  221. * - **0** - a regular method using all the points, i.e., the least squares method
  222. * - REF: RANSAC - RANSAC-based robust method
  223. * - REF: LMEDS - Least-Median robust method
  224. * - REF: RHO - PROSAC-based robust method
  225. * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
  226. * (used in the RANSAC and RHO methods only). That is, if
  227. * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
  228. * then the point `$$i$$` is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
  229. * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
  230. * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
  231. * mask values are ignored.
  232. * @param maxIters The maximum number of RANSAC iterations.
  233. * @param confidence Confidence level, between 0 and 1.
  234. *
  235. * The function finds and returns the perspective transformation `$$H$$` between the source and the
  236. * destination planes:
  237. *
  238. * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
  239. *
  240. * so that the back-projection error
  241. *
  242. * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
  243. *
  244. * is minimized. If the parameter method is set to the default value 0, the function uses all the point
  245. * pairs to compute an initial homography estimate with a simple least-squares scheme.
  246. *
  247. * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
  248. * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
  249. * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
  250. * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
  251. * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
  252. * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
  253. * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
  254. * the mask of inliers/outliers.
  255. *
  256. * Regardless of the method, robust or not, the computed homography matrix is refined further (using
  257. * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
  258. * re-projection error even more.
  259. *
  260. * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
  261. * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
  262. * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
  263. * noise is rather small, use the default method (method=0).
  264. *
  265. * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
  266. * determined up to a scale. Thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
  267. * cannot be estimated, an empty one will be returned.
  268. *
  269. * @sa
  270. * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
  271. * perspectiveTransform
  272. */
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold mask:(Mat*)mask maxIters:(int)maxIters confidence:(double)confidence NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:mask:maxIters:confidence:));
/**
 * Finds a perspective transformation between two planes.
 *
 * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
 * or vector\<Point2f\> .
 * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
 * a vector\<Point2f\> .
 * @param method Method used to compute a homography matrix. The following methods are possible:
 * - **0** - a regular method using all the points, i.e., the least squares method
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * - REF: RHO - PROSAC-based robust method
 * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
 * (used in the RANSAC and RHO methods only). That is, if
 * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
 * then the point `$$i$$` is considered an outlier. If srcPoints and dstPoints are measured in pixels,
 * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
 * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
 * mask values are ignored.
 * @param maxIters The maximum number of RANSAC iterations.
 *
 * The function finds and returns the perspective transformation `$$H$$` between the source and the
 * destination planes:
 *
 * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
 *
 * so that the back-projection error
 *
 * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
 *
 * is minimized. If the parameter method is set to the default value 0, the function uses all the point
 * pairs to compute an initial homography estimate with a simple least-squares scheme.
 *
 * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
 * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
 * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
 * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded),
 * estimate the homography matrix using this subset and a simple least-squares algorithm, and then
 * compute the quality/goodness of the computed homography (the number of inliers for RANSAC or the
 * least median re-projection error for LMeDS). The best subset is then used to produce the initial
 * estimate of the homography matrix and the mask of inliers/outliers.
 *
 * Regardless of the method, robust or not, the computed homography matrix is refined further (using
 * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
 * re-projection error even more.
 *
 * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
 * noise is rather small, use the default method (method=0).
 *
 * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
 * determined up to a scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
 * cannot be estimated, an empty one will be returned.
 *
 * @sa
 * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
 * perspectiveTransform
 */
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold mask:(Mat*)mask maxIters:(int)maxIters NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:mask:maxIters:));
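//
// Example (editor's sketch, not part of the generated header): estimating a
// homography robustly with RANSAC. The enclosing class is assumed to be
// `Calib3d`, and `src`/`dst` are assumed to be CV_32FC2 Mats holding the
// matched point coordinates; 8 is the numeric value of cv::RANSAC.
//
//   Mat *inlierMask = [Mat new];
//   Mat *H = [Calib3d findHomography:src
//                          dstPoints:dst
//                             method:8 /* cv::RANSAC */
//              ransacReprojThreshold:3.0
//                               mask:inlierMask
//                           maxIters:2000];
//   // H is empty if no homography could be estimated; inlierMask marks
//   // the point pairs the robust method kept as inliers.
//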
/**
 * Finds a perspective transformation between two planes.
 *
 * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
 * or vector\<Point2f\> .
 * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
 * a vector\<Point2f\> .
 * @param method Method used to compute a homography matrix. The following methods are possible:
 * - **0** - a regular method using all the points, i.e., the least squares method
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * - REF: RHO - PROSAC-based robust method
 * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
 * (used in the RANSAC and RHO methods only). That is, if
 * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
 * then the point `$$i$$` is considered an outlier. If srcPoints and dstPoints are measured in pixels,
 * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
 * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
 * mask values are ignored.
 *
 * The function finds and returns the perspective transformation `$$H$$` between the source and the
 * destination planes:
 *
 * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
 *
 * so that the back-projection error
 *
 * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
 *
 * is minimized. If the parameter method is set to the default value 0, the function uses all the point
 * pairs to compute an initial homography estimate with a simple least-squares scheme.
 *
 * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
 * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
 * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
 * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded),
 * estimate the homography matrix using this subset and a simple least-squares algorithm, and then
 * compute the quality/goodness of the computed homography (the number of inliers for RANSAC or the
 * least median re-projection error for LMeDS). The best subset is then used to produce the initial
 * estimate of the homography matrix and the mask of inliers/outliers.
 *
 * Regardless of the method, robust or not, the computed homography matrix is refined further (using
 * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
 * re-projection error even more.
 *
 * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
 * noise is rather small, use the default method (method=0).
 *
 * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
 * determined up to a scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
 * cannot be estimated, an empty one will be returned.
 *
 * @sa
 * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
 * perspectiveTransform
 */
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold mask:(Mat*)mask NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:mask:));
/**
 * Finds a perspective transformation between two planes.
 *
 * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
 * or vector\<Point2f\> .
 * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
 * a vector\<Point2f\> .
 * @param method Method used to compute a homography matrix. The following methods are possible:
 * - **0** - a regular method using all the points, i.e., the least squares method
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * - REF: RHO - PROSAC-based robust method
 * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
 * (used in the RANSAC and RHO methods only). That is, if
 * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
 * then the point `$$i$$` is considered an outlier. If srcPoints and dstPoints are measured in pixels,
 * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
 *
 * The function finds and returns the perspective transformation `$$H$$` between the source and the
 * destination planes:
 *
 * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
 *
 * so that the back-projection error
 *
 * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
 *
 * is minimized. If the parameter method is set to the default value 0, the function uses all the point
 * pairs to compute an initial homography estimate with a simple least-squares scheme.
 *
 * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
 * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
 * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
 * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded),
 * estimate the homography matrix using this subset and a simple least-squares algorithm, and then
 * compute the quality/goodness of the computed homography (the number of inliers for RANSAC or the
 * least median re-projection error for LMeDS). The best subset is then used to produce the initial
 * estimate of the homography matrix and the mask of inliers/outliers.
 *
 * Regardless of the method, robust or not, the computed homography matrix is refined further (using
 * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
 * re-projection error even more.
 *
 * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
 * noise is rather small, use the default method (method=0).
 *
 * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
 * determined up to a scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
 * cannot be estimated, an empty one will be returned.
 *
 * @sa
 * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
 * perspectiveTransform
 */
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:));
/**
 * Finds a perspective transformation between two planes.
 *
 * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
 * or vector\<Point2f\> .
 * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
 * a vector\<Point2f\> .
 * @param method Method used to compute a homography matrix. The following methods are possible:
 * - **0** - a regular method using all the points, i.e., the least squares method
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * - REF: RHO - PROSAC-based robust method
 *
 * The function finds and returns the perspective transformation `$$H$$` between the source and the
 * destination planes:
 *
 * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
 *
 * so that the back-projection error
 *
 * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
 *
 * is minimized. If the parameter method is set to the default value 0, the function uses all the point
 * pairs to compute an initial homography estimate with a simple least-squares scheme.
 *
 * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
 * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
 * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
 * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded),
 * estimate the homography matrix using this subset and a simple least-squares algorithm, and then
 * compute the quality/goodness of the computed homography (the number of inliers for RANSAC or the
 * least median re-projection error for LMeDS). The best subset is then used to produce the initial
 * estimate of the homography matrix and the mask of inliers/outliers.
 *
 * Regardless of the method, robust or not, the computed homography matrix is refined further (using
 * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
 * re-projection error even more.
 *
 * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
 * noise is rather small, use the default method (method=0).
 *
 * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
 * determined up to a scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
 * cannot be estimated, an empty one will be returned.
 *
 * @sa
 * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
 * perspectiveTransform
 */
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:));
/**
 * Finds a perspective transformation between two planes.
 *
 * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
 * or vector\<Point2f\> .
 * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
 * a vector\<Point2f\> .
 *
 * The function finds and returns the perspective transformation `$$H$$` between the source and the
 * destination planes:
 *
 * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
 *
 * so that the back-projection error
 *
 * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
 *
 * is minimized. If the parameter method is set to the default value 0, the function uses all the point
 * pairs to compute an initial homography estimate with a simple least-squares scheme.
 *
 * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
 * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
 * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
 * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded),
 * estimate the homography matrix using this subset and a simple least-squares algorithm, and then
 * compute the quality/goodness of the computed homography (the number of inliers for RANSAC or the
 * least median re-projection error for LMeDS). The best subset is then used to produce the initial
 * estimate of the homography matrix and the mask of inliers/outliers.
 *
 * Regardless of the method, robust or not, the computed homography matrix is refined further (using
 * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
 * re-projection error even more.
 *
 * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
 * noise is rather small, use the default method (method=0).
 *
 * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
 * determined up to a scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
 * cannot be estimated, an empty one will be returned.
 *
 * @sa
 * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
 * perspectiveTransform
 */
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:));
//
// Mat cv::findHomography(Mat srcPoints, Mat dstPoints, Mat& mask, UsacParams params)
//
+ (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints mask:(Mat*)mask params:(UsacParams*)params NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:mask:params:));
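//
// Example (editor's sketch): the USAC variant of the same estimation. It is
// assumed here that UsacParams exposes the default initializer generated for
// cv::UsacParams, and that the enclosing class is `Calib3d`; `src`/`dst` are
// the same hypothetical CV_32FC2 point Mats as above.
//
//   UsacParams *params = [[UsacParams alloc] init];
//   Mat *inlierMask = [Mat new];
//   Mat *H = [Calib3d findHomography:src dstPoints:dst mask:inlierMask params:params];
//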
//
// Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
//
/**
 * Computes an RQ decomposition of 3x3 matrices.
 *
 * @param src 3x3 input matrix.
 * @param mtxR Output 3x3 upper-triangular matrix.
 * @param mtxQ Output 3x3 orthogonal matrix.
 * @param Qx Optional output 3x3 rotation matrix around x-axis.
 * @param Qy Optional output 3x3 rotation matrix around y-axis.
 * @param Qz Optional output 3x3 rotation matrix around z-axis.
 *
 * The function computes an RQ decomposition using the given rotations. This function is used in
 * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
 * and a rotation matrix.
 *
 * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
 * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
 * sequence of rotations about the three principal axes that results in the same orientation of an
 * object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler
 * angles are only one of the possible solutions.
 */
+ (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ Qx:(Mat*)Qx Qy:(Mat*)Qy Qz:(Mat*)Qz NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:Qx:Qy:Qz:));
/**
 * Computes an RQ decomposition of 3x3 matrices.
 *
 * @param src 3x3 input matrix.
 * @param mtxR Output 3x3 upper-triangular matrix.
 * @param mtxQ Output 3x3 orthogonal matrix.
 * @param Qx Optional output 3x3 rotation matrix around x-axis.
 * @param Qy Optional output 3x3 rotation matrix around y-axis.
 *
 * The function computes an RQ decomposition using the given rotations. This function is used in
 * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
 * and a rotation matrix.
 *
 * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
 * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
 * sequence of rotations about the three principal axes that results in the same orientation of an
 * object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler
 * angles are only one of the possible solutions.
 */
+ (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ Qx:(Mat*)Qx Qy:(Mat*)Qy NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:Qx:Qy:));
/**
 * Computes an RQ decomposition of 3x3 matrices.
 *
 * @param src 3x3 input matrix.
 * @param mtxR Output 3x3 upper-triangular matrix.
 * @param mtxQ Output 3x3 orthogonal matrix.
 * @param Qx Optional output 3x3 rotation matrix around x-axis.
 *
 * The function computes an RQ decomposition using the given rotations. This function is used in
 * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
 * and a rotation matrix.
 *
 * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
 * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
 * sequence of rotations about the three principal axes that results in the same orientation of an
 * object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler
 * angles are only one of the possible solutions.
 */
+ (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ Qx:(Mat*)Qx NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:Qx:));
/**
 * Computes an RQ decomposition of 3x3 matrices.
 *
 * @param src 3x3 input matrix.
 * @param mtxR Output 3x3 upper-triangular matrix.
 * @param mtxQ Output 3x3 orthogonal matrix.
 *
 * The function computes an RQ decomposition using the given rotations. This function is used in
 * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
 * and a rotation matrix.
 *
 * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
 * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
 * sequence of rotations about the three principal axes that results in the same orientation of an
 * object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler
 * angles are only one of the possible solutions.
 */
+ (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:));
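//
// Example (editor's sketch): RQ-decomposing a 3x3 matrix. `M` is assumed to
// be a 3x3 CV_64F Mat and the enclosing class `Calib3d`.
//
//   Mat *mtxR = [Mat new];
//   Mat *mtxQ = [Mat new];
//   Double3 *eulerDeg = [Calib3d RQDecomp3x3:M mtxR:mtxR mtxQ:mtxQ];
//   // eulerDeg holds the three Euler angles in degrees; M = mtxR * mtxQ,
//   // with mtxR upper-triangular and mtxQ orthogonal.
//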
//
// void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
//
/**
 * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
 *
 * @param projMatrix 3x4 input projection matrix P.
 * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
 * @param rotMatrix Output 3x3 external rotation matrix R.
 * @param transVect Output 4x1 translation vector T.
 * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
 * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
 * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
 * @param eulerAngles Optional three-element vector containing three Euler angles of rotation in
 * degrees.
 *
 * The function computes a decomposition of a projection matrix into a calibration and a rotation
 * matrix and the position of a camera.
 *
 * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
 * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
 * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The
 * returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
 *
 * The function is based on RQDecomp3x3 .
 */
+ (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX rotMatrixY:(Mat*)rotMatrixY rotMatrixZ:(Mat*)rotMatrixZ eulerAngles:(Mat*)eulerAngles NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:rotMatrixY:rotMatrixZ:eulerAngles:));
/**
 * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
 *
 * @param projMatrix 3x4 input projection matrix P.
 * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
 * @param rotMatrix Output 3x3 external rotation matrix R.
 * @param transVect Output 4x1 translation vector T.
 * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
 * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
 * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
 *
 * The function computes a decomposition of a projection matrix into a calibration and a rotation
 * matrix and the position of a camera.
 *
 * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
 * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
 * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The
 * returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
 *
 * The function is based on RQDecomp3x3 .
 */
+ (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX rotMatrixY:(Mat*)rotMatrixY rotMatrixZ:(Mat*)rotMatrixZ NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:rotMatrixY:rotMatrixZ:));
/**
 * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
 *
 * @param projMatrix 3x4 input projection matrix P.
 * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
 * @param rotMatrix Output 3x3 external rotation matrix R.
 * @param transVect Output 4x1 translation vector T.
 * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
 * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
 *
 * The function computes a decomposition of a projection matrix into a calibration and a rotation
 * matrix and the position of a camera.
 *
 * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
 * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
 * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The
 * returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
 *
 * The function is based on RQDecomp3x3 .
 */
+ (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX rotMatrixY:(Mat*)rotMatrixY NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:rotMatrixY:));
/**
 * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
 *
 * @param projMatrix 3x4 input projection matrix P.
 * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
 * @param rotMatrix Output 3x3 external rotation matrix R.
 * @param transVect Output 4x1 translation vector T.
 * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
 *
 * The function computes a decomposition of a projection matrix into a calibration and a rotation
 * matrix and the position of a camera.
 *
 * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
 * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
 * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The
 * returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
 *
 * The function is based on RQDecomp3x3 .
 */
+ (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:));
/**
 * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
 *
 * @param projMatrix 3x4 input projection matrix P.
 * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
 * @param rotMatrix Output 3x3 external rotation matrix R.
 * @param transVect Output 4x1 translation vector T.
 *
 * The function computes a decomposition of a projection matrix into a calibration and a rotation
 * matrix and the position of a camera.
 *
 * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
 * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
 * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . The
 * returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
 *
 * The function is based on RQDecomp3x3 .
 */
+ (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:));
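//
// Example (editor's sketch): splitting a 3x4 projection matrix `P` (assumed
// to be a CV_64F Mat; the enclosing class is assumed to be `Calib3d`).
//
//   Mat *K = [Mat new], *R = [Mat new], *t = [Mat new];
//   [Calib3d decomposeProjectionMatrix:P cameraMatrix:K rotMatrix:R transVect:t];
//   // K is the 3x3 intrinsic matrix, R the 3x3 rotation, and t the 4x1
//   // camera position in homogeneous coordinates.
//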
//
// void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
//
/**
 * Computes partial derivatives of the matrix product for each multiplied matrix.
 *
 * @param A First multiplied matrix.
 * @param B Second multiplied matrix.
 * @param dABdA First output derivative matrix d(A\*B)/dA of size
 * `$$\texttt{A.rows*B.cols} \times \texttt{A.rows*A.cols}$$` .
 * @param dABdB Second output derivative matrix d(A\*B)/dB of size
 * `$$\texttt{A.rows*B.cols} \times \texttt{B.rows*B.cols}$$` .
 *
 * The function computes partial derivatives of the elements of the matrix product `$$A*B$$` with regard to
 * the elements of each of the two input matrices. The function is used to compute the Jacobian
 * matrices in #stereoCalibrate but can also be used in any other similar optimization function.
 */
+ (void)matMulDeriv:(Mat*)A B:(Mat*)B dABdA:(Mat*)dABdA dABdB:(Mat*)dABdB NS_SWIFT_NAME(matMulDeriv(A:B:dABdA:dABdB:));
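//
// Example (editor's sketch): Jacobians of a matrix product. For A of size
// m x n and B of size n x p, dABdA comes back as (m*p) x (m*n) and dABdB as
// (m*p) x (n*p). `A` and `B` are assumed to be CV_64F Mats and the enclosing
// class `Calib3d`.
//
//   Mat *dABdA = [Mat new];
//   Mat *dABdB = [Mat new];
//   [Calib3d matMulDeriv:A B:B dABdA:dABdA dABdB:dABdB];
//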
//
// void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
//
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
 * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
 * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
 * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
 * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
 * @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 dt3dt1:(Mat*)dt3dt1 dt3dr2:(Mat*)dt3dr2 dt3dt2:(Mat*)dt3dt2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:dt3dt1:dt3dr2:dt3dt2:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
 * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
 * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
 * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
 * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 dt3dt1:(Mat*)dt3dt1 dt3dr2:(Mat*)dt3dr2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:dt3dt1:dt3dr2:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
 * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
 * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
 * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 dt3dt1:(Mat*)dt3dt1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:dt3dt1:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
 * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
 * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
 * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:));
/**
 * Combines two rotation-and-shift transformations.
 *
 * @param rvec1 First rotation vector.
 * @param tvec1 First translation vector.
 * @param rvec2 Second rotation vector.
 * @param tvec2 Second translation vector.
 * @param rvec3 Output rotation vector of the superposition.
 * @param tvec3 Output translation vector of the superposition.
 *
 * The function computes:
 *
 * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
 *
 * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
 * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
 *
 * Also, the function can compute the derivatives of the output vectors with regard to the input
 * vectors (see matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in
 * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
 * function that contains a matrix multiplication.
 */
+ (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:));
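//
// Example (editor's sketch): chaining two poses. Per the formula above,
// applying (rvec1, tvec1) first and then (rvec2, tvec2) yields
// (rvec3, tvec3). The input Mats are assumed to be 3x1 CV_64F vectors and
// the enclosing class `Calib3d`.
//
//   Mat *rvec3 = [Mat new], *tvec3 = [Mat new];
//   [Calib3d composeRT:rvec1 tvec1:tvec1 rvec2:rvec2 tvec2:tvec2
//                rvec3:rvec3 tvec3:tvec3];
//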
//
// void cv::projectPoints(Mat objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, Mat distCoeffs, Mat& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
//
/**
 * Projects 3D points to an image plane.
 *
 * @param objectPoints Array of object points expressed w.r.t. the world coordinate frame. A 3xN/Nx3
 * 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
 * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
 * basis from world to camera coordinate system, see REF: calibrateCamera for details.
 * @param tvec The translation vector, see parameter description above.
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$` . If the vector is empty, the zero distortion coefficients are assumed.
 * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
 * vector\<Point2f\> .
 * @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
 * points with respect to components of the rotation vector, translation vector, focal lengths,
 * coordinates of the principal point and the distortion coefficients. In the old interface different
 * components of the jacobian are returned via different output parameters.
 * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
 * function assumes that the aspect ratio (`$$f_x / f_y$$`) is fixed and correspondingly adjusts the
 * jacobian matrix.
 *
 * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
 * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
 * derivatives of image point coordinates (as functions of all the input parameters) with respect to
 * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
 * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
 * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
 * parameters.
 *
 * NOTE: By setting rvec = tvec = `$$[0, 0, 0]$$`, or by setting cameraMatrix to a 3x3 identity matrix,
 * or by passing zero distortion coefficients, one can get various useful partial cases of the
 * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
 * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
 */
+ (void)projectPoints:(Mat*)objectPoints rvec:(Mat*)rvec tvec:(Mat*)tvec cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imagePoints:(Mat*)imagePoints jacobian:(Mat*)jacobian aspectRatio:(double)aspectRatio NS_SWIFT_NAME(projectPoints(objectPoints:rvec:tvec:cameraMatrix:distCoeffs:imagePoints:jacobian:aspectRatio:));
/**
 * Projects 3D points to an image plane.
 *
 * @param objectPoints Array of object points expressed w.r.t. the world coordinate frame. A 3xN/Nx3
 * 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
 * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
 * basis from world to camera coordinate system, see REF: calibrateCamera for details.
 * @param tvec The translation vector, see parameter description above.
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$` . If the vector is empty, the zero distortion coefficients are assumed.
 * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
 * vector\<Point2f\> .
 * @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
 * points with respect to components of the rotation vector, translation vector, focal lengths,
 * coordinates of the principal point and the distortion coefficients. In the old interface different
 * components of the jacobian are returned via different output parameters.
 *
 * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
 * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
 * derivatives of image point coordinates (as functions of all the input parameters) with respect to
 * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
 * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
 * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
 * parameters.
 *
 * NOTE: By setting rvec = tvec = `$$[0, 0, 0]$$`, or by setting cameraMatrix to a 3x3 identity matrix,
 * or by passing zero distortion coefficients, one can get various useful partial cases of the
 * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
 * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
 */
+ (void)projectPoints:(Mat*)objectPoints rvec:(Mat*)rvec tvec:(Mat*)tvec cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imagePoints:(Mat*)imagePoints jacobian:(Mat*)jacobian NS_SWIFT_NAME(projectPoints(objectPoints:rvec:tvec:cameraMatrix:distCoeffs:imagePoints:jacobian:));
/**
 * Projects 3D points to an image plane.
 *
 * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
 * 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
 * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
 * basis from world to camera coordinate system, see REF: calibrateCamera for details.
 * @param tvec The translation vector, see parameter description above.
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$` . If the vector is empty, the zero distortion coefficients are assumed.
 * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
 * vector\<Point2f\> .
 *
 * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
 * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
 * derivatives of image point coordinates (as functions of all the input parameters) with respect to
 * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
 * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
 * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
 * parameters.
 *
 * NOTE: By setting rvec = tvec = `$$[0, 0, 0]$$`, or by setting cameraMatrix to a 3x3 identity matrix,
 * or by passing zero distortion coefficients, one can get various useful partial cases of the
 * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
 * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
 */
+ (void)projectPoints:(Mat*)objectPoints rvec:(Mat*)rvec tvec:(Mat*)tvec cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imagePoints:(Mat*)imagePoints NS_SWIFT_NAME(projectPoints(objectPoints:rvec:tvec:cameraMatrix:distCoeffs:imagePoints:));
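/*
 * Sketch of the partial case from the NOTE above (Swift, same binding
 * assumptions as the earlier sketch): zero extrinsics plus an identity camera
 * matrix make projectPoints apply only the distortion model to normalized
 * input coordinates. The Mat.zeros/Mat.eye helpers and the distortion values
 * are assumptions for illustration.
 *
 *     let normalized = Mat(rows: 1, cols: 1, type: CvType.CV_64FC3)   // one point (x, y, 1)
 *     _ = try! normalized.put(row: 0, col: 0, data: [0.25, -0.1, 1.0])
 *     let zero3 = Mat.zeros(3, cols: 1, type: CvType.CV_64F)          // rvec = tvec = [0, 0, 0]
 *     let identity = Mat.eye(3, cols: 3, type: CvType.CV_64F)
 *     let k = Mat(rows: 1, cols: 5, type: CvType.CV_64F)              // k1 k2 p1 p2 k3
 *     _ = try! k.put(row: 0, col: 0, data: [-0.28, 0.07, 0.0, 0.0, 0.0])
 *     let distorted = Mat()
 *     Calib3d.projectPoints(objectPoints: normalized, rvec: zero3, tvec: zero3,
 *                           cameraMatrix: identity, distCoeffs: k, imagePoints: distorted)
 */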
//
// bool cv::solvePnP(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
//
/**
 * Finds an object pose from 3D-2D point correspondences.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
 * coordinate frame to the camera coordinate frame, using different methods:
 * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
 * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
 * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
 * Number of input points must be 4. Object points must be defined in the following order:
 * - point 0: [-squareLength / 2, squareLength / 2, 0]
 * - point 1: [ squareLength / 2, squareLength / 2, 0]
 * - point 2: [ squareLength / 2, -squareLength / 2, 0]
 * - point 3: [-squareLength / 2, -squareLength / 2, 0]
 * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
 *
 * More information about Perspective-n-Point is described in REF: calib3d_solvePnP
 *
 * NOTE:
 * - An example of how to use solvePnP for planar augmented reality can be found at
 * opencv_source_code/samples/python/plane_ar.py
 * - If you are using Python:
 * - Numpy array slices won't work as input because solvePnP requires contiguous
 * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
 * modules/calib3d/src/solvepnp.cpp version 2.4.9)
 * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
 * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
 * which requires 2-channel information.
 * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
 * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
 * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
 * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
 * unstable and sometimes give completely wrong results. If you pass one of these two
 * flags, REF: SOLVEPNP_EPNP method will be used instead.
 * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
 * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
 * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
 * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
 * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
 * global solution to converge.
 * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
 * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
 * Number of input points must be 4. Object points must be defined in the following order:
 * - point 0: [-squareLength / 2, squareLength / 2, 0]
 * - point 1: [ squareLength / 2, squareLength / 2, 0]
 * - point 2: [ squareLength / 2, -squareLength / 2, 0]
 * - point 3: [-squareLength / 2, -squareLength / 2, 0]
 * - With REF: SOLVEPNP_SQPNP input points must be >= 3.
 */
+ (BOOL)solvePnP:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(int)flags NS_SWIFT_NAME(solvePnP(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:flags:));
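/*
 * Usage sketch (Swift, same binding assumptions as above): marker pose with
 * SOLVEPNP_IPPE_SQUARE, whose four object points must follow the order
 * documented above. Hedged: the raw flag value 7 standing for
 * SOLVEPNP_IPPE_SQUARE, the intrinsics, and the detected corners are
 * assumptions for illustration.
 *
 *     let squareLength = 0.05                                     // 5 cm marker
 *     let h = squareLength / 2
 *     let corners = Mat(rows: 4, cols: 3, type: CvType.CV_64F)
 *     _ = try! corners.put(row: 0, col: 0, data: [-h,  h, 0,
 *                                                  h,  h, 0,
 *                                                  h, -h, 0,
 *                                                 -h, -h, 0])
 *     let detected = Mat(rows: 4, cols: 2, type: CvType.CV_64F)   // from a marker detector
 *     _ = try! detected.put(row: 0, col: 0, data: [300.0, 200, 340, 200, 340, 240, 300, 240])
 *     let K = Mat(rows: 3, cols: 3, type: CvType.CV_64F)
 *     _ = try! K.put(row: 0, col: 0, data: [800, 0, 320, 0, 800, 240, 0, 0, 1] as [Double])
 *     let rvec = Mat(); let tvec = Mat()
 *     let ok = Calib3d.solvePnP(objectPoints: corners, imagePoints: detected,
 *                               cameraMatrix: K, distCoeffs: Mat(),
 *                               rvec: rvec, tvec: tvec,
 *                               useExtrinsicGuess: false,
 *                               flags: 7)                         // SOLVEPNP_IPPE_SQUARE (assumed raw value)
 */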
/**
 * Finds an object pose from 3D-2D point correspondences.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
 * coordinate frame to the camera coordinate frame, using different methods:
 * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
 * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
 * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
 * Number of input points must be 4. Object points must be defined in the following order:
 * - point 0: [-squareLength / 2, squareLength / 2, 0]
 * - point 1: [ squareLength / 2, squareLength / 2, 0]
 * - point 2: [ squareLength / 2, -squareLength / 2, 0]
 * - point 3: [-squareLength / 2, -squareLength / 2, 0]
 * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 *
 * More information about Perspective-n-Point is described in REF: calib3d_solvePnP
 *
 * NOTE:
 * - An example of how to use solvePnP for planar augmented reality can be found at
 * opencv_source_code/samples/python/plane_ar.py
 * - If you are using Python:
 * - Numpy array slices won't work as input because solvePnP requires contiguous
 * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
 * modules/calib3d/src/solvepnp.cpp version 2.4.9)
 * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
 * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
 * which requires 2-channel information.
 * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
 * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
 * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
 * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
 * unstable and sometimes give completely wrong results. If you pass one of these two
 * flags, REF: SOLVEPNP_EPNP method will be used instead.
 * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
 * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
 * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
 * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
 * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
 * global solution to converge.
 * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
 * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
 * Number of input points must be 4. Object points must be defined in the following order:
 * - point 0: [-squareLength / 2, squareLength / 2, 0]
 * - point 1: [ squareLength / 2, squareLength / 2, 0]
 * - point 2: [ squareLength / 2, -squareLength / 2, 0]
 * - point 3: [-squareLength / 2, -squareLength / 2, 0]
 * - With REF: SOLVEPNP_SQPNP input points must be >= 3.
 */
+ (BOOL)solvePnP:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(solvePnP(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:));
/**
 * Finds an object pose from 3D-2D point correspondences.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
 * coordinate frame to the camera coordinate frame, using different methods:
 * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
 * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
 * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
 * Number of input points must be 4. Object points must be defined in the following order:
 * - point 0: [-squareLength / 2, squareLength / 2, 0]
 * - point 1: [ squareLength / 2, squareLength / 2, 0]
 * - point 2: [ squareLength / 2, -squareLength / 2, 0]
 * - point 3: [-squareLength / 2, -squareLength / 2, 0]
 * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 *
 * More information about Perspective-n-Point is described in REF: calib3d_solvePnP
 *
 * NOTE:
 * - An example of how to use solvePnP for planar augmented reality can be found at
 * opencv_source_code/samples/python/plane_ar.py
 * - If you are using Python:
 * - Numpy array slices won't work as input because solvePnP requires contiguous
 * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
 * modules/calib3d/src/solvepnp.cpp version 2.4.9)
 * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
 * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
 * which requires 2-channel information.
 * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
 * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
 * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
 * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
 * unstable and sometimes give completely wrong results. If you pass one of these two
 * flags, REF: SOLVEPNP_EPNP method will be used instead.
 * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
 * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
 * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
 * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
 * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
 * global solution to converge.
 * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
 * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
 * Number of input points must be 4. Object points must be defined in the following order:
 * - point 0: [-squareLength / 2, squareLength / 2, 0]
 * - point 1: [ squareLength / 2, squareLength / 2, 0]
 * - point 2: [ squareLength / 2, -squareLength / 2, 0]
 * - point 3: [-squareLength / 2, -squareLength / 2, 0]
 * - With REF: SOLVEPNP_SQPNP input points must be >= 3.
 */
+ (BOOL)solvePnP:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnP(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
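/*
 * Minimal call with the default iterative method (Swift, same binding
 * assumptions as above). Calib3d.Rodrigues(src:dst:), declared elsewhere in
 * this header, is assumed for converting the rotation vector into a 3x3
 * matrix; the correspondences are illustrative.
 *
 *     let obj = Mat(rows: 4, cols: 3, type: CvType.CV_64F)    // 4 coplanar object points
 *     _ = try! obj.put(row: 0, col: 0, data: [0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0] as [Double])
 *     let img = Mat(rows: 4, cols: 2, type: CvType.CV_64F)    // their detected projections
 *     _ = try! img.put(row: 0, col: 0, data: [320, 240, 480, 240, 480, 400, 320, 400] as [Double])
 *     let K = Mat(rows: 3, cols: 3, type: CvType.CV_64F)
 *     _ = try! K.put(row: 0, col: 0, data: [800, 0, 320, 0, 800, 240, 0, 0, 1] as [Double])
 *     let rvec = Mat(); let tvec = Mat()
 *     if Calib3d.solvePnP(objectPoints: obj, imagePoints: img,
 *                         cameraMatrix: K, distCoeffs: Mat(),
 *                         rvec: rvec, tvec: tvec) {
 *         let R = Mat()
 *         Calib3d.Rodrigues(src: rvec, dst: R)                // world-to-camera rotation matrix
 *     }
 */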
//
// bool cv::solvePnPRansac(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
//
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 * @param iterationsCount Number of iterations.
 * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
 * is the maximum allowed distance between the observed and computed point projections to consider it
 * an inlier.
 * @param confidence The probability that the algorithm produces a useful result.
 * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
 * @param flags Method for solving a PnP problem (see REF: solvePnP ).
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError confidence:(double)confidence inliers:(Mat*)inliers flags:(int)flags NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:confidence:inliers:flags:));
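/*
 * Usage sketch (Swift, reusing obj, img, K, rvec, tvec from the solvePnP
 * sketch above): RANSAC pose with an inlier mask. Hedged: the raw flag value 1
 * standing for SOLVEPNP_EPNP is an assumption, and a real correspondence set
 * would be much larger than four points.
 *
 *     let inliers = Mat()
 *     let ok = Calib3d.solvePnPRansac(objectPoints: obj, imagePoints: img,
 *                                     cameraMatrix: K, distCoeffs: Mat(),
 *                                     rvec: rvec, tvec: tvec,
 *                                     useExtrinsicGuess: false,
 *                                     iterationsCount: 200,
 *                                     reprojectionError: 4.0,   // pixel threshold for inliers
 *                                     confidence: 0.99,
 *                                     inliers: inliers,
 *                                     flags: 1)                 // SOLVEPNP_EPNP (assumed raw value)
 *     // inliers now holds the indices into obj/img that survived RANSAC
 */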
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 * @param iterationsCount Number of iterations.
 * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
 * is the maximum allowed distance between the observed and computed point projections to consider it
 * an inlier.
 * @param confidence The probability that the algorithm produces a useful result.
 * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError confidence:(double)confidence inliers:(Mat*)inliers NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:confidence:inliers:));
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 * @param iterationsCount Number of iterations.
 * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
 * is the maximum allowed distance between the observed and computed point projections to consider it
 * an inlier.
 * @param confidence The probability that the algorithm produces a useful result.
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError confidence:(double)confidence NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:confidence:));
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 * @param iterationsCount Number of iterations.
 * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
 * is the maximum allowed distance between the observed and computed point projections to consider it
 * an inlier.
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:));
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 * @param iterationsCount Number of iterations.
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:));
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
 * the provided rvec and tvec values as initial approximations of the rotation and translation
 * vectors, respectively, and further optimizes them.
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:));
/**
 * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Output translation vector.
 *
 * The function estimates an object pose given a set of object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
 * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
 * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
 * makes the function resistant to outliers.
 *
 * NOTE:
 * - An example of how to use solvePnPRansac for object detection can be found at
 * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
 * - The default method used to estimate the camera pose for the Minimal Sample Sets step
 * is #SOLVEPNP_EPNP. Exceptions are:
 * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
 * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
 * - The method used to estimate the camera pose using all the inliers is defined by the
 * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
 * the method #SOLVEPNP_EPNP will be used instead.
 */
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
//
// bool cv::solvePnPRansac(Mat objectPoints, Mat imagePoints, Mat& cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
//
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec inliers:(Mat*)inliers params:(UsacParams*)params NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:inliers:params:));
+ (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec inliers:(Mat*)inliers NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:inliers:));
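/*
 * The two UsacParams overloads above are generated without doc comments; per
 * the C++ prototype, cameraMatrix is passed by reference (Mat&) and may be
 * updated, and UsacParams() supplies the USAC defaults. A hedged Swift sketch,
 * assuming a default UsacParams() initializer and reusing obj, img, and K from
 * the solvePnP sketch above:
 *
 *     let inliers = Mat()
 *     let rvec = Mat(); let tvec = Mat()
 *     let ok = Calib3d.solvePnPRansac(objectPoints: obj, imagePoints: img,
 *                                     cameraMatrix: K, distCoeffs: Mat(),
 *                                     rvec: rvec, tvec: tvec,
 *                                     inliers: inliers, params: UsacParams())
 */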
//
// int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags)
//
/**
 * Finds an object pose from 3 3D-2D point correspondences.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
 * 1x3/3x1 3-channel. vector\<Point3f\> can be also passed here.
 * @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
 * vector\<Point2f\> can be also passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvecs Output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
 * the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
 * @param tvecs Output translation vectors.
 * @param flags Method for solving a P3P problem:
 * - REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
 * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
 * - REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
 * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
 *
 * The function estimates the object pose given 3 object points, their corresponding image
 * projections, as well as the camera intrinsic matrix and the distortion coefficients.
 *
 * NOTE:
 * The solutions are sorted by reprojection errors (lowest to highest).
 */
+ (int)solveP3P:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(solveP3P(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:flags:));
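/*
 * Usage sketch (Swift, same binding assumptions as above): solveP3P returns
 * the number of solutions (up to 4) and appends one Mat per solution to the
 * NSMutableArray outputs, sorted by reprojection error per the NOTE above.
 * Hedged: the raw flag value 2 standing for SOLVEPNP_P3P and K from the
 * earlier sketch are assumptions.
 *
 *     let obj3 = Mat(rows: 3, cols: 3, type: CvType.CV_64F)
 *     _ = try! obj3.put(row: 0, col: 0, data: [0, 0, 0, 1, 0, 0, 0, 1, 0] as [Double])
 *     let img3 = Mat(rows: 3, cols: 2, type: CvType.CV_64F)
 *     _ = try! img3.put(row: 0, col: 0, data: [320, 240, 480, 250, 310, 400] as [Double])
 *     let rvecs = NSMutableArray()
 *     let tvecs = NSMutableArray()
 *     let n = Calib3d.solveP3P(objectPoints: obj3, imagePoints: img3,
 *                              cameraMatrix: K, distCoeffs: Mat(),
 *                              rvecs: rvecs, tvecs: tvecs,
 *                              flags: 2)              // SOLVEPNP_P3P (assumed raw value)
 *     for i in 0..<Int(n) {
 *         let rvec = rvecs[i] as! Mat                 // candidate poses; disambiguate
 *         let tvec = tvecs[i] as! Mat                 // with a 4th point or cheirality
 *         _ = (rvec, tvec)
 *     }
 */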
//
// void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON))
//
/**
 * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
 * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
 * where N is the number of points. vector\<Point3d\> can also be passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can also be passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 * @param tvec Input/Output translation vector. Input values are used as an initial solution.
 * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
 *
 * The function refines the object pose given at least 3 object points, their corresponding image
 * projections, an initial solution for the rotation and translation vector,
 * as well as the camera intrinsic matrix and the distortion coefficients.
 * The function minimizes the projection error with respect to the rotation and the translation vectors, according
 * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
 */
+ (void)solvePnPRefineLM:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec criteria:(TermCriteria*)criteria NS_SWIFT_NAME(solvePnPRefineLM(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:criteria:));
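/*
 * Usage sketch (Swift, reusing obj, img, K, rvec, tvec from the solvePnP
 * sketch above): refining an already-estimated pose. Hedged: the
 * TermCriteria(type:maxCount:epsilon:) initializer mirrors the C++ default in
 * the prototype above, and the raw type value 3 standing for COUNT + EPS is an
 * assumption.
 *
 *     let criteria = TermCriteria(type: 3, maxCount: 20,
 *                                 epsilon: Double(Float.ulpOfOne))  // FLT_EPSILON
 *     Calib3d.solvePnPRefineLM(objectPoints: obj, imagePoints: img,
 *                              cameraMatrix: K, distCoeffs: Mat(),
 *                              rvec: rvec, tvec: tvec,              // initial pose in, refined out
 *                              criteria: criteria)
 */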
/**
 * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
 * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
 * where N is the number of points. vector\<Point3d\> can also be passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can also be passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 * @param tvec Input/Output translation vector. Input values are used as an initial solution.
 *
 * The function refines the object pose given at least 3 object points, their corresponding image
 * projections, an initial solution for the rotation and translation vector,
 * as well as the camera intrinsic matrix and the distortion coefficients.
 * The function minimizes the projection error with respect to the rotation and the translation vectors, according
 * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
 */
+ (void)solvePnPRefineLM:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPRefineLM(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
//
// void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
//
/**
 * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
 * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
 * where N is the number of points. vector\<Point3d\> can also be passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can also be passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 * @param tvec Input/Output translation vector. Input values are used as an initial solution.
 * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
 * @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the `$$\alpha$$`
 * gain in the Damped Gauss-Newton formulation.
 *
 * The function refines the object pose given at least 3 object points, their corresponding image
 * projections, an initial solution for the rotation and translation vector,
 * as well as the camera intrinsic matrix and the distortion coefficients.
 * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
 * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
 */
+ (void)solvePnPRefineVVS:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec criteria:(TermCriteria*)criteria VVSlambda:(double)VVSlambda NS_SWIFT_NAME(solvePnPRefineVVS(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:criteria:VVSlambda:));
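/*
 * Usage sketch (Swift, same assumptions as the solvePnPRefineLM sketch above):
 * the VVS refinement additionally exposes the control-law gain; VVSlambda: 1
 * reproduces the default of the C++ prototype shown above.
 *
 *     Calib3d.solvePnPRefineVVS(objectPoints: obj, imagePoints: img,
 *                               cameraMatrix: K, distCoeffs: Mat(),
 *                               rvec: rvec, tvec: tvec,
 *                               criteria: TermCriteria(type: 3, maxCount: 20,
 *                                                      epsilon: Double(Float.ulpOfOne)),
 *                               VVSlambda: 1)
 */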
/**
 * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
 * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
 * where N is the number of points. vector\<Point3d\> can also be passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can also be passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 * @param tvec Input/Output translation vector. Input values are used as an initial solution.
 * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
 *
 * The function refines the object pose given at least 3 object points, their corresponding image
 * projections, an initial solution for the rotation and translation vector,
 * as well as the camera intrinsic matrix and the distortion coefficients.
 * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
 * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
 */
+ (void)solvePnPRefineVVS:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec criteria:(TermCriteria*)criteria NS_SWIFT_NAME(solvePnPRefineVVS(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:criteria:));
/**
 * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
 * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
 *
 * @see `REF: calib3d_solvePnP`
 *
 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
 * where N is the number of points. vector\<Point3d\> can also be passed here.
 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
 * where N is the number of points. vector\<Point2d\> can also be passed here.
 * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
 * assumed.
 * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 * @param tvec Input/Output translation vector. Input values are used as an initial solution.
 *
 * The function refines the object pose given at least 3 object points, their corresponding image
 * projections, an initial solution for the rotation and translation vector,
 * as well as the camera intrinsic matrix and the distortion coefficients.
 * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
 * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
 */
+ (void)solvePnPRefineVVS:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPRefineVVS(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
//
// int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
//
  1807. /**
  1808. * Finds an object pose from 3D-2D point correspondences.
  1809. *
  1810. * @see `REF: calib3d_solvePnP`
  1811. *
  1812. * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  1813. * couple), depending on the number of input points and the chosen method:
  1814. * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  1815. * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  1816. * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  1817. * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  1818. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  1819. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  1820. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1821. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1822. * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  1823. * Only 1 solution is returned.
  1824. *
1825. * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1826. * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
1827. * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1828. * where N is the number of points. vector\<Point2d\> can also be passed here.
  1829. * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
  1830. * @param distCoeffs Input vector of distortion coefficients
  1831. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  1832. * assumed.
1833. * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
  1834. * the model coordinate system to the camera coordinate system.
  1835. * @param tvecs Vector of output translation vectors.
  1836. * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
  1837. * the provided rvec and tvec values as initial approximations of the rotation and translation
  1838. * vectors, respectively, and further optimizes them.
  1839. * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
  1840. * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
  1841. * and useExtrinsicGuess is set to true.
  1842. * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
  1843. * and useExtrinsicGuess is set to true.
  1844. * @param reprojectionError Optional vector of reprojection error, that is the RMS error
  1845. * (`$$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} $$`) between the input image points
  1846. * and the 3D object points projected with the estimated pose.
  1847. *
  1848. * More information is described in REF: calib3d_solvePnP
  1849. *
  1850. * NOTE:
  1851. * - An example of how to use solvePnP for planar augmented reality can be found at
  1852. * opencv_source_code/samples/python/plane_ar.py
  1853. * - If you are using Python:
  1854. * - Numpy array slices won't work as input because solvePnP requires contiguous
  1855. * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  1856. * modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1857. * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  1858. * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1859. * which requires 2-channel information.
  1860. * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  1861. * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  1862. * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  1863. * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
  1864. * unstable and sometimes give completely wrong results. If you pass one of these two
  1865. * flags, REF: SOLVEPNP_EPNP method will be used instead.
  1866. * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
  1867. * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  1868. * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  1869. * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  1870. * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  1871. * global solution to converge.
  1872. * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
  1873. * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
  1874. * Number of input points must be 4. Object points must be defined in the following order:
  1875. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  1876. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  1877. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1878. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1879. */
  1880. + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags rvec:(Mat*)rvec tvec:(Mat*)tvec reprojectionError:(Mat*)reprojectionError NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:rvec:tvec:reprojectionError:));
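/*
 * Example (illustrative sketch): enumerating both REF: SOLVEPNP_IPPE_SQUARE solutions for a
 * square marker with the underlying C++ API and keeping the pose with the smallest RMS
 * reprojection error. squareLength is a hypothetical marker size; imagePoints, cameraMatrix
 * and distCoeffs are assumed to be filled elsewhere.
 *
 *     const double squareLength = 0.05;  // hypothetical marker side length, in meters
 *     std::vector<cv::Point3d> objectPoints = {
 *         {-squareLength / 2,  squareLength / 2, 0}, { squareLength / 2,  squareLength / 2, 0},
 *         { squareLength / 2, -squareLength / 2, 0}, {-squareLength / 2, -squareLength / 2, 0}};
 *     std::vector<cv::Mat> rvecs, tvecs;
 *     std::vector<double> reprojErr;  // one RMSE per returned solution
 *     int n = cv::solvePnPGeneric(objectPoints, imagePoints, cameraMatrix, distCoeffs,
 *                                 rvecs, tvecs, false, cv::SOLVEPNP_IPPE_SQUARE,
 *                                 cv::noArray(), cv::noArray(), reprojErr);
 *     int best = 0;  // pick the solution with the smallest reprojection error
 *     for (int i = 1; i < n; i++)
 *         if (reprojErr[i] < reprojErr[best]) best = i;
 */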
  1881. /**
  1882. * Finds an object pose from 3D-2D point correspondences.
  1883. *
  1884. * @see `REF: calib3d_solvePnP`
  1885. *
  1886. * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  1887. * couple), depending on the number of input points and the chosen method:
  1888. * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  1889. * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  1890. * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  1891. * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  1892. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  1893. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  1894. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1895. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1896. * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  1897. * Only 1 solution is returned.
  1898. *
1899. * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1900. * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
1901. * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1902. * where N is the number of points. vector\<Point2d\> can also be passed here.
  1903. * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
  1904. * @param distCoeffs Input vector of distortion coefficients
  1905. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  1906. * assumed.
1907. * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
  1908. * the model coordinate system to the camera coordinate system.
  1909. * @param tvecs Vector of output translation vectors.
  1910. * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
  1911. * the provided rvec and tvec values as initial approximations of the rotation and translation
  1912. * vectors, respectively, and further optimizes them.
  1913. * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
  1914. * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
  1915. * and useExtrinsicGuess is set to true.
  1916. * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
  1917. * and useExtrinsicGuess is set to true.
  1920. *
  1921. * More information is described in REF: calib3d_solvePnP
  1922. *
  1923. * NOTE:
  1924. * - An example of how to use solvePnP for planar augmented reality can be found at
  1925. * opencv_source_code/samples/python/plane_ar.py
  1926. * - If you are using Python:
  1927. * - Numpy array slices won't work as input because solvePnP requires contiguous
  1928. * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  1929. * modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1930. * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  1931. * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1932. * which requires 2-channel information.
  1933. * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  1934. * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  1935. * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  1936. * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
  1937. * unstable and sometimes give completely wrong results. If you pass one of these two
  1938. * flags, REF: SOLVEPNP_EPNP method will be used instead.
  1939. * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
  1940. * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  1941. * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  1942. * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  1943. * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  1944. * global solution to converge.
  1945. * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
  1946. * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
  1947. * Number of input points must be 4. Object points must be defined in the following order:
  1948. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  1949. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  1950. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1951. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1952. */
  1953. + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:rvec:tvec:));
  1954. /**
  1955. * Finds an object pose from 3D-2D point correspondences.
  1956. *
  1957. * @see `REF: calib3d_solvePnP`
  1958. *
  1959. * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  1960. * couple), depending on the number of input points and the chosen method:
  1961. * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  1962. * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  1963. * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  1964. * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  1965. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  1966. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  1967. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1968. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1969. * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  1970. * Only 1 solution is returned.
  1971. *
1972. * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1973. * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
1974. * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1975. * where N is the number of points. vector\<Point2d\> can also be passed here.
  1976. * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
  1977. * @param distCoeffs Input vector of distortion coefficients
  1978. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  1979. * assumed.
1980. * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
  1981. * the model coordinate system to the camera coordinate system.
  1982. * @param tvecs Vector of output translation vectors.
  1983. * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
  1984. * the provided rvec and tvec values as initial approximations of the rotation and translation
  1985. * vectors, respectively, and further optimizes them.
  1986. * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
  1987. * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
  1988. * and useExtrinsicGuess is set to true.
  1992. *
  1993. * More information is described in REF: calib3d_solvePnP
  1994. *
  1995. * NOTE:
  1996. * - An example of how to use solvePnP for planar augmented reality can be found at
  1997. * opencv_source_code/samples/python/plane_ar.py
  1998. * - If you are using Python:
  1999. * - Numpy array slices won't work as input because solvePnP requires contiguous
  2000. * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  2001. * modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2002. * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  2003. * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2004. * which requires 2-channel information.
  2005. * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  2006. * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  2007. * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  2008. * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
  2009. * unstable and sometimes give completely wrong results. If you pass one of these two
  2010. * flags, REF: SOLVEPNP_EPNP method will be used instead.
  2011. * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
  2012. * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  2013. * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  2014. * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  2015. * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  2016. * global solution to converge.
  2017. * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
  2018. * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
  2019. * Number of input points must be 4. Object points must be defined in the following order:
  2020. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2021. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2022. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2023. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2024. */
  2025. + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags rvec:(Mat*)rvec NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:rvec:));
  2026. /**
  2027. * Finds an object pose from 3D-2D point correspondences.
  2028. *
  2029. * @see `REF: calib3d_solvePnP`
  2030. *
  2031. * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  2032. * couple), depending on the number of input points and the chosen method:
  2033. * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  2034. * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  2035. * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  2036. * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  2037. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2038. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2039. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2040. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2041. * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  2042. * Only 1 solution is returned.
  2043. *
2044. * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2045. * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
2046. * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2047. * where N is the number of points. vector\<Point2d\> can also be passed here.
  2048. * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
  2049. * @param distCoeffs Input vector of distortion coefficients
  2050. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  2051. * assumed.
2052. * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
  2053. * the model coordinate system to the camera coordinate system.
  2054. * @param tvecs Vector of output translation vectors.
  2055. * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
  2056. * the provided rvec and tvec values as initial approximations of the rotation and translation
  2057. * vectors, respectively, and further optimizes them.
  2058. * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
  2063. *
  2064. * More information is described in REF: calib3d_solvePnP
  2065. *
  2066. * NOTE:
  2067. * - An example of how to use solvePnP for planar augmented reality can be found at
  2068. * opencv_source_code/samples/python/plane_ar.py
  2069. * - If you are using Python:
  2070. * - Numpy array slices won't work as input because solvePnP requires contiguous
  2071. * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  2072. * modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2073. * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  2074. * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2075. * which requires 2-channel information.
  2076. * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  2077. * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  2078. * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  2079. * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
  2080. * unstable and sometimes give completely wrong results. If you pass one of these two
  2081. * flags, REF: SOLVEPNP_EPNP method will be used instead.
  2082. * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
  2083. * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  2084. * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  2085. * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  2086. * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  2087. * global solution to converge.
  2088. * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
  2089. * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
  2090. * Number of input points must be 4. Object points must be defined in the following order:
  2091. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2092. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2093. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2094. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2095. */
  2096. + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:));
  2097. /**
  2098. * Finds an object pose from 3D-2D point correspondences.
  2099. *
  2100. * @see `REF: calib3d_solvePnP`
  2101. *
  2102. * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  2103. * couple), depending on the number of input points and the chosen method:
  2104. * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  2105. * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  2106. * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  2107. * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  2108. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2109. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2110. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2111. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2112. * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  2113. * Only 1 solution is returned.
  2114. *
2115. * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2116. * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
2117. * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2118. * where N is the number of points. vector\<Point2d\> can also be passed here.
  2119. * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
  2120. * @param distCoeffs Input vector of distortion coefficients
  2121. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  2122. * assumed.
2123. * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
  2124. * the model coordinate system to the camera coordinate system.
  2125. * @param tvecs Vector of output translation vectors.
  2126. * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
  2127. * the provided rvec and tvec values as initial approximations of the rotation and translation
  2128. * vectors, respectively, and further optimizes them.
  2133. *
  2134. * More information is described in REF: calib3d_solvePnP
  2135. *
  2136. * NOTE:
  2137. * - An example of how to use solvePnP for planar augmented reality can be found at
  2138. * opencv_source_code/samples/python/plane_ar.py
  2139. * - If you are using Python:
  2140. * - Numpy array slices won't work as input because solvePnP requires contiguous
  2141. * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  2142. * modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2143. * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  2144. * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2145. * which requires 2-channel information.
  2146. * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  2147. * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  2148. * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  2149. * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
  2150. * unstable and sometimes give completely wrong results. If you pass one of these two
  2151. * flags, REF: SOLVEPNP_EPNP method will be used instead.
  2152. * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
  2153. * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  2154. * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  2155. * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  2156. * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  2157. * global solution to converge.
  2158. * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
  2159. * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
  2160. * Number of input points must be 4. Object points must be defined in the following order:
  2161. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2162. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2163. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2164. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2165. */
  2166. + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:));
  2167. /**
  2168. * Finds an object pose from 3D-2D point correspondences.
  2169. *
  2170. * @see `REF: calib3d_solvePnP`
  2171. *
  2172. * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  2173. * couple), depending on the number of input points and the chosen method:
  2174. * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  2175. * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  2176. * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  2177. * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  2178. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2179. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2180. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2181. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2182. * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  2183. * Only 1 solution is returned.
  2184. *
2185. * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2186. * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
2187. * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2188. * where N is the number of points. vector\<Point2d\> can also be passed here.
  2189. * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
  2190. * @param distCoeffs Input vector of distortion coefficients
  2191. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  2192. * assumed.
2193. * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
  2194. * the model coordinate system to the camera coordinate system.
  2195. * @param tvecs Vector of output translation vectors.
  2202. *
  2203. * More information is described in REF: calib3d_solvePnP
  2204. *
  2205. * NOTE:
  2206. * - An example of how to use solvePnP for planar augmented reality can be found at
  2207. * opencv_source_code/samples/python/plane_ar.py
  2208. * - If you are using Python:
  2209. * - Numpy array slices won't work as input because solvePnP requires contiguous
  2210. * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  2211. * modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2212. * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  2213. * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  2214. * which requires 2-channel information.
  2215. * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  2216. * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  2217. * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  2218. * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
  2219. * unstable and sometimes give completely wrong results. If you pass one of these two
  2220. * flags, REF: SOLVEPNP_EPNP method will be used instead.
  2221. * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
  2222. * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  2223. * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  2224. * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  2225. * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  2226. * global solution to converge.
  2227. * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
  2228. * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
  2229. * Number of input points must be 4. Object points must be defined in the following order:
  2230. * - point 0: [-squareLength / 2, squareLength / 2, 0]
  2231. * - point 1: [ squareLength / 2, squareLength / 2, 0]
  2232. * - point 2: [ squareLength / 2, -squareLength / 2, 0]
  2233. * - point 3: [-squareLength / 2, -squareLength / 2, 0]
  2234. */
  2235. + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:));
  2236. //
  2237. // Mat cv::initCameraMatrix2D(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, double aspectRatio = 1.0)
  2238. //
  2239. /**
  2240. * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
  2241. *
  2242. * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
  2243. * coordinate space. In the old interface all the per-view vectors are concatenated. See
  2244. * #calibrateCamera for details.
  2245. * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
  2246. * old interface all the per-view vectors are concatenated.
  2247. * @param imageSize Image size in pixels used to initialize the principal point.
  2248. * @param aspectRatio If it is zero or negative, both `$$f_x$$` and `$$f_y$$` are estimated independently.
  2249. * Otherwise, `$$f_x = f_y * \texttt{aspectRatio}$$` .
  2250. *
  2251. * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
  2252. * Currently, the function only supports planar calibration patterns, which are patterns where each
2253. * object point has z-coordinate = 0.
  2254. */
  2255. + (Mat*)initCameraMatrix2D:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize aspectRatio:(double)aspectRatio NS_SWIFT_NAME(initCameraMatrix2D(objectPoints:imagePoints:imageSize:aspectRatio:));
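/*
 * Example (illustrative sketch): using the returned matrix to seed #calibrateCamera via the
 * underlying C++ API. objectPoints (per-view vectors of planar, z = 0 pattern points),
 * imagePoints and imageSize are assumed to be filled elsewhere.
 *
 *     cv::Mat K = cv::initCameraMatrix2D(objectPoints, imagePoints, imageSize, 1.0);
 *     cv::Mat distCoeffs;
 *     std::vector<cv::Mat> rvecs, tvecs;
 *     // CALIB_USE_INTRINSIC_GUESS tells calibrateCamera to start from K instead of
 *     // computing its own initial estimate
 *     cv::calibrateCamera(objectPoints, imagePoints, imageSize, K, distCoeffs,
 *                         rvecs, tvecs, cv::CALIB_USE_INTRINSIC_GUESS);
 */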
  2256. /**
  2257. * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
  2258. *
  2259. * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
  2260. * coordinate space. In the old interface all the per-view vectors are concatenated. See
  2261. * #calibrateCamera for details.
  2262. * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
  2263. * old interface all the per-view vectors are concatenated.
  2264. * @param imageSize Image size in pixels used to initialize the principal point.
  2266. *
  2267. * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
  2268. * Currently, the function only supports planar calibration patterns, which are patterns where each
2269. * object point has z-coordinate = 0.
  2270. */
  2271. + (Mat*)initCameraMatrix2D:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize NS_SWIFT_NAME(initCameraMatrix2D(objectPoints:imagePoints:imageSize:));
  2272. //
  2273. // bool cv::findChessboardCorners(Mat image, Size patternSize, Mat& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
  2274. //
  2275. /**
  2276. * Finds the positions of internal corners of the chessboard.
  2277. *
  2278. * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
2279. * @param patternSize Number of inner corners per chessboard row and column
2280. * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
  2281. * @param corners Output array of detected corners.
  2282. * @param flags Various operation flags that can be zero or a combination of the following values:
  2283. * - REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
  2284. * and white, rather than a fixed threshold level (computed from the average image brightness).
  2285. * - REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
  2286. * applying fixed or adaptive thresholding.
  2287. * - REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
  2288. * square-like shape) to filter out false quads extracted at the contour retrieval stage.
  2289. * - REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
  2290. * and shortcut the call if none is found. This can drastically speed up the call in the
  2291. * degenerate condition when no chessboard is observed.
  2292. *
  2293. * The function attempts to determine whether the input image is a view of the chessboard pattern and
  2294. * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
  2295. * are found and they are placed in a certain order (row by row, left to right in every row).
  2296. * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
  2297. * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
  2298. * squares touch each other. The detected coordinates are approximate, and to determine their positions
2299. * more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
2300. * different parameters if the returned coordinates are not accurate enough.
  2301. *
2302. * Sample usage of detecting and drawing chessboard corners:
  2303. *
  2304. * Size patternsize(8,6); //interior number of corners
  2305. * Mat gray = ....; //source image
  2306. * vector<Point2f> corners; //this will be filled by the detected corners
  2307. *
  2308. * //CALIB_CB_FAST_CHECK saves a lot of time on images
  2309. * //that do not contain any chessboard corners
  2310. * bool patternfound = findChessboardCorners(gray, patternsize, corners,
  2311. * CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
  2312. * + CALIB_CB_FAST_CHECK);
  2313. *
  2314. * if(patternfound)
  2315. * cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
2316. * TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1));
  2317. *
  2318. * drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
  2319. *
  2320. * NOTE: The function requires white space (like a square-thick border, the wider the better) around
  2321. * the board to make the detection more robust in various environments. Otherwise, if there is no
  2322. * border and the background is dark, the outer black squares cannot be segmented properly and so the
  2323. * square grouping and ordering algorithm fails.
  2324. *
2325. * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create a checkerboard.
  2326. */
  2327. + (BOOL)findChessboardCorners:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners flags:(int)flags NS_SWIFT_NAME(findChessboardCorners(image:patternSize:corners:flags:));
  2328. /**
  2329. * Finds the positions of internal corners of the chessboard.
  2330. *
  2331. * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
2332. * @param patternSize Number of inner corners per chessboard row and column
2333. * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
  2334. * @param corners Output array of detected corners.
  2344. *
  2345. * The function attempts to determine whether the input image is a view of the chessboard pattern and
  2346. * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
  2347. * are found and they are placed in a certain order (row by row, left to right in every row).
  2348. * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
  2349. * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
  2350. * squares touch each other. The detected coordinates are approximate, and to determine their positions
2351. * more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
2352. * different parameters if the returned coordinates are not accurate enough.
  2353. *
2354. * Sample usage of detecting and drawing chessboard corners:
  2355. *
  2356. * Size patternsize(8,6); //interior number of corners
  2357. * Mat gray = ....; //source image
  2358. * vector<Point2f> corners; //this will be filled by the detected corners
  2359. *
  2360. * //CALIB_CB_FAST_CHECK saves a lot of time on images
  2361. * //that do not contain any chessboard corners
  2362. * bool patternfound = findChessboardCorners(gray, patternsize, corners,
  2363. * CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
  2364. * + CALIB_CB_FAST_CHECK);
  2365. *
  2366. * if(patternfound)
  2367. * cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
2368. * TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1));
  2369. *
  2370. * drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
  2371. *
  2372. * NOTE: The function requires white space (like a square-thick border, the wider the better) around
  2373. * the board to make the detection more robust in various environments. Otherwise, if there is no
  2374. * border and the background is dark, the outer black squares cannot be segmented properly and so the
  2375. * square grouping and ordering algorithm fails.
  2376. *
2377. * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create a checkerboard.
  2378. */
  2379. + (BOOL)findChessboardCorners:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners NS_SWIFT_NAME(findChessboardCorners(image:patternSize:corners:));
  2380. //
  2381. // bool cv::checkChessboard(Mat img, Size size)
  2382. //
  2383. + (BOOL)checkChessboard:(Mat*)img size:(Size2i*)size NS_SWIFT_NAME(checkChessboard(img:size:));
  2384. //
  2385. // bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
  2386. //
  2387. /**
  2388. * Finds the positions of internal corners of the chessboard using a sector based approach.
  2389. *
  2390. * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
2391. * @param patternSize Number of inner corners per chessboard row and column
2392. * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
  2393. * @param corners Output array of detected corners.
  2394. * @param flags Various operation flags that can be zero or a combination of the following values:
  2395. * - REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
  2396. * - REF: CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
2397. * - REF: CALIB_CB_ACCURACY Up-sample the input image to improve sub-pixel accuracy due to aliasing effects.
  2398. * - REF: CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
  2399. * - REF: CALIB_CB_MARKER The detected pattern must have a marker (see description).
  2400. * This should be used if an accurate camera calibration is required.
2401. * @param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
  2402. * Each entry stands for one corner of the pattern and can have one of the following values:
  2403. * - 0 = no meta data attached
  2404. * - 1 = left-top corner of a black cell
  2405. * - 2 = left-top corner of a white cell
  2406. * - 3 = left-top corner of a black cell with a white marker dot
  2407. * - 4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers otherwise first corner)
  2408. *
2409. * The function is analogous to #findChessboardCorners but uses a localized Radon
2410. * transformation approximated by box filters, which is more robust to all sorts of
2411. * noise and faster on larger images, and is able to directly return the sub-pixel
2412. * position of the internal chessboard corners. The method is based on the paper
2413. * CITE: duda2018 "Accurate Detection and Localization of Checkerboard Corners for
2414. * Calibration", which demonstrates that the returned sub-pixel positions are more
2415. * accurate than those returned by cornerSubPix, allowing precise camera
2416. * calibration for demanding applications.
  2417. *
2418. * If the flags REF: CALIB_CB_LARGER or REF: CALIB_CB_MARKER are given,
2419. * the result can be recovered from the optional meta array. Both flags are
2420. * helpful when the calibration pattern exceeds the field of view of the camera.
2421. * Such oversized patterns allow more accurate calibrations because corners as
2422. * close as possible to the image borders can be utilized. For a
2423. * consistent coordinate system across all images, the optional marker (see image
2424. * below) can be used to move the origin of the board to the location where the
2425. * black circle is located.
  2426. *
2427. * NOTE: The function requires a white border with roughly the same width as one
2428. * of the checkerboard fields around the whole board to improve the detection in
2429. * various environments. In addition, because of the localized Radon
2430. * transformation, it is beneficial to use round corners for the field corners
2431. * located on the outside of the board. The following figure illustrates
2432. * a sample checkerboard optimized for detection. However, any other checkerboard
2433. * can be used as well.
  2434. *
2435. * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create a checkerboard.
  2436. * ![Checkerboard](pics/checkerboard_radon.png)
  2437. */
  2438. + (BOOL)findChessboardCornersSBWithMeta:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners flags:(int)flags meta:(Mat*)meta NS_SWIFT_NAME(findChessboardCornersSB(image:patternSize:corners:flags:meta:));
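/*
 * Example (illustrative sketch): detecting a marker board that may extend past the image with
 * the underlying C++ API. gray is an assumed input image and cv::Size(9, 6) an illustrative
 * pattern size.
 *
 *     cv::Mat corners, meta;
 *     bool found = cv::findChessboardCornersSB(gray, cv::Size(9, 6), corners,
 *                      cv::CALIB_CB_LARGER | cv::CALIB_CB_MARKER, meta);
 *     // with CALIB_CB_MARKER the board origin is anchored at the marker cell; meta holds
 *     // one of the values 0..4 described above for each detected corner
 */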
  2439. //
  2440. // bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
  2441. //
  2442. + (BOOL)findChessboardCornersSB:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners flags:(int)flags NS_SWIFT_NAME(findChessboardCornersSB(image:patternSize:corners:flags:));
  2443. + (BOOL)findChessboardCornersSB:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners NS_SWIFT_NAME(findChessboardCornersSB(image:patternSize:corners:));
  2444. //
  2445. // Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
  2446. //
  2447. /**
  2448. * Estimates the sharpness of a detected chessboard.
  2449. *
2450. * Image sharpness, as well as brightness, is a critical parameter for accurate
2451. * camera calibration. To access these parameters for filtering out
2452. * problematic calibration images, this method calculates edge profiles by traveling from
2453. * black to white chessboard cell centers. Based on this, the number of pixels
2454. * required to transition from black to white is calculated. This width of the
2455. * transition area is a good indication of how sharply the chessboard is imaged
2456. * and should be below ~3.0 pixels.
  2457. *
  2458. * @param image Gray image used to find chessboard corners
  2459. * @param patternSize Size of a found chessboard pattern
  2460. * @param corners Corners found by #findChessboardCornersSB
  2461. * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
  2462. * @param vertical By default edge responses for horizontal lines are calculated
  2463. * @param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
  2464. *
  2465. * The optional sharpness array is of type CV_32FC1 and has for each calculated
  2466. * profile one row with the following five entries:
  2467. * 0 = x coordinate of the underlying edge in the image
  2468. * 1 = y coordinate of the underlying edge in the image
  2469. * 2 = width of the transition area (sharpness)
  2470. * 3 = signal strength in the black cell (min brightness)
  2471. * 4 = signal strength in the white cell (max brightness)
  2472. *
2473. * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
  2474. */
  2475. + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners rise_distance:(float)rise_distance vertical:(BOOL)vertical sharpness:(Mat*)sharpness NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:rise_distance:vertical:sharpness:));
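/*
 * Example (illustrative sketch): filtering out blurry calibration frames with the underlying
 * C++ API; gray and patternSize are assumed inputs, and the 3.0 px threshold follows the rule
 * of thumb quoted above.
 *
 *     cv::Mat corners;
 *     if (cv::findChessboardCornersSB(gray, patternSize, corners))
 *     {
 *         cv::Scalar s = cv::estimateChessboardSharpness(gray, patternSize, corners);
 *         bool sharpEnough = s[0] < 3.0;  // s[0] = average black-to-white transition width
 *     }
 */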
  2476. /**
  2477. * Estimates the sharpness of a detected chessboard.
  2478. *
2479. * Image sharpness, as well as brightness, is a critical parameter for accurate
2480. * camera calibration. To access these parameters for filtering out
2481. * problematic calibration images, this method calculates edge profiles by traveling from
2482. * black to white chessboard cell centers. Based on this, the number of pixels
2483. * required to transition from black to white is calculated. This width of the
2484. * transition area is a good indication of how sharply the chessboard is imaged
2485. * and should be below ~3.0 pixels.
  2486. *
  2487. * @param image Gray image used to find chessboard corners
  2488. * @param patternSize Size of a found chessboard pattern
  2489. * @param corners Corners found by #findChessboardCornersSB
  2490. * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
  2491. * @param vertical By default edge responses for horizontal lines are calculated
  2492. *
  2493. * The optional sharpness array is of type CV_32FC1 and has for each calculated
  2494. * profile one row with the following five entries:
  2495. * 0 = x coordinate of the underlying edge in the image
  2496. * 1 = y coordinate of the underlying edge in the image
  2497. * 2 = width of the transition area (sharpness)
  2498. * 3 = signal strength in the black cell (min brightness)
  2499. * 4 = signal strength in the white cell (max brightness)
  2500. *
2501. * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
  2502. */
  2503. + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners rise_distance:(float)rise_distance vertical:(BOOL)vertical NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:rise_distance:vertical:));
  2504. /**
  2505. * Estimates the sharpness of a detected chessboard.
  2506. *
2507. * Image sharpness, as well as brightness, is a critical parameter for accurate
2508. * camera calibration. To access these parameters for filtering out
2509. * problematic calibration images, this method calculates edge profiles by traveling from
2510. * black to white chessboard cell centers. Based on this, the number of pixels
2511. * required to transition from black to white is calculated. This width of the
2512. * transition area is a good indication of how sharply the chessboard is imaged
2513. * and should be below ~3.0 pixels.
  2514. *
  2515. * @param image Gray image used to find chessboard corners
  2516. * @param patternSize Size of a found chessboard pattern
  2517. * @param corners Corners found by #findChessboardCornersSB
  2518. * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
  2519. *
  2520. * The optional sharpness array is of type CV_32FC1 and has for each calculated
  2521. * profile one row with the following five entries:
  2522. * 0 = x coordinate of the underlying edge in the image
  2523. * 1 = y coordinate of the underlying edge in the image
  2524. * 2 = width of the transition area (sharpness)
  2525. * 3 = signal strength in the black cell (min brightness)
  2526. * 4 = signal strength in the white cell (max brightness)
  2527. *
2528. * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
  2529. */
  2530. + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners rise_distance:(float)rise_distance NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:rise_distance:));
  2531. /**
  2532. * Estimates the sharpness of a detected chessboard.
  2533. *
2534. * Image sharpness, as well as brightness, is a critical parameter for accurate
2535. * camera calibration. To access these parameters for filtering out
2536. * problematic calibration images, this method calculates edge profiles by traveling from
2537. * black to white chessboard cell centers. Based on this, the number of pixels
2538. * required to transition from black to white is calculated. This width of the
2539. * transition area is a good indication of how sharply the chessboard is imaged
2540. * and should be below ~3.0 pixels.
  2541. *
  2542. * @param image Gray image used to find chessboard corners
  2543. * @param patternSize Size of a found chessboard pattern
  2544. * @param corners Corners found by #findChessboardCornersSB
  2545. *
  2546. * The optional sharpness array is of type CV_32FC1 and has for each calculated
  2547. * profile one row with the following five entries:
  2548. * 0 = x coordinate of the underlying edge in the image
  2549. * 1 = y coordinate of the underlying edge in the image
  2550. * 2 = width of the transition area (sharpness)
  2551. * 3 = signal strength in the black cell (min brightness)
  2552. * 4 = signal strength in the white cell (max brightness)
  2553. *
2554. * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
  2555. */
  2556. + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:));
  2557. //
  2558. // bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
  2559. //
  2560. + (BOOL)find4QuadCornerSubpix:(Mat*)img corners:(Mat*)corners region_size:(Size2i*)region_size NS_SWIFT_NAME(find4QuadCornerSubpix(img:corners:region_size:));
  2561. //
  2562. // void cv::drawChessboardCorners(Mat& image, Size patternSize, Mat corners, bool patternWasFound)
  2563. //
  2564. /**
  2565. * Renders the detected chessboard corners.
  2566. *
  2567. * @param image Destination image. It must be an 8-bit color image.
2568. * @param patternSize Number of inner corners per chessboard row and column
  2569. * (patternSize = cv::Size(points_per_row,points_per_column)).
  2570. * @param corners Array of detected corners, the output of #findChessboardCorners.
  2571. * @param patternWasFound Parameter indicating whether the complete board was found or not. The
  2572. * return value of #findChessboardCorners should be passed here.
  2573. *
  2574. * The function draws individual chessboard corners detected either as red circles if the board was not
  2575. * found, or as colored corners connected with lines if the board was found.
  2576. */
  2577. + (void)drawChessboardCorners:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners patternWasFound:(BOOL)patternWasFound NS_SWIFT_NAME(drawChessboardCorners(image:patternSize:corners:patternWasFound:));
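//
// Example (Swift, hedged sketch): drawing the detection result. `colorImage`,
// `corners`, and `found` are assumed to come from a chessboard detector such as
// #findChessboardCorners; only the call below is taken from this header.
//
//     let patternSize = Size2i(width: 9, height: 6)
//     Calib3d.drawChessboardCorners(image: colorImage,
//                                   patternSize: patternSize,
//                                   corners: corners,
//                                   patternWasFound: found)
//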
//
// void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
//
/**
 * Draw axes of the world/object coordinate system from pose estimation. @see `+solvePnP:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:flags:`
 *
 * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
 * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
 * `$$\cameramatrix{A}$$`
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is empty, the zero distortion coefficients are assumed.
 * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Translation vector.
 * @param length Length of the painted axes in the same unit as tvec (usually in meters).
 * @param thickness Line thickness of the painted axes.
 *
 * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
 * OX is drawn in red, OY in green and OZ in blue.
 */
+ (void)drawFrameAxes:(Mat*)image cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec length:(float)length thickness:(int)thickness NS_SWIFT_NAME(drawFrameAxes(image:cameraMatrix:distCoeffs:rvec:tvec:length:thickness:));
/**
 * Draw axes of the world/object coordinate system from pose estimation. @see `+solvePnP:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:flags:`
 *
 * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
 * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
 * `$$\cameramatrix{A}$$`
 * @param distCoeffs Input vector of distortion coefficients
 * `$$\distcoeffs$$`. If the vector is empty, the zero distortion coefficients are assumed.
 * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
 * the model coordinate system to the camera coordinate system.
 * @param tvec Translation vector.
 * @param length Length of the painted axes in the same unit as tvec (usually in meters).
 *
 * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
 * OX is drawn in red, OY in green and OZ in blue.
 */
+ (void)drawFrameAxes:(Mat*)image cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec length:(float)length NS_SWIFT_NAME(drawFrameAxes(image:cameraMatrix:distCoeffs:rvec:tvec:length:));
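//
// Example (Swift, hedged sketch): overlaying the object coordinate axes after a
// solvePnP-style pose estimate. `image`, `cameraMatrix`, `distCoeffs`, `rvec`,
// and `tvec` are assumed inputs; the 0.05 axis length assumes tvec is expressed
// in meters (5 cm axes).
//
//     Calib3d.drawFrameAxes(image: image,
//                           cameraMatrix: cameraMatrix,
//                           distCoeffs: distCoeffs,
//                           rvec: rvec, tvec: tvec,
//                           length: 0.05)
//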
//
// bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags, _hidden_ blobDetector = cv::SimpleBlobDetector::create(), CirclesGridFinderParameters parameters)
//
/**
 * Finds centers in the grid of circles.
 *
 * @param image grid view of input circles; it must be an 8-bit grayscale or color image.
 * @param patternSize number of circles per row and column
 * ( patternSize = Size(points_per_row, points_per_column) ).
 * @param centers output array of detected centers.
 * @param flags various operation flags that can be one of the following values:
 * - REF: CALIB_CB_SYMMETRIC_GRID uses a symmetric pattern of circles.
 * - REF: CALIB_CB_ASYMMETRIC_GRID uses an asymmetric pattern of circles.
 * - REF: CALIB_CB_CLUSTERING uses a special algorithm for grid detection. It is more robust to
 * perspective distortions but much more sensitive to background clutter.
 * @param blobDetector feature detector that finds blobs, like dark circles on a light background.
 * If `blobDetector` is NULL then `image` represents a Point2f array of candidates.
 * @param parameters struct for finding circles in a grid pattern.
 *
 * The function attempts to determine whether the input image contains a grid of circles. If it does,
 * the function locates centers of the circles. The function returns a non-zero value if all of the
 * centers have been found and they have been placed in a certain order (row by row, left to right in
 * every row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0.
 *
 * Sample usage of detecting and drawing the centers of circles:
 *
 *     Size patternsize(7,7); //number of centers
 *     Mat gray = ...; //source image
 *     vector<Point2f> centers; //this will be filled by the detected centers
 *
 *     bool patternfound = findCirclesGrid(gray, patternsize, centers);
 *
 *     drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
 *
 * NOTE: The function requires white space (like a square-thick border, the wider the better) around
 * the board to make the detection more robust in various environments.
 */
+ (BOOL)findCirclesGrid:(Mat*)image patternSize:(Size2i*)patternSize centers:(Mat*)centers flags:(int)flags parameters:(CirclesGridFinderParameters*)parameters NS_SWIFT_NAME(findCirclesGrid(image:patternSize:centers:flags:parameters:));
//
// bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, _hidden_ blobDetector = cv::SimpleBlobDetector::create())
//
+ (BOOL)findCirclesGrid:(Mat*)image patternSize:(Size2i*)patternSize centers:(Mat*)centers flags:(int)flags NS_SWIFT_NAME(findCirclesGrid(image:patternSize:centers:flags:));
+ (BOOL)findCirclesGrid:(Mat*)image patternSize:(Size2i*)patternSize centers:(Mat*)centers NS_SWIFT_NAME(findCirclesGrid(image:patternSize:centers:));
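//
// Example (Swift, hedged sketch): a rough counterpart of the C++ sample in the
// documentation above, assuming an 8-bit grayscale `gray`, a color `img` for
// drawing, and the CALIB_CB_SYMMETRIC_GRID constant exposed on Calib3d.
//
//     let patternSize = Size2i(width: 7, height: 7)   // number of centers
//     let centers = Mat()                             // filled by the detector
//     let found = Calib3d.findCirclesGrid(image: gray,
//                                         patternSize: patternSize,
//                                         centers: centers,
//                                         flags: Calib3d.CALIB_CB_SYMMETRIC_GRID)
//     Calib3d.drawChessboardCorners(image: img, patternSize: patternSize,
//                                   corners: centers, patternWasFound: found)
//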
//
// double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
/**
 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
 * pattern.
 *
 * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
 * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
 * vector contains as many elements as the number of pattern views. If the same calibration pattern
 * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
 * possible to use partially occluded patterns or even different patterns in different views; then,
 * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
 * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
 * In the old interface all the vectors of object points from different views are concatenated
 * together.
 * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
 * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
 * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
 * respectively. In the old interface all the vectors of the object point projections from different
 * views are concatenated together.
 * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
 * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
 * `$$\cameramatrix{A}$$` . If REF: CALIB_USE_INTRINSIC_GUESS
 * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
 * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
 * @param distCoeffs Input/output vector of distortion coefficients
 * `$$\distcoeffs$$`.
 * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
 * (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
 * i-th translation vector (see the next output parameter description) brings the calibration pattern
 * from the object coordinate space (in which object points are specified) to the camera coordinate
 * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
 * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
 * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
 * space.
 * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
 * description above.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
 * parameters. Order of deviations values:
 * `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
 * parameters. Order of deviations values: `$$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})$$` where M is
 * the number of pattern views. `$$R_i, T_i$$` are concatenated 1x3 vectors.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
 * Note that if intrinsic parameters are known, there is no need to use this function just to
 * estimate extrinsic parameters. Use REF: solvePnP instead.
 * - REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
 * optimization. It stays at the center or at a different location specified when
 * REF: CALIB_USE_INTRINSIC_GUESS is set too.
 * - REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
 * ratio fx/fy stays the same as in the input cameraMatrix . When
 * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
 * ignored, only their ratio is computed and used further.
 * - REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients `$$(p_1, p_2)$$` are set
 * to zeros and stay zero.
 * - REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
 * REF: CALIB_USE_INTRINSIC_GUESS is set.
 * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
 * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
 * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the rational model and return 8 coefficients or more.
 * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the thin prism model and return 12 coefficients or more.
 * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the tilted sensor model and return 14 coefficients.
 * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * @param criteria Termination criteria for the iterative optimization algorithm.
 *
 * @return the overall RMS re-projection error.
 *
 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
 * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
 * points and their corresponding 2D projections in each view must be specified. That may be achieved
 * by using an object with known geometry and easily detectable feature points. Such an object is
 * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
 * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
 * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
 * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
 * be used as long as an initial cameraMatrix is provided.
 *
 * The algorithm performs the following steps:
 *
 * - Compute the initial intrinsic parameters (the option is only available for planar calibration
 * patterns) or read them from the input parameters. The distortion coefficients are all set to
 * zeros initially unless some of CALIB_FIX_K? are specified.
 *
 * - Estimate the initial camera pose as if the intrinsic parameters were already known. This is
 * done using REF: solvePnP .
 *
 * - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
 * that is, the total sum of squared distances between the observed feature points imagePoints and
 * the projected (using the current estimates for camera parameters and the poses) object points
 * objectPoints. See REF: projectPoints for details.
 *
 * NOTE:
 * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
 * and REF: calibrateCamera returns bad values (zero distortion coefficients, `$$c_x$$` and
 * `$$c_y$$` very far from the image center, and/or large differences between `$$f_x$$` and
 * `$$f_y$$` (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
 * instead of patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
 *
 * @sa
 * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
 * undistort
 */
+ (double)calibrateCameraExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:criteria:));
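//
// Example (Swift, hedged sketch): running the extended calibration and using
// perViewErrors to spot bad views. `objectPoints` and `imagePoints` are assumed
// to be parallel [Mat] arrays (one Mat per pattern view); the output containers
// start empty and are filled by the call.
//
//     let cameraMatrix = Mat(), distCoeffs = Mat()
//     let rvecs = NSMutableArray(), tvecs = NSMutableArray()
//     let stdDevIntrinsics = Mat(), stdDevExtrinsics = Mat(), perViewErrors = Mat()
//     let rms = Calib3d.calibrateCamera(objectPoints: objectPoints,
//                                       imagePoints: imagePoints,
//                                       imageSize: Size2i(width: 1920, height: 1080),
//                                       cameraMatrix: cameraMatrix,
//                                       distCoeffs: distCoeffs,
//                                       rvecs: rvecs, tvecs: tvecs,
//                                       stdDeviationsIntrinsics: stdDevIntrinsics,
//                                       stdDeviationsExtrinsics: stdDevExtrinsics,
//                                       perViewErrors: perViewErrors,
//                                       flags: 0)
//     // A view whose entry in perViewErrors is much larger than `rms` is a
//     // candidate for removal before re-calibrating.
//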
/**
 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
 * pattern.
 *
 * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
 * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
 * vector contains as many elements as the number of pattern views. If the same calibration pattern
 * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
 * possible to use partially occluded patterns or even different patterns in different views; then,
 * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
 * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
 * In the old interface all the vectors of object points from different views are concatenated
 * together.
 * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
 * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
 * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
 * respectively. In the old interface all the vectors of the object point projections from different
 * views are concatenated together.
 * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
 * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
 * `$$\cameramatrix{A}$$` . If REF: CALIB_USE_INTRINSIC_GUESS
 * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
 * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
 * @param distCoeffs Input/output vector of distortion coefficients
 * `$$\distcoeffs$$`.
 * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
 * (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
 * i-th translation vector (see the next output parameter description) brings the calibration pattern
 * from the object coordinate space (in which object points are specified) to the camera coordinate
 * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
 * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
 * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
 * space.
 * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
 * description above.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
 * parameters. Order of deviations values:
 * `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
 * parameters. Order of deviations values: `$$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})$$` where M is
 * the number of pattern views. `$$R_i, T_i$$` are concatenated 1x3 vectors.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
 * Note that if intrinsic parameters are known, there is no need to use this function just to
 * estimate extrinsic parameters. Use REF: solvePnP instead.
 * - REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
 * optimization. It stays at the center or at a different location specified when
 * REF: CALIB_USE_INTRINSIC_GUESS is set too.
 * - REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
 * ratio fx/fy stays the same as in the input cameraMatrix . When
 * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
 * ignored, only their ratio is computed and used further.
 * - REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients `$$(p_1, p_2)$$` are set
 * to zeros and stay zero.
 * - REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
 * REF: CALIB_USE_INTRINSIC_GUESS is set.
 * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
 * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
 * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the rational model and return 8 coefficients or more.
 * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the thin prism model and return 12 coefficients or more.
 * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the tilted sensor model and return 14 coefficients.
 * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 *
 * @return the overall RMS re-projection error.
 *
 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
 * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
 * points and their corresponding 2D projections in each view must be specified. That may be achieved
 * by using an object with known geometry and easily detectable feature points. Such an object is
 * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
 * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
 * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
 * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
 * be used as long as an initial cameraMatrix is provided.
 *
 * The algorithm performs the following steps:
 *
 * - Compute the initial intrinsic parameters (the option is only available for planar calibration
 * patterns) or read them from the input parameters. The distortion coefficients are all set to
 * zeros initially unless some of CALIB_FIX_K? are specified.
 *
 * - Estimate the initial camera pose as if the intrinsic parameters were already known. This is
 * done using REF: solvePnP .
 *
 * - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
 * that is, the total sum of squared distances between the observed feature points imagePoints and
 * the projected (using the current estimates for camera parameters and the poses) object points
 * objectPoints. See REF: projectPoints for details.
 *
 * NOTE:
 * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
 * and REF: calibrateCamera returns bad values (zero distortion coefficients, `$$c_x$$` and
 * `$$c_y$$` very far from the image center, and/or large differences between `$$f_x$$` and
 * `$$f_y$$` (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
 * instead of patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
 *
 * @sa
 * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
 * undistort
 */
+ (double)calibrateCameraExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:));
/**
 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
 * pattern.
 *
 * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
 * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
 * vector contains as many elements as the number of pattern views. If the same calibration pattern
 * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
 * possible to use partially occluded patterns or even different patterns in different views; then,
 * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
 * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
 * In the old interface all the vectors of object points from different views are concatenated
 * together.
 * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
 * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
 * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
 * respectively. In the old interface all the vectors of the object point projections from different
 * views are concatenated together.
 * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
 * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
 * `$$\cameramatrix{A}$$` . If REF: CALIB_USE_INTRINSIC_GUESS
 * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
 * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
 * @param distCoeffs Input/output vector of distortion coefficients
 * `$$\distcoeffs$$`.
 * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
 * (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
 * i-th translation vector (see the next output parameter description) brings the calibration pattern
 * from the object coordinate space (in which object points are specified) to the camera coordinate
 * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
 * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
 * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
 * space.
 * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
 * description above.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
 * parameters. Order of deviations values:
 * `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
 * parameters. Order of deviations values: `$$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})$$` where M is
 * the number of pattern views. `$$R_i, T_i$$` are concatenated 1x3 vectors.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 *
 * This overload uses the default flags value of zero. For reference, the supported
 * flag values are:
 * - REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
 * Note that if intrinsic parameters are known, there is no need to use this function just to
 * estimate extrinsic parameters. Use REF: solvePnP instead.
 * - REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
 * optimization. It stays at the center or at a different location specified when
 * REF: CALIB_USE_INTRINSIC_GUESS is set too.
 * - REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
 * ratio fx/fy stays the same as in the input cameraMatrix . When
 * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
 * ignored, only their ratio is computed and used further.
 * - REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients `$$(p_1, p_2)$$` are set
 * to zeros and stay zero.
 * - REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
 * REF: CALIB_USE_INTRINSIC_GUESS is set.
 * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
 * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
 * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the rational model and return 8 coefficients or more.
 * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the thin prism model and return 12 coefficients or more.
 * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide
 * backward compatibility, this extra flag should be explicitly specified to make the
 * calibration function use the tilted sensor model and return 14 coefficients.
 * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 *
 * @return the overall RMS re-projection error.
 *
 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
 * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
 * points and their corresponding 2D projections in each view must be specified. That may be achieved
 * by using an object with known geometry and easily detectable feature points. Such an object is
 * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
 * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
 * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
 * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
 * be used as long as an initial cameraMatrix is provided.
 *
 * The algorithm performs the following steps:
 *
 * - Compute the initial intrinsic parameters (the option is only available for planar calibration
 * patterns) or read them from the input parameters. The distortion coefficients are all set to
 * zeros initially unless some of CALIB_FIX_K? are specified.
 *
 * - Estimate the initial camera pose as if the intrinsic parameters were already known. This is
 * done using REF: solvePnP .
 *
 * - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
 * that is, the total sum of squared distances between the observed feature points imagePoints and
 * the projected (using the current estimates for camera parameters and the poses) object points
 * objectPoints. See REF: projectPoints for details.
 *
 * NOTE:
 * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
 * and REF: calibrateCamera returns bad values (zero distortion coefficients, `$$c_x$$` and
 * `$$c_y$$` very far from the image center, and/or large differences between `$$f_x$$` and
 * `$$f_y$$` (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
 * instead of patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
 *
 * @sa
 * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
 * undistort
 */
+ (double)calibrateCameraExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:));
//
// double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
+ (double)calibrateCamera:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:criteria:));
+ (double)calibrateCamera:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:));
+ (double)calibrateCamera:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:));
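//
// Example (Swift, hedged sketch): the plain overload with default flags and
// termination criteria (see the C++ signature above). `objectPoints` and
// `imagePoints` are assumed [Mat] inputs, one Mat per view; the image size is
// an example value.
//
//     let cameraMatrix = Mat(), distCoeffs = Mat()
//     let rvecs = NSMutableArray(), tvecs = NSMutableArray()
//     let rms = Calib3d.calibrateCamera(objectPoints: objectPoints,
//                                       imagePoints: imagePoints,
//                                       imageSize: Size2i(width: 1920, height: 1080),
//                                       cameraMatrix: cameraMatrix,
//                                       distCoeffs: distCoeffs,
//                                       rvecs: rvecs, tvecs: tvecs)
//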
//
// double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
/**
 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
 *
 * This function is an extension of #calibrateCamera with the object-releasing method
 * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
 * targets (calibration plates), this method can dramatically improve the precision of the estimated
 * camera parameters. Both the object-releasing method and the standard method are supported by this
 * function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
 * #calibrateCamera is a wrapper for this function.
 *
 * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
 * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
 * the identical calibration board must be used in each view, it must be fully visible, all
 * objectPoints[i] must be the same, and all points should be roughly close to a plane. **The calibration
 * target has to be rigid, or at least static if the camera (rather than the calibration target) is
 * shifted for grabbing images.**
 * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
 * #calibrateCamera for details.
 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
 * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
 * a switch for calibration method selection. If the object-releasing method is to be used, pass a
 * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
 * calibration method. Fixing the top-right corner point of the calibration board grid is usually
 * recommended when the object-releasing method is utilized. According to CITE: strobl2011iccv,
 * two other points are also fixed. In this implementation, objectPoints[0].front
 * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
 * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
 * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
 * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
 * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
 * for details.
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
 * be scaled based on the three fixed points. The returned coordinates are accurate only if the
 * above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
 * parameter is ignored with the standard calibration method.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
 * See #calibrateCamera for details.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
 * See #calibrateCamera for details.
 * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
 * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
 * parameter is ignored with the standard calibration method.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of some predefined values. See
 * #calibrateCamera for details. If the object-releasing method is used, the calibration time may
 * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration that is potentially
 * less precise and less stable in some rare cases.
 * @param criteria Termination criteria for the iterative optimization algorithm.
 *
 * @return the overall RMS re-projection error.
 *
 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
 * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
 * #calibrateCamera for other detailed explanations.
 * @sa
 * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
 */
+ (double)calibrateCameraROExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics stdDeviationsObjPoints:(Mat*)stdDeviationsObjPoints perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:stdDeviationsIntrinsics:stdDeviationsExtrinsics:stdDeviationsObjPoints:perViewErrors:flags:criteria:));
/**
 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
 *
 * This function is an extension of #calibrateCamera with the object-releasing method
 * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
 * targets (calibration plates), this method can dramatically improve the precision of the estimated
 * camera parameters. Both the object-releasing method and the standard method are supported by this
 * function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
 * #calibrateCamera is a wrapper for this function.
 *
 * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
 * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
 * the identical calibration board must be used in each view, it must be fully visible, all
 * objectPoints[i] must be the same, and all points should be roughly close to a plane. **The calibration
 * target has to be rigid, or at least static if the camera (rather than the calibration target) is
 * shifted for grabbing images.**
 * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
 * #calibrateCamera for details.
 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
 * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
 * a switch for calibration method selection. If the object-releasing method is to be used, pass a
 * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
 * calibration method. Fixing the top-right corner point of the calibration board grid is usually
 * recommended when the object-releasing method is utilized. According to CITE: strobl2011iccv,
 * two other points are also fixed. In this implementation, objectPoints[0].front
 * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
 * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
 * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
 * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
 * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
 * for details.
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
 * be scaled based on the three fixed points. The returned coordinates are accurate only if the
 * above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
 * parameter is ignored with the standard calibration method.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
 * See #calibrateCamera for details.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
 * See #calibrateCamera for details.
 * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
 * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
 * parameter is ignored with the standard calibration method.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of some predefined values. See
 * #calibrateCamera for details. If the object-releasing method is used, the calibration time may
 * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration that is potentially
 * less precise and less stable in some rare cases.
 *
 * @return the overall RMS re-projection error.
 *
 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
 * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
 * #calibrateCamera for other detailed explanations.
 * @sa
 * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
 */
+ (double)calibrateCameraROExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics stdDeviationsObjPoints:(Mat*)stdDeviationsObjPoints perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:stdDeviationsIntrinsics:stdDeviationsExtrinsics:stdDeviationsObjPoints:perViewErrors:flags:));
/**
 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
 *
 * This function is an extension of #calibrateCamera with the object-releasing method
 * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
 * targets (calibration plates), this method can dramatically improve the precision of the estimated
 * camera parameters. Both the object-releasing method and the standard method are supported by this
 * function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
 * #calibrateCamera is a wrapper for this function.
 *
 * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
 * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
 * the identical calibration board must be used in each view, it must be fully visible, all
 * objectPoints[i] must be the same, and all points should be roughly close to a plane. **The calibration
 * target has to be rigid, or at least static if the camera (rather than the calibration target) is
 * shifted for grabbing images.**
 * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
 * #calibrateCamera for details.
 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
 * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
 * a switch for calibration method selection. If the object-releasing method is to be used, pass a
 * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
 * calibration method. Fixing the top-right corner point of the calibration board grid is usually
 * recommended when the object-releasing method is utilized. According to CITE: strobl2011iccv,
 * two other points are also fixed. In this implementation, objectPoints[0].front
 * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
 * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
 * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
 * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
 * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
 * for details.
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
 * be scaled based on the three fixed points. The returned coordinates are accurate only if the
 * above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
 * parameter is ignored with the standard calibration method.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
 * See #calibrateCamera for details.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
 * See #calibrateCamera for details.
 * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
 * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
 * parameter is ignored with the standard calibration method.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 *
 * This overload uses the default flags value of zero; see #calibrateCamera for flag details.
 * If the object-releasing method is used, the calibration time may be much longer. With the
 * flags overload, CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration that is
 * potentially less precise and less stable in some rare cases.
 *
 * @return the overall RMS re-projection error.
 *
 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
 * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
 * #calibrateCamera for other detailed explanations.
 * @sa
 * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
 */
+ (double)calibrateCameraROExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics stdDeviationsObjPoints:(Mat*)stdDeviationsObjPoints perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:stdDeviationsIntrinsics:stdDeviationsExtrinsics:stdDeviationsObjPoints:perViewErrors:));
//
// double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
+ (double)calibrateCameraRO:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:flags:criteria:));
+ (double)calibrateCameraRO:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints flags:(int)flags NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:flags:));
+ (double)calibrateCameraRO:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:));
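//
// Example (Swift, hedged sketch): the object-releasing method with the
// recommended fixed point. For a row-major grid with `cols` points per row,
// the top-right corner of objectPoints[0] has index cols - 1; grid and image
// dimensions are example values, and objectPoints/imagePoints are assumed
// [Mat] inputs as in the examples above.
//
//     let cols = 9
//     let iFixedPoint = Int32(cols - 1)            // top-right corner point
//     let cameraMatrix = Mat(), distCoeffs = Mat(), newObjPoints = Mat()
//     let rvecs = NSMutableArray(), tvecs = NSMutableArray()
//     let rms = Calib3d.calibrateCameraRO(objectPoints: objectPoints,
//                                         imagePoints: imagePoints,
//                                         imageSize: Size2i(width: 1920, height: 1080),
//                                         iFixedPoint: iFixedPoint,
//                                         cameraMatrix: cameraMatrix,
//                                         distCoeffs: distCoeffs,
//                                         rvecs: rvecs, tvecs: tvecs,
//                                         newObjPoints: newObjPoints)
//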
//
// void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
//
/**
 * Computes useful camera characteristics from the camera intrinsic matrix.
 *
 * @param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or
 * #stereoCalibrate .
 * @param imageSize Input image size in pixels.
 * @param apertureWidth Physical width in mm of the sensor.
 * @param apertureHeight Physical height in mm of the sensor.
 * @param fovx Output field of view in degrees along the horizontal sensor axis.
 * @param fovy Output field of view in degrees along the vertical sensor axis.
 * @param focalLength Output focal length of the lens in mm.
 * @param principalPoint Output principal point in mm.
 * @param aspectRatio Output aspect ratio `$$f_y/f_x$$`.
 *
 * The function computes various useful camera characteristics from the previously estimated camera
 * matrix.
 *
 * NOTE:
 * Keep in mind that the unit 'mm' stands for whatever unit of measure one chooses for the
 * chessboard pitch (it can thus be any value).
 */
+ (void)calibrationMatrixValues:(Mat*)cameraMatrix imageSize:(Size2i*)imageSize apertureWidth:(double)apertureWidth apertureHeight:(double)apertureHeight fovx:(double*)fovx fovy:(double*)fovy focalLength:(double*)focalLength principalPoint:(Point2d*)principalPoint aspectRatio:(double*)aspectRatio NS_SWIFT_NAME(calibrationMatrixValues(cameraMatrix:imageSize:apertureWidth:apertureHeight:fovx:fovy:focalLength:principalPoint:aspectRatio:));
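// Usage sketch (not part of the original header): reading out the computed
// characteristics. It reuses the illustrative cameraMatrix/imageSize from the sketch
// above and again assumes the enclosing class is Calib3d; the 6.17 mm x 4.55 mm
// aperture is an arbitrary example sensor size, not a recommended value.
//
//   double fovx = 0, fovy = 0, focalLength = 0, aspectRatio = 0;
//   Point2d *principalPoint = [[Point2d alloc] init];
//   [Calib3d calibrationMatrixValues:cameraMatrix
//                          imageSize:imageSize
//                      apertureWidth:6.17
//                     apertureHeight:4.55
//                               fovx:&fovx
//                               fovy:&fovy
//                        focalLength:&focalLength
//                     principalPoint:principalPoint
//                        aspectRatio:&aspectRatio];
//   NSLog(@"FOV: %.1f x %.1f deg, f = %.2f mm", fovx, fovy, focalLength);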
//
// double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
//
/**
 * Calibrates a stereo camera setup. This function finds the intrinsic parameters
 * for each of the two cameras and the extrinsic parameters between the two cameras.
 *
 * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
 * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
 * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
 * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
 * be equal for each i.
 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
 * observed by the first camera. The same structure as in REF: calibrateCamera.
 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
 * observed by the second camera. The same structure as in REF: calibrateCamera.
 * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description
 * for cameraMatrix1.
 * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
 * description for distCoeffs1.
 * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
 * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
 * points given in the first camera's coordinate system to points in the second camera's
 * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
 * from the first camera's coordinate system to the second camera's coordinate system. Due to its
 * duality, this tuple is equivalent to the position of the first camera with respect to the
 * second camera coordinate system.
 * @param T Output translation vector, see description above.
 * @param E Output essential matrix.
 * @param F Output fundamental matrix.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
 * matrices are estimated.
 * - REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
 * according to the specified flags. Initial values are provided by the user.
 * - REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
 * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
 * - REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
 * - REF: CALIB_FIX_FOCAL_LENGTH Fix `$$f^{(j)}_x$$` and `$$f^{(j)}_y$$`.
 * - REF: CALIB_FIX_ASPECT_RATIO Optimize `$$f^{(j)}_y$$`. Fix the ratio `$$f^{(j)}_x/f^{(j)}_y$$`.
 * - REF: CALIB_SAME_FOCAL_LENGTH Enforce `$$f^{(0)}_x=f^{(1)}_x$$` and `$$f^{(0)}_y=f^{(1)}_y$$`.
 * - REF: CALIB_ZERO_TANGENT_DIST Set the tangential distortion coefficients for each camera to
 * zero and keep them fixed.
 * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
 * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
 * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. For backward
 * compatibility, this extra flag should be explicitly specified to make the calibration
 * function use the rational model and return 8 coefficients. If the flag is not set, the
 * function computes and returns only 5 distortion coefficients.
 * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
 * compatibility, this extra flag should be explicitly specified to make the calibration
 * function use the thin prism model and return 12 coefficients. If the flag is not set, the
 * function computes and returns only 5 distortion coefficients.
 * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
 * compatibility, this extra flag should be explicitly specified to make the calibration
 * function use the tilted sensor model and return 14 coefficients. If the flag is not set,
 * the function computes and returns only 5 distortion coefficients.
 * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * @param criteria Termination criteria for the iterative optimization algorithm.
 *
 * The function estimates the transformation between two cameras making a stereo pair. If one computes
 * the poses of an object relative to the first camera and to the second camera,
 * (`$$R_1$$`,`$$T_1$$`) and (`$$R_2$$`,`$$T_2$$`), respectively, for a stereo camera where the
 * relative position and orientation between the two cameras are fixed, then those poses are related
 * to each other. This means that, if the relative position and orientation (`$$R$$`,`$$T$$`) of the
 * two cameras is known, it is possible to compute (`$$R_2$$`,`$$T_2$$`) when (`$$R_1$$`,`$$T_1$$`) is
 * given. This is what the described function does. It computes (`$$R$$`,`$$T$$`) such that:
 *
 * `$$R_2=R R_1$$`
 * `$$T_2=R T_1 + T.$$`
 *
 * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
 * coordinate system when given the point's coordinate representation in the first camera's coordinate
 * system:
 *
 * `$$\begin{bmatrix}
 * X_2 \\
 * Y_2 \\
 * Z_2 \\
 * 1
 * \end{bmatrix} = \begin{bmatrix}
 * R & T \\
 * 0 & 1
 * \end{bmatrix} \begin{bmatrix}
 * X_1 \\
 * Y_1 \\
 * Z_1 \\
 * 1
 * \end{bmatrix}.$$`
 *
 * Optionally, it computes the essential matrix E:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R$$`
 *
 * where `$$T_i$$` are components of the translation vector `$$T$$`: `$$T=[T_0, T_1, T_2]^T$$`.
 * And the function can also compute the fundamental matrix F:
 *
 * `$$F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}$$`
 *
 * Besides the stereo-related information, the function can also perform a full calibration of each of
 * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
 * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
 * estimated with high accuracy for each of the cameras individually (for example, using
 * #calibrateCamera), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to
 * the function along with the computed intrinsic parameters. Otherwise, if all the parameters are
 * estimated at once, it makes sense to restrict some parameters, for example, by passing the
 * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
 * reasonable assumption.
 *
 * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
 * points in all the available views from both cameras. The function returns the final value of the
 * re-projection error.
 */
+ (double)stereoCalibrateExtended:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:perViewErrors:flags:criteria:));
/**
 * Calibrates a stereo camera setup. This function finds the intrinsic parameters
 * for each of the two cameras and the extrinsic parameters between the two cameras.
 *
 * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
 * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
 * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
 * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
 * be equal for each i.
 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
 * observed by the first camera. The same structure as in REF: calibrateCamera.
 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
 * observed by the second camera. The same structure as in REF: calibrateCamera.
 * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description
 * for cameraMatrix1.
 * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
 * description for distCoeffs1.
 * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
 * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
 * points given in the first camera's coordinate system to points in the second camera's
 * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
 * from the first camera's coordinate system to the second camera's coordinate system. Due to its
 * duality, this tuple is equivalent to the position of the first camera with respect to the
 * second camera coordinate system.
 * @param T Output translation vector, see description above.
 * @param E Output essential matrix.
 * @param F Output fundamental matrix.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
 * matrices are estimated.
 * - REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
 * according to the specified flags. Initial values are provided by the user.
 * - REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
 * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
 * - REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
 * - REF: CALIB_FIX_FOCAL_LENGTH Fix `$$f^{(j)}_x$$` and `$$f^{(j)}_y$$`.
 * - REF: CALIB_FIX_ASPECT_RATIO Optimize `$$f^{(j)}_y$$`. Fix the ratio `$$f^{(j)}_x/f^{(j)}_y$$`.
 * - REF: CALIB_SAME_FOCAL_LENGTH Enforce `$$f^{(0)}_x=f^{(1)}_x$$` and `$$f^{(0)}_y=f^{(1)}_y$$`.
 * - REF: CALIB_ZERO_TANGENT_DIST Set the tangential distortion coefficients for each camera to
 * zero and keep them fixed.
 * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
 * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
 * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. For backward
 * compatibility, this extra flag should be explicitly specified to make the calibration
 * function use the rational model and return 8 coefficients. If the flag is not set, the
 * function computes and returns only 5 distortion coefficients.
 * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
 * compatibility, this extra flag should be explicitly specified to make the calibration
 * function use the thin prism model and return 12 coefficients. If the flag is not set, the
 * function computes and returns only 5 distortion coefficients.
 * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
 * compatibility, this extra flag should be explicitly specified to make the calibration
 * function use the tilted sensor model and return 14 coefficients. If the flag is not set,
 * the function computes and returns only 5 distortion coefficients.
 * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 *
 * The function estimates the transformation between two cameras making a stereo pair. If one computes
 * the poses of an object relative to the first camera and to the second camera,
 * (`$$R_1$$`,`$$T_1$$`) and (`$$R_2$$`,`$$T_2$$`), respectively, for a stereo camera where the
 * relative position and orientation between the two cameras are fixed, then those poses are related
 * to each other. This means that, if the relative position and orientation (`$$R$$`,`$$T$$`) of the
 * two cameras is known, it is possible to compute (`$$R_2$$`,`$$T_2$$`) when (`$$R_1$$`,`$$T_1$$`) is
 * given. This is what the described function does. It computes (`$$R$$`,`$$T$$`) such that:
 *
 * `$$R_2=R R_1$$`
 * `$$T_2=R T_1 + T.$$`
 *
 * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
 * coordinate system when given the point's coordinate representation in the first camera's coordinate
 * system:
 *
 * `$$\begin{bmatrix}
 * X_2 \\
 * Y_2 \\
 * Z_2 \\
 * 1
 * \end{bmatrix} = \begin{bmatrix}
 * R & T \\
 * 0 & 1
 * \end{bmatrix} \begin{bmatrix}
 * X_1 \\
 * Y_1 \\
 * Z_1 \\
 * 1
 * \end{bmatrix}.$$`
 *
 * Optionally, it computes the essential matrix E:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R$$`
 *
 * where `$$T_i$$` are components of the translation vector `$$T$$`: `$$T=[T_0, T_1, T_2]^T$$`.
 * And the function can also compute the fundamental matrix F:
 *
 * `$$F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}$$`
 *
 * Besides the stereo-related information, the function can also perform a full calibration of each of
 * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
 * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
 * estimated with high accuracy for each of the cameras individually (for example, using
 * #calibrateCamera), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to
 * the function along with the computed intrinsic parameters. Otherwise, if all the parameters are
 * estimated at once, it makes sense to restrict some parameters, for example, by passing the
 * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
 * reasonable assumption.
 *
 * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
 * points in all the available views from both cameras. The function returns the final value of the
 * re-projection error.
 */
+ (double)stereoCalibrateExtended:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:perViewErrors:flags:));
/**
 * Calibrates a stereo camera setup. This function finds the intrinsic parameters
 * for each of the two cameras and the extrinsic parameters between the two cameras.
 *
 * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
 * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
 * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
 * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
 * be equal for each i.
 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
 * observed by the first camera. The same structure as in REF: calibrateCamera.
 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
 * observed by the second camera. The same structure as in REF: calibrateCamera.
 * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
 * REF: calibrateCamera.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description
 * for cameraMatrix1.
 * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
 * description for distCoeffs1.
 * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
 * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
 * points given in the first camera's coordinate system to points in the second camera's
 * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
 * from the first camera's coordinate system to the second camera's coordinate system. Due to its
 * duality, this tuple is equivalent to the position of the first camera with respect to the
 * second camera coordinate system.
 * @param T Output translation vector, see description above.
 * @param E Output essential matrix.
 * @param F Output fundamental matrix.
 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
 *
 * This overload uses the default flags (REF: CALIB_FIX_INTRINSIC) and the default termination
 * criteria; see the overloads above for the full list of supported flags.
 *
 * The function estimates the transformation between two cameras making a stereo pair. If one computes
 * the poses of an object relative to the first camera and to the second camera,
 * (`$$R_1$$`,`$$T_1$$`) and (`$$R_2$$`,`$$T_2$$`), respectively, for a stereo camera where the
 * relative position and orientation between the two cameras are fixed, then those poses are related
 * to each other. This means that, if the relative position and orientation (`$$R$$`,`$$T$$`) of the
 * two cameras is known, it is possible to compute (`$$R_2$$`,`$$T_2$$`) when (`$$R_1$$`,`$$T_1$$`) is
 * given. This is what the described function does. It computes (`$$R$$`,`$$T$$`) such that:
 *
 * `$$R_2=R R_1$$`
 * `$$T_2=R T_1 + T.$$`
 *
 * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
 * coordinate system when given the point's coordinate representation in the first camera's coordinate
 * system:
 *
 * `$$\begin{bmatrix}
 * X_2 \\
 * Y_2 \\
 * Z_2 \\
 * 1
 * \end{bmatrix} = \begin{bmatrix}
 * R & T \\
 * 0 & 1
 * \end{bmatrix} \begin{bmatrix}
 * X_1 \\
 * Y_1 \\
 * Z_1 \\
 * 1
 * \end{bmatrix}.$$`
 *
 * Optionally, it computes the essential matrix E:
 *
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R$$`
 *
 * where `$$T_i$$` are components of the translation vector `$$T$$`: `$$T=[T_0, T_1, T_2]^T$$`.
 * And the function can also compute the fundamental matrix F:
 *
 * `$$F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}$$`
 *
 * Besides the stereo-related information, the function can also perform a full calibration of each of
 * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
 * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
 * estimated with high accuracy for each of the cameras individually (for example, using
 * #calibrateCamera), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to
 * the function along with the computed intrinsic parameters. Otherwise, if all the parameters are
 * estimated at once, it makes sense to restrict some parameters, for example, by passing the
 * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
 * reasonable assumption.
 *
 * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
 * points in all the available views from both cameras. The function returns the final value of the
 * re-projection error.
 */
+ (double)stereoCalibrateExtended:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:perViewErrors:));
//
// double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
//
+ (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:flags:criteria:));
+ (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F flags:(int)flags NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:flags:));
+ (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:));
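// Usage sketch (not part of the original header): stereo calibration with fixed
// intrinsics, assuming the enclosing class is Calib3d and reusing illustrative names
// from the sketches above. cameraMatrix1/distCoeffs1 and cameraMatrix2/distCoeffs2 are
// assumed to come from per-camera calibrateCamera runs, which the documentation above
// recommends; the no-flags overload then defaults to CALIB_FIX_INTRINSIC, so only R,
// T, E, and F are estimated.
//
//   Mat *R = [[Mat alloc] init]; // rotation: first camera frame -> second camera frame
//   Mat *T = [[Mat alloc] init]; // translation: first camera frame -> second camera frame
//   Mat *E = [[Mat alloc] init]; // essential matrix
//   Mat *F = [[Mat alloc] init]; // fundamental matrix
//   double rms = [Calib3d stereoCalibrate:objectPoints
//                            imagePoints1:imagePoints1
//                            imagePoints2:imagePoints2
//                           cameraMatrix1:cameraMatrix1
//                             distCoeffs1:distCoeffs1
//                           cameraMatrix2:cameraMatrix2
//                             distCoeffs2:distCoeffs2
//                               imageSize:imageSize
//                                       R:R T:T E:E F:F];
//   // Per the relations documented above, a pose (R1, T1) in the first camera's frame
//   // maps to the second camera's frame as R2 = R*R1 and T2 = R*T1 + T.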
//
// void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0)
//
/**
 * Computes rectification transforms for each head of a calibrated stereo camera.
 *
 * @param cameraMatrix1 First camera intrinsic matrix.
 * @param distCoeffs1 First camera distortion parameters.
 * @param cameraMatrix2 Second camera intrinsic matrix.
 * @param distCoeffs2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
 * see REF: stereoCalibrate.
 * @param T Translation vector from the coordinate system of the first camera to the second camera,
 * see REF: stereoCalibrate.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
 * brings points given in the unrectified first camera's coordinate system to points in the rectified
 * first camera's coordinate system. In more technical terms, it performs a change of basis from the
 * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
 * brings points given in the unrectified second camera's coordinate system to points in the rectified
 * second camera's coordinate system. In more technical terms, it performs a change of basis from the
 * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
 * rectified first camera's image.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
 * rectified second camera's image.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY. If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
 * images are zoomed and shifted so that only valid pixels are visible (no black areas after
 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
 * pixels from the original images from the cameras are retained in the rectified images (no source
 * image pixels are lost). Any intermediate value yields an intermediate result between
 * those two extreme cases.
 * @param newImageSize New image resolution after rectification. The same size should be passed to
 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
 * is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
 * preserve details in the original image, especially when there is a big radial distortion.
 * @param validPixROI1 Optional output rectangle inside the rectified first camera image where all
 * the pixels are valid. If alpha=0, the ROIs cover the whole images. Otherwise, they are likely to
 * be smaller (see the picture below).
 * @param validPixROI2 Optional output rectangle inside the rectified second camera image where all
 * the pixels are valid. If alpha=0, the ROIs cover the whole images. Otherwise, they are likely to
 * be smaller (see the picture below).
 *
 * The function computes the rotation matrices for each camera that (virtually) make both camera image
 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
 * coordinates. The function distinguishes the following two cases:
 *
 * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
 * mainly along the x-axis (with a possible small vertical shift). In the rectified images, the
 * corresponding epipolar lines in the left and right cameras are horizontal and have the same
 * y-coordinate. P1 and P2 look like:
 *
 * `$$\texttt{P1} = \begin{bmatrix}
 * f & 0 & cx_1 & 0 \\
 * 0 & f & cy & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix}$$`
 *
 * `$$\texttt{P2} = \begin{bmatrix}
 * f & 0 & cx_2 & T_x*f \\
 * 0 & f & cy & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix},$$`
 *
 * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
 * REF: CALIB_ZERO_DISPARITY is set.
 *
 * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
 *
 * `$$\texttt{P1} = \begin{bmatrix}
 * f & 0 & cx & 0 \\
 * 0 & f & cy_1 & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix}$$`
 *
 * `$$\texttt{P2} = \begin{bmatrix}
 * f & 0 & cx & 0 \\
 * 0 & f & cy_2 & T_y*f \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix},$$`
 *
 * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
 * REF: CALIB_ZERO_DISPARITY is set.
 *
 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
 * matrices. The matrices, together with R1 and R2, can then be passed to #initUndistortRectifyMap to
 * initialize the rectification map for each camera.
 *
 * Below is a screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
 * the corresponding image regions. This means that the images are well rectified, which is what most
 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2, whose interiors
 * are all valid pixels.
 *
 * ![image](pics/stereo_undistort.jpg)
 */
+ (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha newImageSize:(Size2i*)newImageSize validPixROI1:(Rect2i*)validPixROI1 validPixROI2:(Rect2i*)validPixROI2 NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:newImageSize:validPixROI1:validPixROI2:));
/**
 * Computes rectification transforms for each head of a calibrated stereo camera.
 *
 * @param cameraMatrix1 First camera intrinsic matrix.
 * @param distCoeffs1 First camera distortion parameters.
 * @param cameraMatrix2 Second camera intrinsic matrix.
 * @param distCoeffs2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
 * see REF: stereoCalibrate.
 * @param T Translation vector from the coordinate system of the first camera to the second camera,
 * see REF: stereoCalibrate.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
 * brings points given in the unrectified first camera's coordinate system to points in the rectified
 * first camera's coordinate system. In more technical terms, it performs a change of basis from the
 * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
 * brings points given in the unrectified second camera's coordinate system to points in the rectified
 * second camera's coordinate system. In more technical terms, it performs a change of basis from the
 * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
 * rectified first camera's image.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
 * rectified second camera's image.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY. If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
 * images are zoomed and shifted so that only valid pixels are visible (no black areas after
 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
 * pixels from the original images from the cameras are retained in the rectified images (no source
 * image pixels are lost). Any intermediate value yields an intermediate result between
 * those two extreme cases.
 * @param newImageSize New image resolution after rectification. The same size should be passed to
 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
 * is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
 * preserve details in the original image, especially when there is a big radial distortion.
 * @param validPixROI1 Optional output rectangle inside the rectified first camera image where all
 * the pixels are valid. If alpha=0, the ROIs cover the whole images. Otherwise, they are likely to
 * be smaller (see the picture below).
 *
 * The function computes the rotation matrices for each camera that (virtually) make both camera image
 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
 * coordinates. The function distinguishes the following two cases:
 *
 * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
 * mainly along the x-axis (with a possible small vertical shift). In the rectified images, the
 * corresponding epipolar lines in the left and right cameras are horizontal and have the same
 * y-coordinate. P1 and P2 look like:
 *
 * `$$\texttt{P1} = \begin{bmatrix}
 * f & 0 & cx_1 & 0 \\
 * 0 & f & cy & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix}$$`
 *
 * `$$\texttt{P2} = \begin{bmatrix}
 * f & 0 & cx_2 & T_x*f \\
 * 0 & f & cy & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix},$$`
 *
 * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
 * REF: CALIB_ZERO_DISPARITY is set.
 *
 * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
 *
 * `$$\texttt{P1} = \begin{bmatrix}
 * f & 0 & cx & 0 \\
 * 0 & f & cy_1 & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix}$$`
 *
 * `$$\texttt{P2} = \begin{bmatrix}
 * f & 0 & cx & 0 \\
 * 0 & f & cy_2 & T_y*f \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix},$$`
 *
 * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
 * REF: CALIB_ZERO_DISPARITY is set.
 *
 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
 * matrices. The matrices, together with R1 and R2, can then be passed to #initUndistortRectifyMap to
 * initialize the rectification map for each camera.
 *
 * Below is a screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
 * the corresponding image regions. This means that the images are well rectified, which is what most
 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2, whose interiors
 * are all valid pixels.
 *
 * ![image](pics/stereo_undistort.jpg)
 */
+ (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha newImageSize:(Size2i*)newImageSize validPixROI1:(Rect2i*)validPixROI1 NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:newImageSize:validPixROI1:));
/**
 * Computes rectification transforms for each head of a calibrated stereo camera.
 *
 * @param cameraMatrix1 First camera intrinsic matrix.
 * @param distCoeffs1 First camera distortion parameters.
 * @param cameraMatrix2 Second camera intrinsic matrix.
 * @param distCoeffs2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
 * see REF: stereoCalibrate.
 * @param T Translation vector from the coordinate system of the first camera to the second camera,
 * see REF: stereoCalibrate.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
 * brings points given in the unrectified first camera's coordinate system to points in the rectified
 * first camera's coordinate system. In more technical terms, it performs a change of basis from the
 * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
 * brings points given in the unrectified second camera's coordinate system to points in the rectified
 * second camera's coordinate system. In more technical terms, it performs a change of basis from the
 * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
 * rectified first camera's image.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
 * rectified second camera's image.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY. If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
 * images are zoomed and shifted so that only valid pixels are visible (no black areas after
 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
 * pixels from the original images from the cameras are retained in the rectified images (no source
 * image pixels are lost). Any intermediate value yields an intermediate result between
 * those two extreme cases.
 * @param newImageSize New image resolution after rectification. The same size should be passed to
 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
 * is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
 * preserve details in the original image, especially when there is a big radial distortion.
 *
 * The function computes the rotation matrices for each camera that (virtually) make both camera image
 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
 * coordinates. The function distinguishes the following two cases:
 *
 * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
 * mainly along the x-axis (with a possible small vertical shift). In the rectified images, the
 * corresponding epipolar lines in the left and right cameras are horizontal and have the same
 * y-coordinate. P1 and P2 look like:
 *
 * `$$\texttt{P1} = \begin{bmatrix}
 * f & 0 & cx_1 & 0 \\
 * 0 & f & cy & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix}$$`
 *
 * `$$\texttt{P2} = \begin{bmatrix}
 * f & 0 & cx_2 & T_x*f \\
 * 0 & f & cy & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix},$$`
 *
 * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
 * REF: CALIB_ZERO_DISPARITY is set.
 *
 * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
 *
 * `$$\texttt{P1} = \begin{bmatrix}
 * f & 0 & cx & 0 \\
 * 0 & f & cy_1 & 0 \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix}$$`
 *
 * `$$\texttt{P2} = \begin{bmatrix}
 * f & 0 & cx & 0 \\
 * 0 & f & cy_2 & T_y*f \\
 * 0 & 0 & 1 & 0
 * \end{bmatrix},$$`
 *
 * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
 * REF: CALIB_ZERO_DISPARITY is set.
 *
 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
 * matrices. The matrices, together with R1 and R2, can then be passed to #initUndistortRectifyMap to
 * initialize the rectification map for each camera.
 *
 * Below is a screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
 * the corresponding image regions. This means that the images are well rectified, which is what most
 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2, whose interiors
 * are all valid pixels.
 *
 * ![image](pics/stereo_undistort.jpg)
 */
+ (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha newImageSize:(Size2i*)newImageSize NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:newImageSize:));
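// Usage sketch (not part of the original header): rectifying a calibrated stereo pair,
// assuming the enclosing class is Calib3d and reusing illustrative names from the
// sketches above. R and T come from stereoCalibrate; flags:0 is passed here instead of
// the CALIB_ZERO_DISPARITY default so that no constant spelling has to be assumed, and
// alpha:0 crops the rectified views to valid pixels only. R1/R2 and P1/P2 are then
// typically fed to initUndistortRectifyMap, as described above.
//
//   Mat *R1 = [[Mat alloc] init], *R2 = [[Mat alloc] init];
//   Mat *P1 = [[Mat alloc] init], *P2 = [[Mat alloc] init];
//   Mat *Q  = [[Mat alloc] init]; // 4x4 disparity-to-depth mapping matrix
//   // (0,0) keeps the original imageSize, per the documentation above.
//   Size2i *newImageSize = [[Size2i alloc] initWithWidth:0 height:0];
//   [Calib3d stereoRectify:cameraMatrix1
//              distCoeffs1:distCoeffs1
//            cameraMatrix2:cameraMatrix2
//              distCoeffs2:distCoeffs2
//                imageSize:imageSize
//                        R:R T:T
//                       R1:R1 R2:R2 P1:P1 P2:P2 Q:Q
//                    flags:0
//                    alpha:0
//             newImageSize:newImageSize];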
  3926. /**
  3927. * Computes rectification transforms for each head of a calibrated stereo camera.
  3928. *
  3929. * @param cameraMatrix1 First camera intrinsic matrix.
  3930. * @param distCoeffs1 First camera distortion parameters.
  3931. * @param cameraMatrix2 Second camera intrinsic matrix.
  3932. * @param distCoeffs2 Second camera distortion parameters.
  3933. * @param imageSize Size of the image used for stereo calibration.
  3934. * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
  3935. * see REF: stereoCalibrate.
  3936. * @param T Translation vector from the coordinate system of the first camera to the second camera,
  3937. * see REF: stereoCalibrate.
  3938. * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
  3939. * brings points given in the unrectified first camera's coordinate system to points in the rectified
  3940. * first camera's coordinate system. In more technical terms, it performs a change of basis from the
  3941. * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
  3942. * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
  3943. * brings points given in the unrectified second camera's coordinate system to points in the rectified
  3944. * second camera's coordinate system. In more technical terms, it performs a change of basis from the
  3945. * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
  3946. * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
  3947. * camera, i.e. it projects points given in the rectified first camera coordinate system into the
  3948. * rectified first camera's image.
  3949. * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
  3950. * camera, i.e. it projects points given in the rectified first camera coordinate system into the
  3951. * rectified second camera's image.
  3952. * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
  3953. * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
  3954. * the function makes the principal points of each camera have the same pixel coordinates in the
  3955. * rectified views. And if the flag is not set, the function may still shift the images in the
  3956. * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
  3957. * useful image area.
  3958. * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
  3959. * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
  3960. * images are zoomed and shifted so that only valid pixels are visible (no black areas after
  3961. * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
  3962. * pixels from the original images from the cameras are retained in the rectified images (no source
  3963. * image pixels are lost). Any intermediate value yields an intermediate result between
  3964. * those two extreme cases.
  3972. *
  3973. * The function computes the rotation matrices for each camera that (virtually) make both camera image
  3974. * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
  3975. * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
  3976. * as input. As output, it provides two rotation matrices and also two projection matrices in the new
  3977. * coordinates. The function distinguishes the following two cases:
  3978. *
  3979. * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
  3980. * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
  3981. * corresponding epipolar lines in the left and right cameras are horizontal and have the same
  3982. * y-coordinate. P1 and P2 look like:
  3983. *
  3984. * `$$\texttt{P1} = \begin{bmatrix}
  3985. * f & 0 & cx_1 & 0 \\
  3986. * 0 & f & cy & 0 \\
  3987. * 0 & 0 & 1 & 0
  3988. * \end{bmatrix}$$`
  3989. *
  3990. * `$$\texttt{P2} = \begin{bmatrix}
  3991. * f & 0 & cx_2 & T_x*f \\
  3992. * 0 & f & cy & 0 \\
  3993. * 0 & 0 & 1 & 0
  3994. * \end{bmatrix} ,$$`
  3995. *
  3996. * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
  3997. * REF: CALIB_ZERO_DISPARITY is set.
  3998. *
  3999. * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
  4000. * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
  4001. * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
  4002. *
  4003. * `$$\texttt{P1} = \begin{bmatrix}
  4004. * f & 0 & cx & 0 \\
  4005. * 0 & f & cy_1 & 0 \\
  4006. * 0 & 0 & 1 & 0
  4007. * \end{bmatrix}$$`
  4008. *
  4009. * `$$\texttt{P2} = \begin{bmatrix}
  4010. * f & 0 & cx & 0 \\
  4011. * 0 & f & cy_2 & T_y*f \\
  4012. * 0 & 0 & 1 & 0
  4013. * \end{bmatrix},$$`
  4014. *
  4015. * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
  4016. * REF: CALIB_ZERO_DISPARITY is set.
  4017. *
  4018. * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
  4019. * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
  4020. * initialize the rectification map for each camera.
  4021. *
  4022. * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
  4023. * the corresponding image regions. This means that the images are well rectified, which is what most
  4024. * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
  4025. * their interiors are all valid pixels.
  4026. *
  4027. * ![image](pics/stereo_undistort.jpg)
  4028. */
  4029. + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:));
  4030. /**
  4031. * Computes rectification transforms for each head of a calibrated stereo camera.
  4032. *
  4033. * @param cameraMatrix1 First camera intrinsic matrix.
  4034. * @param distCoeffs1 First camera distortion parameters.
  4035. * @param cameraMatrix2 Second camera intrinsic matrix.
  4036. * @param distCoeffs2 Second camera distortion parameters.
  4037. * @param imageSize Size of the image used for stereo calibration.
  4038. * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
  4039. * see REF: stereoCalibrate.
  4040. * @param T Translation vector from the coordinate system of the first camera to the second camera,
  4041. * see REF: stereoCalibrate.
  4042. * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
  4043. * brings points given in the unrectified first camera's coordinate system to points in the rectified
  4044. * first camera's coordinate system. In more technical terms, it performs a change of basis from the
  4045. * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
  4046. * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
  4047. * brings points given in the unrectified second camera's coordinate system to points in the rectified
  4048. * second camera's coordinate system. In more technical terms, it performs a change of basis from the
  4049. * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
  4050. * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
  4051. * camera, i.e. it projects points given in the rectified first camera coordinate system into the
  4052. * rectified first camera's image.
  4053. * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
  4054. * camera, i.e. it projects points given in the rectified first camera coordinate system into the
  4055. * rectified second camera's image.
  4056. * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
  4057. * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
  4058. * the function makes the principal points of each camera have the same pixel coordinates in the
  4059. * rectified views. And if the flag is not set, the function may still shift the images in the
  4060. * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
  4061. * useful image area.
  4075. *
  4076. * The function computes the rotation matrices for each camera that (virtually) make both camera image
  4077. * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
  4078. * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
  4079. * as input. As output, it provides two rotation matrices and also two projection matrices in the new
  4080. * coordinates. The function distinguishes the following two cases:
  4081. *
  4082. * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
  4083. * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
  4084. * corresponding epipolar lines in the left and right cameras are horizontal and have the same
  4085. * y-coordinate. P1 and P2 look like:
  4086. *
  4087. * `$$\texttt{P1} = \begin{bmatrix}
  4088. * f & 0 & cx_1 & 0 \\
  4089. * 0 & f & cy & 0 \\
  4090. * 0 & 0 & 1 & 0
  4091. * \end{bmatrix}$$`
  4092. *
  4093. * `$$\texttt{P2} = \begin{bmatrix}
  4094. * f & 0 & cx_2 & T_x*f \\
  4095. * 0 & f & cy & 0 \\
  4096. * 0 & 0 & 1 & 0
  4097. * \end{bmatrix} ,$$`
  4098. *
  4099. * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
  4100. * REF: CALIB_ZERO_DISPARITY is set.
  4101. *
  4102. * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
  4103. * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
  4104. * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
  4105. *
  4106. * `$$\texttt{P1} = \begin{bmatrix}
  4107. * f & 0 & cx & 0 \\
  4108. * 0 & f & cy_1 & 0 \\
  4109. * 0 & 0 & 1 & 0
  4110. * \end{bmatrix}$$`
  4111. *
  4112. * `$$\texttt{P2} = \begin{bmatrix}
  4113. * f & 0 & cx & 0 \\
  4114. * 0 & f & cy_2 & T_y*f \\
  4115. * 0 & 0 & 1 & 0
  4116. * \end{bmatrix},$$`
  4117. *
  4118. * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
  4119. * REF: CALIB_ZERO_DISPARITY is set.
  4120. *
  4121. * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
  4122. * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
  4123. * initialize the rectification map for each camera.
  4124. *
  4125. * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
  4126. * the corresponding image regions. This means that the images are well rectified, which is what most
  4127. * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
  4128. * their interiors are all valid pixels.
  4129. *
  4130. * ![image](pics/stereo_undistort.jpg)
  4131. */
  4132. + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:));
  4133. /**
  4134. * Computes rectification transforms for each head of a calibrated stereo camera.
  4135. *
  4136. * @param cameraMatrix1 First camera intrinsic matrix.
  4137. * @param distCoeffs1 First camera distortion parameters.
  4138. * @param cameraMatrix2 Second camera intrinsic matrix.
  4139. * @param distCoeffs2 Second camera distortion parameters.
  4140. * @param imageSize Size of the image used for stereo calibration.
  4141. * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
  4142. * see REF: stereoCalibrate.
  4143. * @param T Translation vector from the coordinate system of the first camera to the second camera,
  4144. * see REF: stereoCalibrate.
  4145. * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
  4146. * brings points given in the unrectified first camera's coordinate system to points in the rectified
  4147. * first camera's coordinate system. In more technical terms, it performs a change of basis from the
  4148. * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
  4149. * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
  4150. * brings points given in the unrectified second camera's coordinate system to points in the rectified
  4151. * second camera's coordinate system. In more technical terms, it performs a change of basis from the
  4152. * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
  4153. * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
  4154. * camera, i.e. it projects points given in the rectified first camera coordinate system into the
  4155. * rectified first camera's image.
  4156. * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
  4157. * camera, i.e. it projects points given in the rectified first camera coordinate system into the
  4158. * rectified second camera's image.
  4159. * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
  4177. *
  4178. * The function computes the rotation matrices for each camera that (virtually) make both camera image
  4179. * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
  4180. * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
  4181. * as input. As output, it provides two rotation matrices and also two projection matrices in the new
  4182. * coordinates. The function distinguishes the following two cases:
  4183. *
  4184. * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
  4185. * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
  4186. * corresponding epipolar lines in the left and right cameras are horizontal and have the same
  4187. * y-coordinate. P1 and P2 look like:
  4188. *
  4189. * `$$\texttt{P1} = \begin{bmatrix}
  4190. * f & 0 & cx_1 & 0 \\
  4191. * 0 & f & cy & 0 \\
  4192. * 0 & 0 & 1 & 0
  4193. * \end{bmatrix}$$`
  4194. *
  4195. * `$$\texttt{P2} = \begin{bmatrix}
  4196. * f & 0 & cx_2 & T_x*f \\
  4197. * 0 & f & cy & 0 \\
  4198. * 0 & 0 & 1 & 0
  4199. * \end{bmatrix} ,$$`
  4200. *
  4201. * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
  4202. * REF: CALIB_ZERO_DISPARITY is set.
  4203. *
  4204. * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
  4205. * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
  4206. * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
  4207. *
  4208. * `$$\texttt{P1} = \begin{bmatrix}
  4209. * f & 0 & cx & 0 \\
  4210. * 0 & f & cy_1 & 0 \\
  4211. * 0 & 0 & 1 & 0
  4212. * \end{bmatrix}$$`
  4213. *
  4214. * `$$\texttt{P2} = \begin{bmatrix}
  4215. * f & 0 & cx & 0 \\
  4216. * 0 & f & cy_2 & T_y*f \\
  4217. * 0 & 0 & 1 & 0
  4218. * \end{bmatrix},$$`
  4219. *
  4220. * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
  4221. * REF: CALIB_ZERO_DISPARITY is set.
  4222. *
  4223. * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
  4224. * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
  4225. * initialize the rectification map for each camera.
  4226. *
  4227. * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
  4228. * the corresponding image regions. This means that the images are well rectified, which is what most
  4229. * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
  4230. * their interiors are all valid pixels.
  4231. *
  4232. * ![image](pics/stereo_undistort.jpg)
  4233. */
  4234. + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:));
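For readers who pass Q on to #reprojectImageTo3D, it may help to see the form Q takes in the horizontal-stereo case. The following is recalled from the C++ documentation of cv::stereoRectify and should be checked against it:

`$$\texttt{Q} = \begin{bmatrix}
1 & 0 & 0 & -cx_1 \\
0 & 1 & 0 & -cy \\
0 & 0 & 0 & f \\
0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
\end{bmatrix},$$`

so that a pixel `$$(x, y)$$` with disparity `$$d$$` is mapped to `$$[X\ Y\ Z\ W]^T = \texttt{Q}\,[x\ y\ d\ 1]^T$$` and its 3D position is `$$(X/W, Y/W, Z/W)$$`.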
  4235. //
  4236. // bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5)
  4237. //
  4238. /**
  4239. * Computes a rectification transform for an uncalibrated stereo camera.
  4240. *
  4241. * @param points1 Array of feature points in the first image.
  4242. * @param points2 The corresponding points in the second image. The same formats as in
  4243. * #findFundamentalMat are supported.
  4244. * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
  4245. * #findFundamentalMat .
  4246. * @param imgSize Size of the image.
  4247. * @param H1 Output rectification homography matrix for the first image.
  4248. * @param H2 Output rectification homography matrix for the second image.
  4249. * @param threshold Optional threshold used to filter out the outliers. If the parameter is greater
  4250. * than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
  4251. * for which `$$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}$$` ) are
  4252. * rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
  4253. *
4254. * The function computes the rectification transformations without knowing the intrinsic parameters of the
4255. * cameras and their relative position in space, which explains the suffix "uncalibrated". Another
  4256. * related difference from #stereoRectify is that the function outputs not the rectification
  4257. * transformations in the object (3D) space, but the planar perspective transformations encoded by the
  4258. * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
  4259. *
  4260. * NOTE:
  4261. * While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
  4262. * depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
  4263. * it would be better to correct it before computing the fundamental matrix and calling this
4264. * function. For example, distortion coefficients can be estimated for each head of the stereo camera
  4265. * separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
  4266. * just the point coordinates can be corrected with #undistortPoints .
  4267. */
  4268. + (BOOL)stereoRectifyUncalibrated:(Mat*)points1 points2:(Mat*)points2 F:(Mat*)F imgSize:(Size2i*)imgSize H1:(Mat*)H1 H2:(Mat*)H2 threshold:(double)threshold NS_SWIFT_NAME(stereoRectifyUncalibrated(points1:points2:F:imgSize:H1:H2:threshold:));
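A hedged Swift sketch of the uncalibrated path, under the same naming assumptions as the earlier example; the matched points and the fundamental matrix are presumed to come from #findFundamentalMat on the same pairs.

```swift
import opencv2

// Hypothetical helper: points1/points2 are matched feature points in a format
// findFundamentalMat accepts, and F was estimated from those same pairs.
func rectifyUncalibrated(points1: Mat, points2: Mat, F: Mat,
                         imgSize: Size2i) -> (H1: Mat, H2: Mat)? {
    let H1 = Mat(), H2 = Mat()
    let ok = Calib3d.stereoRectifyUncalibrated(points1: points1, points2: points2,
                                               F: F, imgSize: imgSize,
                                               H1: H1, H2: H2,
                                               threshold: 5.0) // epipolar outlier cutoff
    // H1/H2 are image-plane homographies; on success, warp each image with them
    // (e.g. via warpPerspective) instead of applying 3D rectification transforms.
    return ok ? (H1, H2) : nil
}
```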
  4269. /**
  4270. * Computes a rectification transform for an uncalibrated stereo camera.
  4271. *
  4272. * @param points1 Array of feature points in the first image.
  4273. * @param points2 The corresponding points in the second image. The same formats as in
  4274. * #findFundamentalMat are supported.
  4275. * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
  4276. * #findFundamentalMat .
  4277. * @param imgSize Size of the image.
  4278. * @param H1 Output rectification homography matrix for the first image.
  4279. * @param H2 Output rectification homography matrix for the second image.
  4283. *
4284. * The function computes the rectification transformations without knowing the intrinsic parameters of the
4285. * cameras and their relative position in space, which explains the suffix "uncalibrated". Another
  4286. * related difference from #stereoRectify is that the function outputs not the rectification
  4287. * transformations in the object (3D) space, but the planar perspective transformations encoded by the
  4288. * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
  4289. *
  4290. * NOTE:
  4291. * While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
  4292. * depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
  4293. * it would be better to correct it before computing the fundamental matrix and calling this
4294. * function. For example, distortion coefficients can be estimated for each head of the stereo camera
  4295. * separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
  4296. * just the point coordinates can be corrected with #undistortPoints .
  4297. */
  4298. + (BOOL)stereoRectifyUncalibrated:(Mat*)points1 points2:(Mat*)points2 F:(Mat*)F imgSize:(Size2i*)imgSize H1:(Mat*)H1 H2:(Mat*)H2 NS_SWIFT_NAME(stereoRectifyUncalibrated(points1:points2:F:imgSize:H1:H2:));
  4299. //
  4300. // float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags)
  4301. //
  4302. + (float)rectify3Collinear:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 cameraMatrix3:(Mat*)cameraMatrix3 distCoeffs3:(Mat*)distCoeffs3 imgpt1:(NSArray<Mat*>*)imgpt1 imgpt3:(NSArray<Mat*>*)imgpt3 imageSize:(Size2i*)imageSize R12:(Mat*)R12 T12:(Mat*)T12 R13:(Mat*)R13 T13:(Mat*)T13 R1:(Mat*)R1 R2:(Mat*)R2 R3:(Mat*)R3 P1:(Mat*)P1 P2:(Mat*)P2 P3:(Mat*)P3 Q:(Mat*)Q alpha:(double)alpha newImgSize:(Size2i*)newImgSize roi1:(Rect2i*)roi1 roi2:(Rect2i*)roi2 flags:(int)flags NS_SWIFT_NAME(rectify3Collinear(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:cameraMatrix3:distCoeffs3:imgpt1:imgpt3:imageSize:R12:T12:R13:T13:R1:R2:R3:P1:P2:P3:Q:alpha:newImgSize:roi1:roi2:flags:));
  4303. //
  4304. // Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false)
  4305. //
  4306. /**
  4307. * Returns the new camera intrinsic matrix based on the free scaling parameter.
  4308. *
  4309. * @param cameraMatrix Input camera intrinsic matrix.
  4310. * @param distCoeffs Input vector of distortion coefficients
  4311. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  4312. * assumed.
  4313. * @param imageSize Original image size.
  4314. * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
  4315. * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
  4316. * #stereoRectify for details.
  4317. * @param newImgSize Image size after rectification. By default, it is set to imageSize .
  4318. * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
  4319. * undistorted image. See roi1, roi2 description in #stereoRectify .
  4320. * @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the
  4321. * principal point should be at the image center or not. By default, the principal point is chosen to
  4322. * best fit a subset of the source image (determined by alpha) to the corrected image.
  4323. * @return new_camera_matrix Output new camera intrinsic matrix.
  4324. *
  4325. * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
4326. * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
4327. * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
4328. * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
4329. * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
4330. * coefficients, the computed new camera intrinsic matrix, and newImgSize should be passed to
  4331. * #initUndistortRectifyMap to produce the maps for #remap .
  4332. */
  4333. + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha newImgSize:(Size2i*)newImgSize validPixROI:(Rect2i*)validPixROI centerPrincipalPoint:(BOOL)centerPrincipalPoint NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:newImgSize:validPixROI:centerPrincipalPoint:));
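The same caveats apply to this Swift sketch (the class name and the `Rect2i()` initializer are assumptions): it asks for a matrix that keeps every source pixel and reports where the all-good-pixel region lands.

```swift
import opencv2

// Hypothetical helper: K/dist are the calibrated intrinsics and distortion.
func optimalMatrixKeepingAllPixels(K: Mat, dist: Mat, imageSize: Size2i)
        -> (newK: Mat, validROI: Rect2i) {
    let roi = Rect2i()   // filled in by the call (the validPixROI output)
    let newK = Calib3d.getOptimalNewCameraMatrix(cameraMatrix: K, distCoeffs: dist,
                                                 imageSize: imageSize,
                                                 alpha: 1.0,          // keep all source pixels
                                                 newImgSize: imageSize,
                                                 validPixROI: roi,
                                                 centerPrincipalPoint: false)
    // K, dist and newK then go to initUndistortRectifyMap to build maps for remap.
    return (newK, roi)
}
```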
  4334. /**
  4335. * Returns the new camera intrinsic matrix based on the free scaling parameter.
  4336. *
  4337. * @param cameraMatrix Input camera intrinsic matrix.
  4338. * @param distCoeffs Input vector of distortion coefficients
  4339. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  4340. * assumed.
  4341. * @param imageSize Original image size.
  4342. * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
  4343. * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
  4344. * #stereoRectify for details.
  4345. * @param newImgSize Image size after rectification. By default, it is set to imageSize .
  4346. * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
  4347. * undistorted image. See roi1, roi2 description in #stereoRectify .
  4350. * @return new_camera_matrix Output new camera intrinsic matrix.
  4351. *
  4352. * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
4353. * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
4354. * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
4355. * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
4356. * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
4357. * coefficients, the computed new camera intrinsic matrix, and newImgSize should be passed to
  4358. * #initUndistortRectifyMap to produce the maps for #remap .
  4359. */
  4360. + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha newImgSize:(Size2i*)newImgSize validPixROI:(Rect2i*)validPixROI NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:newImgSize:validPixROI:));
  4361. /**
  4362. * Returns the new camera intrinsic matrix based on the free scaling parameter.
  4363. *
  4364. * @param cameraMatrix Input camera intrinsic matrix.
  4365. * @param distCoeffs Input vector of distortion coefficients
  4366. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  4367. * assumed.
  4368. * @param imageSize Original image size.
  4369. * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
  4370. * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
  4371. * #stereoRectify for details.
  4372. * @param newImgSize Image size after rectification. By default, it is set to imageSize .
  4376. * @return new_camera_matrix Output new camera intrinsic matrix.
  4377. *
  4378. * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
4379. * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
4380. * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
4381. * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
4382. * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
4383. * coefficients, the computed new camera intrinsic matrix, and newImgSize should be passed to
  4384. * #initUndistortRectifyMap to produce the maps for #remap .
  4385. */
  4386. + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha newImgSize:(Size2i*)newImgSize NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:newImgSize:));
  4387. /**
  4388. * Returns the new camera intrinsic matrix based on the free scaling parameter.
  4389. *
  4390. * @param cameraMatrix Input camera intrinsic matrix.
  4391. * @param distCoeffs Input vector of distortion coefficients
  4392. * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
  4393. * assumed.
  4394. * @param imageSize Original image size.
  4395. * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
  4396. * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
  4397. * #stereoRectify for details.
  4401. * @return new_camera_matrix Output new camera intrinsic matrix.
  4402. *
  4403. * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
4404. * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
4405. * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
4406. * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
4407. * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
4408. * coefficients, the computed new camera intrinsic matrix, and the chosen new image size should be passed to
  4409. * #initUndistortRectifyMap to produce the maps for #remap .
  4410. */
  4411. + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:));
  4412. //
  4413. // void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
  4414. //
  4415. /**
  4416. * Computes Hand-Eye calibration: `$$_{}^{g}\textrm{T}_c$$`
  4417. *
  4418. * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
  4419. * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
4420. * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  4421. * for all the transformations from gripper frame to robot base frame.
  4422. * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
  4423. * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
  4424. * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  4425. * from gripper frame to robot base frame.
  4426. * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
  4427. * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
4428. * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  4429. * for all the transformations from calibration target frame to camera frame.
4430. * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
  4431. * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
  4432. * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  4433. * from calibration target frame to camera frame.
  4434. * @param R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  4435. * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
  4436. * @param t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  4437. * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
4438. * @param method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
  4439. *
4440. * The function performs Hand-Eye calibration using various methods. One approach consists in estimating the
4441. * rotation and then the translation (separable solutions); the following methods are implemented:
4442. * - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
  4443. * - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
  4444. * - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
  4445. *
4446. * Another approach consists in estimating the rotation and the translation simultaneously (simultaneous solutions),
  4447. * with the following implemented methods:
  4448. * - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
  4449. * - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
  4450. *
4451. * The following picture describes the Hand-Eye calibration problem, where the transformation between a camera ("eye")
4452. * and the robot gripper ("hand") on which it is mounted has to be estimated. This configuration is called eye-in-hand.
  4453. *
4454. * The eye-to-hand configuration consists of a static camera observing a calibration pattern mounted on the robot
  4455. * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
  4456. * the suitable transformations to the function, see below.
  4457. *
  4458. * ![](pics/hand-eye_figure.png)
  4459. *
  4460. * The calibration procedure is the following:
  4461. * - a static calibration pattern is used to estimate the transformation between the target frame
  4462. * and the camera frame
  4463. * - the robot gripper is moved in order to acquire several poses
  4464. * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
  4465. * instance the robot kinematics
  4466. * `$$
  4467. * \begin{bmatrix}
  4468. * X_b\\
  4469. * Y_b\\
  4470. * Z_b\\
  4471. * 1
  4472. * \end{bmatrix}
  4473. * =
  4474. * \begin{bmatrix}
  4475. * _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
  4476. * 0_{1 \times 3} & 1
  4477. * \end{bmatrix}
  4478. * \begin{bmatrix}
  4479. * X_g\\
  4480. * Y_g\\
  4481. * Z_g\\
  4482. * 1
  4483. * \end{bmatrix}
  4484. * $$`
  4485. * - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
  4486. * for instance a pose estimation method (PnP) from 2D-3D point correspondences
  4487. * `$$
  4488. * \begin{bmatrix}
  4489. * X_c\\
  4490. * Y_c\\
  4491. * Z_c\\
  4492. * 1
  4493. * \end{bmatrix}
  4494. * =
  4495. * \begin{bmatrix}
  4496. * _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
  4497. * 0_{1 \times 3} & 1
  4498. * \end{bmatrix}
  4499. * \begin{bmatrix}
  4500. * X_t\\
  4501. * Y_t\\
  4502. * Z_t\\
  4503. * 1
  4504. * \end{bmatrix}
  4505. * $$`
  4506. *
  4507. * The Hand-Eye calibration procedure returns the following homogeneous transformation
  4508. * `$$
  4509. * \begin{bmatrix}
  4510. * X_g\\
  4511. * Y_g\\
  4512. * Z_g\\
  4513. * 1
  4514. * \end{bmatrix}
  4515. * =
  4516. * \begin{bmatrix}
  4517. * _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
  4518. * 0_{1 \times 3} & 1
  4519. * \end{bmatrix}
  4520. * \begin{bmatrix}
  4521. * X_c\\
  4522. * Y_c\\
  4523. * Z_c\\
  4524. * 1
  4525. * \end{bmatrix}
  4526. * $$`
  4527. *
  4528. * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}$$` equation:
  4529. * - for an eye-in-hand configuration
  4530. * `$$
  4531. * \begin{align*}
  4532. * ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
  4533. * \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
  4534. *
  4535. * (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
  4536. * \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
  4537. *
  4538. * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
  4539. * \end{align*}
  4540. * $$`
  4541. *
  4542. * - for an eye-to-hand configuration
  4543. * `$$
  4544. * \begin{align*}
  4545. * ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
  4546. * \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
  4547. *
  4548. * (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
  4549. * \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
  4550. *
  4551. * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
  4552. * \end{align*}
  4553. * $$`
  4554. *
  4555. * \note
  4556. * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
  4557. * \note
4558. * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation,
4559. * so at least 3 different poses are required; it is strongly recommended to use many more poses.
  4560. */
  4561. + (void)calibrateHandEye:(NSArray<Mat*>*)R_gripper2base t_gripper2base:(NSArray<Mat*>*)t_gripper2base R_target2cam:(NSArray<Mat*>*)R_target2cam t_target2cam:(NSArray<Mat*>*)t_target2cam R_cam2gripper:(Mat*)R_cam2gripper t_cam2gripper:(Mat*)t_cam2gripper method:(HandEyeCalibrationMethod)method NS_SWIFT_NAME(calibrateHandEye(R_gripper2base:t_gripper2base:R_target2cam:t_target2cam:R_cam2gripper:t_cam2gripper:method:));
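An eye-in-hand Swift sketch under the usual assumptions (the `Calib3d` class name, and the enum case spelling mirroring the C++ constant CALIB_HAND_EYE_TSAI). The pose lists are presumed to have been collected exactly as the procedure above describes: robot kinematics for gripper-to-base, PnP for target-to-camera.

```swift
import opencv2

// Hypothetical helper wrapping the call; each input array holds one entry per
// recorded robot pose ((3x3) matrices or (3x1) vectors for the rotation parts,
// (3x1) vectors for the translation parts).
func handEye(rGripper2Base: [Mat], tGripper2Base: [Mat],
             rTarget2Cam: [Mat], tTarget2Cam: [Mat]) -> (R: Mat, t: Mat) {
    let R_cam2gripper = Mat(), t_cam2gripper = Mat()
    Calib3d.calibrateHandEye(R_gripper2base: rGripper2Base,
                             t_gripper2base: tGripper2Base,
                             R_target2cam: rTarget2Cam,
                             t_target2cam: tTarget2Cam,
                             R_cam2gripper: R_cam2gripper,
                             t_cam2gripper: t_cam2gripper,
                             method: .CALIB_HAND_EYE_TSAI) // Tsai's separable method
    return (R_cam2gripper, t_cam2gripper)
}
```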
  4562. /**
  4563. * Computes Hand-Eye calibration: `$$_{}^{g}\textrm{T}_c$$`
  4564. *
  4565. * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
  4566. * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
4567. * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  4568. * for all the transformations from gripper frame to robot base frame.
  4569. * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
  4570. * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
  4571. * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  4572. * from gripper frame to robot base frame.
  4573. * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
  4574. * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
4575. * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  4576. * for all the transformations from calibration target frame to camera frame.
4577. * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
  4578. * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
  4579. * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  4580. * from calibration target frame to camera frame.
  4581. * @param R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  4582. * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
  4583. * @param t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  4584. * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
  4585. *
4586. * The function performs Hand-Eye calibration using various methods. One approach consists in estimating the
4587. * rotation and then the translation (separable solutions); the following methods are implemented:
4588. * - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
  4589. * - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
  4590. * - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
  4591. *
4592. * Another approach consists in estimating the rotation and the translation simultaneously (simultaneous solutions),
  4593. * with the following implemented methods:
  4594. * - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
  4595. * - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
  4596. *
4597. * The following picture describes the Hand-Eye calibration problem, where the transformation between a camera ("eye")
4598. * and the robot gripper ("hand") on which it is mounted has to be estimated. This configuration is called eye-in-hand.
  4599. *
4600. * The eye-to-hand configuration consists of a static camera observing a calibration pattern mounted on the robot
  4601. * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
  4602. * the suitable transformations to the function, see below.
  4603. *
  4604. * ![](pics/hand-eye_figure.png)
  4605. *
  4606. * The calibration procedure is the following:
  4607. * - a static calibration pattern is used to estimate the transformation between the target frame
  4608. * and the camera frame
  4609. * - the robot gripper is moved in order to acquire several poses
  4610. * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
  4611. * instance the robot kinematics
  4612. * `$$
  4613. * \begin{bmatrix}
  4614. * X_b\\
  4615. * Y_b\\
  4616. * Z_b\\
  4617. * 1
  4618. * \end{bmatrix}
  4619. * =
  4620. * \begin{bmatrix}
  4621. * _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
  4622. * 0_{1 \times 3} & 1
  4623. * \end{bmatrix}
  4624. * \begin{bmatrix}
  4625. * X_g\\
  4626. * Y_g\\
  4627. * Z_g\\
  4628. * 1
  4629. * \end{bmatrix}
  4630. * $$`
  4631. * - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
  4632. * for instance a pose estimation method (PnP) from 2D-3D point correspondences
  4633. * `$$
  4634. * \begin{bmatrix}
  4635. * X_c\\
  4636. * Y_c\\
  4637. * Z_c\\
  4638. * 1
  4639. * \end{bmatrix}
  4640. * =
  4641. * \begin{bmatrix}
  4642. * _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
  4643. * 0_{1 \times 3} & 1
  4644. * \end{bmatrix}
  4645. * \begin{bmatrix}
  4646. * X_t\\
  4647. * Y_t\\
  4648. * Z_t\\
  4649. * 1
  4650. * \end{bmatrix}
  4651. * $$`
  4652. *
  4653. * The Hand-Eye calibration procedure returns the following homogeneous transformation
  4654. * `$$
  4655. * \begin{bmatrix}
  4656. * X_g\\
  4657. * Y_g\\
  4658. * Z_g\\
  4659. * 1
  4660. * \end{bmatrix}
  4661. * =
  4662. * \begin{bmatrix}
  4663. * _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
  4664. * 0_{1 \times 3} & 1
  4665. * \end{bmatrix}
  4666. * \begin{bmatrix}
  4667. * X_c\\
  4668. * Y_c\\
  4669. * Z_c\\
  4670. * 1
  4671. * \end{bmatrix}
  4672. * $$`
  4673. *
  4674. * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}$$` equation:
  4675. * - for an eye-in-hand configuration
  4676. * `$$
  4677. * \begin{align*}
  4678. * ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
  4679. * \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
  4680. *
  4681. * (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
  4682. * \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
  4683. *
  4684. * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
  4685. * \end{align*}
  4686. * $$`
  4687. *
  4688. * - for an eye-to-hand configuration
  4689. * `$$
  4690. * \begin{align*}
  4691. * ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
  4692. * \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
  4693. *
  4694. * (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
  4695. * \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
  4696. *
  4697. * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
  4698. * \end{align*}
  4699. * $$`
  4700. *
  4701. * \note
  4702. * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
  4703. * \note
4704. * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation,
4705. * so at least 3 different poses are required; it is strongly recommended to use many more poses.
  4706. */
  4707. + (void)calibrateHandEye:(NSArray<Mat*>*)R_gripper2base t_gripper2base:(NSArray<Mat*>*)t_gripper2base R_target2cam:(NSArray<Mat*>*)R_target2cam t_target2cam:(NSArray<Mat*>*)t_target2cam R_cam2gripper:(Mat*)R_cam2gripper t_cam2gripper:(Mat*)t_cam2gripper NS_SWIFT_NAME(calibrateHandEye(R_gripper2base:t_gripper2base:R_target2cam:t_target2cam:R_cam2gripper:t_cam2gripper:));
  4708. //
  4709. // void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
  4710. //
  4711. /**
  4712. * Computes Robot-World/Hand-Eye calibration: `$$_{}^{w}\textrm{T}_b$$` and `$$_{}^{c}\textrm{T}_g$$`
  4713. *
  4714. * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
  4715. * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
4716. * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  4717. * for all the transformations from world frame to the camera frame.
  4718. * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
  4719. * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
  4720. * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  4721. * from world frame to the camera frame.
  4722. * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
  4723. * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
4724. * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  4725. * for all the transformations from robot base frame to the gripper frame.
4726. * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
  4727. * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
  4728. * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  4729. * from robot base frame to the gripper frame.
  4730. * @param R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  4731. * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
  4732. * @param t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  4733. * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
  4734. * @param R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  4735. * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
  4736. * @param t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  4737. * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
4738. * @param method One of the implemented Robot-World/Hand-Eye calibration methods, see cv::RobotWorldHandEyeCalibrationMethod
  4739. *
4740. * The function performs Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
4741. * rotation and then the translation (separable solutions):
4742. * - M. Shah, Solving the robot-world/hand-eye calibration problem using the Kronecker product \cite Shah2013SolvingTR
  4743. *
4744. * Another approach consists in estimating the rotation and the translation simultaneously (simultaneous solutions),
  4745. * with the following implemented method:
4746. * - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and Kronecker product \cite Li2010SimultaneousRA
  4747. *
4748. * The following picture describes the Robot-World/Hand-Eye calibration problem, where the transformations between the robot base frame and the world frame,
4749. * and between the robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector, have to be estimated.
  4750. *
  4751. * ![](pics/robot-world_hand-eye_figure.png)
  4752. *
  4753. * The calibration procedure is the following:
  4754. * - a static calibration pattern is used to estimate the transformation between the target frame
  4755. * and the camera frame
  4756. * - the robot gripper is moved in order to acquire several poses
  4757. * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
  4758. * instance the robot kinematics
  4759. * `$$
  4760. * \begin{bmatrix}
  4761. * X_g\\
  4762. * Y_g\\
  4763. * Z_g\\
  4764. * 1
  4765. * \end{bmatrix}
  4766. * =
  4767. * \begin{bmatrix}
  4768. * _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
  4769. * 0_{1 \times 3} & 1
  4770. * \end{bmatrix}
  4771. * \begin{bmatrix}
  4772. * X_b\\
  4773. * Y_b\\
  4774. * Z_b\\
  4775. * 1
  4776. * \end{bmatrix}
  4777. * $$`
  4778. * - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
  4779. * for instance a pose estimation method (PnP) from 2D-3D point correspondences
  4780. * `$$
  4781. * \begin{bmatrix}
  4782. * X_c\\
  4783. * Y_c\\
  4784. * Z_c\\
  4785. * 1
  4786. * \end{bmatrix}
  4787. * =
  4788. * \begin{bmatrix}
  4789. * _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
  4790. * 0_{1 \times 3} & 1
  4791. * \end{bmatrix}
  4792. * \begin{bmatrix}
  4793. * X_w\\
  4794. * Y_w\\
  4795. * Z_w\\
  4796. * 1
  4797. * \end{bmatrix}
  4798. * $$`
  4799. *
  4800. * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
  4801. * `$$
  4802. * \begin{bmatrix}
  4803. * X_w\\
  4804. * Y_w\\
  4805. * Z_w\\
  4806. * 1
  4807. * \end{bmatrix}
  4808. * =
  4809. * \begin{bmatrix}
  4810. * _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
  4811. * 0_{1 \times 3} & 1
  4812. * \end{bmatrix}
  4813. * \begin{bmatrix}
  4814. * X_b\\
  4815. * Y_b\\
  4816. * Z_b\\
  4817. * 1
  4818. * \end{bmatrix}
  4819. * $$`
  4820. * `$$
  4821. * \begin{bmatrix}
  4822. * X_c\\
  4823. * Y_c\\
  4824. * Z_c\\
  4825. * 1
  4826. * \end{bmatrix}
  4827. * =
  4828. * \begin{bmatrix}
  4829. * _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
  4830. * 0_{1 \times 3} & 1
  4831. * \end{bmatrix}
  4832. * \begin{bmatrix}
  4833. * X_g\\
  4834. * Y_g\\
  4835. * Z_g\\
  4836. * 1
  4837. * \end{bmatrix}
  4838. * $$`
  4839. *
  4840. * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}$$` equation, with:
  4841. * - `$$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w$$`
  4842. * - `$$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b$$`
  4843. * - `$$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g$$`
  4844. * - `$$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b$$`
  4845. *
  4846. * \note
  4847. * At least 3 measurements are required (input vectors size must be greater or equal to 3).
  4848. */
  4849. + (void)calibrateRobotWorldHandEye:(NSArray<Mat*>*)R_world2cam t_world2cam:(NSArray<Mat*>*)t_world2cam R_base2gripper:(NSArray<Mat*>*)R_base2gripper t_base2gripper:(NSArray<Mat*>*)t_base2gripper R_base2world:(Mat*)R_base2world t_base2world:(Mat*)t_base2world R_gripper2cam:(Mat*)R_gripper2cam t_gripper2cam:(Mat*)t_gripper2cam method:(RobotWorldHandEyeCalibrationMethod)method NS_SWIFT_NAME(calibrateRobotWorldHandEye(R_world2cam:t_world2cam:R_base2gripper:t_base2gripper:R_base2world:t_base2world:R_gripper2cam:t_gripper2cam:method:));
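/*
 * Usage sketch (Swift, illustrative only): a minimal example of driving this binding, assuming
 * the method is exposed as a class method of the `Calib3d` wrapper class (as the NS_SWIFT_NAME
 * above suggests) and that one `(3x3)` rotation / `(3x1)` translation Mat pair has been
 * collected per robot pose. The enum case name follows the generated
 * RobotWorldHandEyeCalibrationMethod constants and may differ in your build.
 *
 *     import opencv2
 *
 *     var Rw2c = [Mat](), tw2c = [Mat]()   // world -> camera, one entry per pose (e.g. from PnP)
 *     var Rb2g = [Mat](), tb2g = [Mat]()   // base -> gripper, one entry per pose (robot kinematics)
 *     // ... append at least 3 poses to each of the four arrays ...
 *     let Rb2w = Mat(), tb2w = Mat()       // output: base -> world transformation
 *     let Rg2c = Mat(), tg2c = Mat()       // output: gripper -> camera transformation
 *     Calib3d.calibrateRobotWorldHandEye(
 *         R_world2cam: Rw2c, t_world2cam: tw2c,
 *         R_base2gripper: Rb2g, t_base2gripper: tb2g,
 *         R_base2world: Rb2w, t_base2world: tb2w,
 *         R_gripper2cam: Rg2c, t_gripper2cam: tg2c,
 *         method: .CALIB_ROBOT_WORLD_HAND_EYE_SHAH)  // assumed case name; adjust to your binding
 */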
/**
 * Computes Robot-World/Hand-Eye calibration: `$$_{}^{w}\textrm{T}_b$$` and `$$_{}^{c}\textrm{T}_g$$`
 *
 * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
 * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
 * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
 * for all the transformations from the world frame to the camera frame.
 * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
 * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
 * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
 * from the world frame to the camera frame.
 * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
 * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
 * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
 * for all the transformations from the robot base frame to the gripper frame.
 * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
 * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
 * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
 * from the robot base frame to the gripper frame.
 * @param R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
 * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
 * @param t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
 * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
 * @param R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
 * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
 * @param t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
 * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
 *
 * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists of estimating
 * first the rotation and then the translation (separable solutions):
 * - M. Shah, Solving the robot-world/hand-eye calibration problem using the Kronecker product \cite Shah2013SolvingTR
 *
 * Another approach estimates the rotation and the translation simultaneously (simultaneous solutions),
 * with the following implemented method:
 * - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and Kronecker product \cite Li2010SimultaneousRA
 *
 * The following picture describes the Robot-World/Hand-Eye calibration problem, where the transformations between a robot and a world frame
 * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
 *
 * ![](pics/robot-world_hand-eye_figure.png)
 *
 * The calibration procedure is the following:
 * - a static calibration pattern is used to estimate the transformation between the target frame
 * and the camera frame
 * - the robot gripper is moved in order to acquire several poses
 * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using,
 * for instance, the robot kinematics
 * `$$
 * \begin{bmatrix} X_g \\ Y_g \\ Z_g \\ 1 \end{bmatrix}
 * =
 * \begin{bmatrix} _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\ 0_{1 \times 3} & 1 \end{bmatrix}
 * \begin{bmatrix} X_b \\ Y_b \\ Z_b \\ 1 \end{bmatrix}
 * $$`
 * - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using,
 * for instance, a pose estimation method (PnP) from 2D-3D point correspondences
 * `$$
 * \begin{bmatrix} X_c \\ Y_c \\ Z_c \\ 1 \end{bmatrix}
 * =
 * \begin{bmatrix} _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\ 0_{1 \times 3} & 1 \end{bmatrix}
 * \begin{bmatrix} X_w \\ Y_w \\ Z_w \\ 1 \end{bmatrix}
 * $$`
 *
 * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
 * `$$
 * \begin{bmatrix} X_w \\ Y_w \\ Z_w \\ 1 \end{bmatrix}
 * =
 * \begin{bmatrix} _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\ 0_{1 \times 3} & 1 \end{bmatrix}
 * \begin{bmatrix} X_b \\ Y_b \\ Z_b \\ 1 \end{bmatrix}
 * $$`
 * `$$
 * \begin{bmatrix} X_c \\ Y_c \\ Z_c \\ 1 \end{bmatrix}
 * =
 * \begin{bmatrix} _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\ 0_{1 \times 3} & 1 \end{bmatrix}
 * \begin{bmatrix} X_g \\ Y_g \\ Z_g \\ 1 \end{bmatrix}
 * $$`
 *
 * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}$$` equation, with:
 * - `$$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w$$`
 * - `$$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b$$`
 * - `$$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g$$`
 * - `$$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b$$`
 *
 * \note
 * At least 3 measurements are required (input vector sizes must be greater than or equal to 3).
 */
+ (void)calibrateRobotWorldHandEye:(NSArray<Mat*>*)R_world2cam t_world2cam:(NSArray<Mat*>*)t_world2cam R_base2gripper:(NSArray<Mat*>*)R_base2gripper t_base2gripper:(NSArray<Mat*>*)t_base2gripper R_base2world:(Mat*)R_base2world t_base2world:(Mat*)t_base2world R_gripper2cam:(Mat*)R_gripper2cam t_gripper2cam:(Mat*)t_gripper2cam NS_SWIFT_NAME(calibrateRobotWorldHandEye(R_world2cam:t_world2cam:R_base2gripper:t_base2gripper:R_base2world:t_base2world:R_gripper2cam:t_gripper2cam:));
//
// void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
//
/**
 * Converts points from Euclidean to homogeneous space.
 *
 * @param src Input vector of N-dimensional points.
 * @param dst Output vector of N+1-dimensional points.
 *
 * The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
 * point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
 */
+ (void)convertPointsToHomogeneous:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(convertPointsToHomogeneous(src:dst:));
//
// void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
//
/**
 * Converts points from homogeneous to Euclidean space.
 *
 * @param src Input vector of N-dimensional points.
 * @param dst Output vector of N-1-dimensional points.
 *
 * The function converts points from homogeneous to Euclidean space using perspective projection. That is,
 * each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
 * output point coordinates will be (0,0,0,...).
 */
+ (void)convertPointsFromHomogeneous:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(convertPointsFromHomogeneous(src:dst:));
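/*
 * Usage sketch (Swift, illustrative only): round-tripping points through both conversions,
 * assuming these bindings live on the `Calib3d` wrapper class and `src` is a Mat of 2D points
 * (type CV_32FC2) prepared by the caller.
 *
 *     import opencv2
 *
 *     let homogeneous = Mat()
 *     Calib3d.convertPointsToHomogeneous(src: src, dst: homogeneous)         // (x, y)    -> (x, y, 1)
 *     let euclidean = Mat()
 *     Calib3d.convertPointsFromHomogeneous(src: homogeneous, dst: euclidean) // (x, y, w) -> (x/w, y/w)
 */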
//
// Mat cv::findFundamentalMat(Mat points1, Mat points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
//
/**
 * Calculates a fundamental matrix from the corresponding points in two images.
 *
 * @param points1 Array of N points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param method Method for computing a fundamental matrix.
 * - REF: FM_7POINT for a 7-point algorithm. `$$N = 7$$`
 * - REF: FM_8POINT for an 8-point algorithm. `$$N \ge 8$$`
 * - REF: FM_RANSAC for the RANSAC algorithm. `$$N \ge 8$$`
 * - REF: FM_LMEDS for the LMedS algorithm. `$$N \ge 8$$`
 * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
 * of confidence (probability) that the estimated matrix is correct.
 * @param mask Optional output mask.
 * @param maxIters The maximum number of robust method iterations.
 *
 * The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T F [p_1; 1] = 0$$`
 *
 * where `$$F$$` is a fundamental matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively.
 *
 * The function calculates the fundamental matrix using one of the four methods listed above and returns
 * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
 * algorithm, the function may return up to 3 solutions (a `$$9 \times 3$$` matrix that stores all 3
 * matrices sequentially).
 *
 * The calculated fundamental matrix may be passed further to computeCorrespondEpilines, which finds the
 * epipolar lines corresponding to the specified points. It can also be passed to
 * #stereoRectifyUncalibrated to compute the rectification transformation:
 *
 *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     Mat fundamental_matrix =
 *         findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
 *
 */
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence maxIters:(int)maxIters mask:(Mat*)mask NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:maxIters:mask:));
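/*
 * Usage sketch (Swift, illustrative only) mirroring the C++ example above, assuming the binding
 * is exposed on the `Calib3d` wrapper class and that `points1`/`points2` are Mats of matched 2D
 * points (type CV_32FC2) prepared by the caller. The method constant is written numerically
 * (cv::FM_RANSAC == 8 in calib3d.hpp); prefer the generated constant if your build exposes one.
 *
 *     import opencv2
 *
 *     let F = Calib3d.findFundamentalMat(points1: points1, points2: points2,
 *                                        method: 8,                  // cv::FM_RANSAC
 *                                        ransacReprojThreshold: 3.0,
 *                                        confidence: 0.99)
 */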
/**
 * Calculates a fundamental matrix from the corresponding points in two images.
 *
 * @param points1 Array of N points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param method Method for computing a fundamental matrix.
 * - REF: FM_7POINT for a 7-point algorithm. `$$N = 7$$`
 * - REF: FM_8POINT for an 8-point algorithm. `$$N \ge 8$$`
 * - REF: FM_RANSAC for the RANSAC algorithm. `$$N \ge 8$$`
 * - REF: FM_LMEDS for the LMedS algorithm. `$$N \ge 8$$`
 * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
 * of confidence (probability) that the estimated matrix is correct.
 * @param maxIters The maximum number of robust method iterations.
 *
 * The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T F [p_1; 1] = 0$$`
 *
 * where `$$F$$` is a fundamental matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively.
 *
 * The function calculates the fundamental matrix using one of the four methods listed above and returns
 * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
 * algorithm, the function may return up to 3 solutions (a `$$9 \times 3$$` matrix that stores all 3
 * matrices sequentially).
 *
 * The calculated fundamental matrix may be passed further to computeCorrespondEpilines, which finds the
 * epipolar lines corresponding to the specified points. It can also be passed to
 * #stereoRectifyUncalibrated to compute the rectification transformation:
 *
 *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     Mat fundamental_matrix =
 *         findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
 *
 */
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence maxIters:(int)maxIters NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:maxIters:));
//
// Mat cv::findFundamentalMat(Mat points1, Mat points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
//
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence mask:(Mat*)mask NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:mask:));
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:));
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:));
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:));
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 NS_SWIFT_NAME(findFundamentalMat(points1:points2:));
//
// Mat cv::findFundamentalMat(Mat points1, Mat points2, Mat& mask, UsacParams params)
//
+ (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 mask:(Mat*)mask params:(UsacParams*)params NS_SWIFT_NAME(findFundamentalMat(points1:points2:mask:params:));
//
// Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
//
/**
 * Calculates an essential matrix from the corresponding points in two images.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$A = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
 * for the other points. The array is computed only in the RANSAC and LMedS methods.
 * @param maxIters The maximum number of robust method iterations.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters mask:(Mat*)mask NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:threshold:maxIters:mask:));
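/*
 * Usage sketch (Swift, illustrative only), assuming the binding is exposed on the `Calib3d`
 * wrapper class; `points1`/`points2` are matched 2D points (CV_32FC2 Mats) and `K` is the
 * (3x3) camera intrinsic Mat, all prepared by the caller. The method constant is written
 * numerically (cv::RANSAC == 8); prefer the generated constant if available.
 *
 *     import opencv2
 *
 *     let mask = Mat()
 *     let E = Calib3d.findEssentialMat(points1: points1, points2: points2,
 *                                      cameraMatrix: K,
 *                                      method: 8,        // cv::RANSAC
 *                                      prob: 0.999,
 *                                      threshold: 1.0,
 *                                      maxIters: 1000,
 *                                      mask: mask)
 *     // E can then be fed to decomposeEssentialMat / recoverPose (declared elsewhere in this header).
 */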
/**
 * Calculates an essential matrix from the corresponding points in two images.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$A = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param maxIters The maximum number of robust method iterations.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:threshold:maxIters:));
/**
 * Calculates an essential matrix from the corresponding points in two images.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$A = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:threshold:));
/**
 * Calculates an essential matrix from the corresponding points in two images.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$A = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:));
/**
 * Calculates an essential matrix from the corresponding points in two images.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$A = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:));
/**
 * Calculates an essential matrix from the corresponding points in two images.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$A = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:));
//
// Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
//
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp principal point of the camera.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
 * for the other points. The array is computed only in the RANSAC and LMedS methods.
 * @param maxIters The maximum number of robust method iterations.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters mask:(Mat*)mask NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:threshold:maxIters:mask:));
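/*
 * Usage sketch (Swift, illustrative only) of the focal/principal-point convenience form,
 * assuming the binding is exposed on the `Calib3d` wrapper class and that Point2d has the
 * generated (x:y:) initializer. A 1280x720 camera with a 1000 px focal length is assumed
 * purely for illustration; it corresponds to A = [1000 0 640; 0 1000 360; 0 0 1].
 *
 *     import opencv2
 *
 *     let E = Calib3d.findEssentialMat(points1: points1, points2: points2,
 *                                      focal: 1000.0,
 *                                      pp: Point2d(x: 640.0, y: 360.0),
 *                                      method: 8,       // cv::RANSAC
 *                                      prob: 0.999,
 *                                      threshold: 1.0)
 */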
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp principal point of the camera.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param maxIters The maximum number of robust method iterations.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:threshold:maxIters:));
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp principal point of the camera.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:threshold:));
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp principal point of the camera.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:));
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp principal point of the camera.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:));
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp principal point of the camera.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:));
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param focal focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:));
/**
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
 * principal point:
 *
 * `$$A = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}$$`
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 NS_SWIFT_NAME(findEssentialMat(points1:points2:));
//
// Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
//
/**
 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix1 Camera matrix `$$K = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param cameraMatrix2 Camera matrix `$$K = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param distCoeffs1 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param distCoeffs2 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
 * for the other points. The array is computed only in the RANSAC and LMedS methods.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method prob:(double)prob threshold:(double)threshold mask:(Mat*)mask NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:prob:threshold:mask:));
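/*
 * Usage sketch (Swift, illustrative only) of the two-camera form, assuming the binding is
 * exposed on the `Calib3d` wrapper class; `K1`/`K2` are the (3x3) intrinsic Mats and `d1`/`d2`
 * the distortion-coefficient Mats of the two cameras (empty Mats for zero distortion), all
 * prepared by the caller.
 *
 *     import opencv2
 *
 *     let mask = Mat()
 *     let E = Calib3d.findEssentialMat(points1: points1, points2: points2,
 *                                      cameraMatrix1: K1, distCoeffs1: d1,
 *                                      cameraMatrix2: K2, distCoeffs2: d2,
 *                                      method: 8,       // cv::RANSAC
 *                                      prob: 0.999,
 *                                      threshold: 1.0,
 *                                      mask: mask)
 */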
/**
 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix1 Camera matrix `$$K = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param cameraMatrix2 Camera matrix `$$K = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param distCoeffs1 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param distCoeffs2 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final essential matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:prob:threshold:));
/**
 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix1 Camera matrix `$$K = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param cameraMatrix2 Camera matrix `$$K = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}$$`.
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param distCoeffs1 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param distCoeffs2 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03.
 * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method prob:(double)prob NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:prob:));
/**
 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param distCoeffs1 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param distCoeffs2 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
 * CITE: SteweniusCFS is also related. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:));
/**
 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
 *
 * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
 * be floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera matrix. If this assumption does not hold for your use case, use
 * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
 * to normalized image coordinates, which are valid for the identity camera matrix. When
 * passing these coordinates, pass the identity matrix for this parameter.
 * @param distCoeffs1 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param distCoeffs2 Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 *
 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
 * CITE: SteweniusCFS is also related. The epipolar geometry is described by the following equation:
 *
 * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
 *
 * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
 * second images, respectively. The result of this function may be passed further to
 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
 */
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:));
//
// Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params)
//
+ (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 cameraMatrix2:(Mat*)cameraMatrix2 dist_coeff1:(Mat*)dist_coeff1 dist_coeff2:(Mat*)dist_coeff2 mask:(Mat*)mask params:(UsacParams*)params NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:cameraMatrix2:dist_coeff1:dist_coeff2:mask:params:));
//
// void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t)
//
/**
 * Decompose an essential matrix to possible rotations and translation.
 *
 * @param E The input essential matrix.
 * @param R1 One possible rotation matrix.
 * @param R2 Another possible rotation matrix.
 * @param t One possible translation.
 *
 * This function decomposes the essential matrix E using SVD decomposition CITE: HartleyZ00. In
 * general, four possible poses exist for the decomposition of E. They are `$$[R_1, t]$$`,
 * `$$[R_1, -t]$$`, `$$[R_2, t]$$`, `$$[R_2, -t]$$`.
 *
 * If E gives the epipolar constraint `$$[p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0$$` between the image
 * points `$$p_1$$` in the first image and `$$p_2$$` in the second image, then any of the tuples
 * `$$[R_1, t]$$`, `$$[R_1, -t]$$`, `$$[R_2, t]$$`, `$$[R_2, -t]$$` is a change of basis from the first
 * camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
 * can only get the direction of the translation. For this reason, the translation t is returned with
 * unit length.
 */
+ (void)decomposeEssentialMat:(Mat*)E R1:(Mat*)R1 R2:(Mat*)R2 t:(Mat*)t NS_SWIFT_NAME(decomposeEssentialMat(E:R1:R2:t:));
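//
// A short Swift sketch of the decomposition above (illustrative; it assumes the opencv2
// framework's `Calib3d` wrapper class and an essential matrix E estimated beforehand):
//
//     let E = Mat()                             // estimated earlier, e.g. by findEssentialMat
//     let R1 = Mat(), R2 = Mat(), t = Mat()     // outputs
//     Calib3d.decomposeEssentialMat(E: E, R1: R1, R2: R2, t: t)
//     // Four candidate poses follow: [R1, t], [R1, -t], [R2, t], [R2, -t];
//     // recoverPose performs the cheirality check that selects among them.
//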
//
// int cv::recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat& E, Mat& R, Mat& t, int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
//
/**
 * Recovers the relative camera rotation and the translation from corresponding points in two images
 * from two different cameras, using the cheirality check. Returns the number of inliers that pass
 * the check.
 *
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param E The output essential matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
 * recover pose. In the output mask only inliers which pass the cheirality check are marked.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Recovering relative pose from corresponding points
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
 *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
 *
 *     // Output: Essential matrix, relative rotation and relative translation.
 *     Mat E, R, t, mask;
 *
 *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
 */
+ (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method prob:(double)prob threshold:(double)threshold mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:prob:threshold:mask:));
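//
// The doc example above, restated as a Swift sketch (illustrative; assumes the opencv2
// framework's `Calib3d` wrapper class; calibration inputs and matched points are prepared
// elsewhere):
//
//     let points1 = MatOfPoint2f(), points2 = MatOfPoint2f()   // matched points
//     let K1 = Mat(), dist1 = Mat(), K2 = Mat(), dist2 = Mat() // calibration inputs
//     let E = Mat(), R = Mat(), t = Mat(), mask = Mat()        // outputs
//     let inlierCount = Calib3d.recoverPose(points1: points1, points2: points2,
//                                           cameraMatrix1: K1, distCoeffs1: dist1,
//                                           cameraMatrix2: K2, distCoeffs2: dist2,
//                                           E: E, R: R, t: t,
//                                           method: 8 /* cv::RANSAC */,
//                                           prob: 0.999, threshold: 1.0, mask: mask)
//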
/**
 * Recovers the relative camera rotation and the translation from corresponding points in two images
 * from two different cameras, using the cheirality check. Returns the number of inliers that pass
 * the check.
 *
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param E The output essential matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
 * line in pixels, beyond which the point is considered an outlier and is not used for computing the
 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
 * point localization, image resolution, and the image noise.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Recovering relative pose from corresponding points
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
 *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
 *
 *     // Output: Essential matrix, relative rotation and relative translation.
 *     Mat E, R, t, mask;
 *
 *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
 */
+ (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:prob:threshold:));
/**
 * Recovers the relative camera rotation and the translation from corresponding points in two images
 * from two different cameras, using the cheirality check. Returns the number of inliers that pass
 * the check.
 *
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param E The output essential matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
 * confidence (probability) that the estimated matrix is correct.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Recovering relative pose from corresponding points
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
 *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
 *
 *     // Output: Essential matrix, relative rotation and relative translation.
 *     Mat E, R, t, mask;
 *
 *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
 */
+ (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method prob:(double)prob NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:prob:));
/**
 * Recovers the relative camera rotation and the translation from corresponding points in two images
 * from two different cameras, using the cheirality check. Returns the number of inliers that pass
 * the check.
 *
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param E The output essential matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param method Method for computing an essential matrix.
 * - REF: RANSAC for the RANSAC algorithm.
 * - REF: LMEDS for the LMedS algorithm.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Recovering relative pose from corresponding points
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
 *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
 *
 *     // Output: Essential matrix, relative rotation and relative translation.
 *     Mat E, R, t, mask;
 *
 *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
 */
+ (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:));
/**
 * Recovers the relative camera rotation and the translation from corresponding points in two images
 * from two different cameras, using the cheirality check. Returns the number of inliers that pass
 * the check.
 *
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
 * REF: calibrateCamera.
 * @param E The output essential matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Recovering relative pose from corresponding points
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
 *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
 *
 *     // Output: Essential matrix, relative rotation and relative translation.
 *     Mat E, R, t, mask;
 *
 *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
 */
+ (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:));
//
// int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
//
/**
 * Recovers the relative camera rotation and the translation from an estimated essential
 * matrix and the corresponding points in two images, using the cheirality check. Returns the number
 * of inliers that pass the check.
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
 * recover pose. In the output mask only inliers which pass the cheirality check are marked.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Estimation of an essential matrix using the RANSAC algorithm
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // camera matrix with both focal lengths = 1, and principal point = (0, 0)
 *     Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
 *
 *     Mat E, R, t, mask;
 *
 *     E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
 *     recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:mask:));
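//
// The tail of the example above in Swift (illustrative; assumes the opencv2 framework's
// `Calib3d` wrapper class; with the identity camera matrix the points must be given in
// normalized image coordinates):
//
//     let cameraMatrix = Mat.eye(rows: 3, cols: 3, type: CvType.CV_64F)
//     let points1 = MatOfPoint2f(), points2 = MatOfPoint2f()  // matched points, filled elsewhere
//     let E = Mat(), mask = Mat()   // estimated earlier, e.g. by findEssentialMat
//     let R = Mat(), t = Mat()
//     let inlierCount = Calib3d.recoverPose(E: E, points1: points1, points2: points2,
//                                           cameraMatrix: cameraMatrix, R: R, t: t, mask: mask)
//     // inlierCount points passed the cheirality check; mask now marks only those.
//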
/**
 * Recovers the relative camera rotation and the translation from an estimated essential
 * matrix and the corresponding points in two images, using the cheirality check. Returns the number
 * of inliers that pass the check.
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * described below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 *
 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
 *
 * This function can be used to process the output E and mask from REF: findEssentialMat. In this
 * scenario, points1 and points2 are the same input as for #findEssentialMat :
 *
 *     // Example. Estimation of an essential matrix using the RANSAC algorithm
 *     int point_count = 100;
 *     vector<Point2f> points1(point_count);
 *     vector<Point2f> points2(point_count);
 *
 *     // initialize the points here ...
 *     for( int i = 0; i < point_count; i++ )
 *     {
 *         points1[i] = ...;
 *         points2[i] = ...;
 *     }
 *
 *     // camera matrix with both focal lengths = 1, and principal point = (0, 0)
 *     Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
 *
 *     Mat E, R, t, mask;
 *
 *     E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
 *     recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:));
//
// int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
//
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp Principal point of the camera.
 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
 * recover pose. In the output mask only inliers which pass the cheirality check are marked.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the
 * focal length and the principal point:
 *
 * `$$A =
 * \begin{bmatrix}
 * f & 0 & x_{pp} \\
 * 0 & f & y_{pp} \\
 * 0 & 0 & 1
 * \end{bmatrix}$$`
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t focal:(double)focal pp:(Point2d*)pp mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:focal:pp:mask:));
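//
// Swift sketch for the focal/principal-point variant above (illustrative; assumes the
// opencv2 framework's `Calib3d` wrapper class and its `Point2d` type with an `init(x:y:)`
// initializer):
//
//     let E = Mat()                                           // estimated earlier
//     let points1 = MatOfPoint2f(), points2 = MatOfPoint2f()  // matched points
//     let R = Mat(), t = Mat(), mask = Mat()
//     let inlierCount = Calib3d.recoverPose(E: E, points1: points1, points2: points2,
//                                           R: R, t: t, focal: 1.0,
//                                           pp: Point2d(x: 0, y: 0), mask: mask)
//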
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 * @param pp Principal point of the camera.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the
 * focal length and the principal point:
 *
 * `$$A =
 * \begin{bmatrix}
 * f & 0 & x_{pp} \\
 * 0 & f & y_{pp} \\
 * 0 & 0 & 1
 * \end{bmatrix}$$`
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t focal:(double)focal pp:(Point2d*)pp NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:focal:pp:));
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
 * are feature points from cameras with the same focal length and principal point.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the
 * focal length and the principal point:
 *
 * `$$A =
 * \begin{bmatrix}
 * f & 0 & x_{pp} \\
 * 0 & f & y_{pp} \\
 * 0 & 0 & 1
 * \end{bmatrix}$$`
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t focal:(double)focal NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:focal:));
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1 .
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 *
 * This function differs from the one above in that it computes the camera intrinsic matrix from the
 * focal length and the principal point:
 *
 * `$$A =
 * \begin{bmatrix}
 * f & 0 & x_{pp} \\
 * 0 & f & y_{pp} \\
 * 0 & 0 & 1
 * \end{bmatrix}$$`
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:));
//
// int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
//
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param distanceThresh Threshold distance which is used to filter out far-away points (i.e. infinite
 * points).
 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
 * recover pose. In the output mask only inliers which pass the cheirality check are marked.
 * @param triangulatedPoints 3D points which were reconstructed by triangulation.
 *
 * This function differs from the one above in that it outputs the triangulated 3D points that are
 * used for the cheirality check.
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t distanceThresh:(double)distanceThresh mask:(Mat*)mask triangulatedPoints:(Mat*)triangulatedPoints NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:distanceThresh:mask:triangulatedPoints:));
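//
// Swift sketch for the triangulating variant above (illustrative; assumes the opencv2
// framework's `Calib3d` wrapper class):
//
//     let E = Mat(), cameraMatrix = Mat()                     // from earlier estimation/calibration
//     let points1 = MatOfPoint2f(), points2 = MatOfPoint2f()  // matched points
//     let R = Mat(), t = Mat(), mask = Mat(), pts3D = Mat()
//     let inlierCount = Calib3d.recoverPose(E: E, points1: points1, points2: points2,
//                                           cameraMatrix: cameraMatrix, R: R, t: t,
//                                           distanceThresh: 50.0, mask: mask,
//                                           triangulatedPoints: pts3D)
//     // pts3D holds the homogeneous 3D points that were used for the cheirality check.
//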
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param distanceThresh Threshold distance which is used to filter out far-away points (i.e. infinite
 * points).
 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
 * recover pose. In the output mask only inliers which pass the cheirality check are marked.
 *
 * This function differs from the one above in that it outputs the triangulated 3D points that are
 * used for the cheirality check.
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t distanceThresh:(double)distanceThresh mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:distanceThresh:mask:));
/**
 *
 * @param E The input essential matrix.
 * @param points1 Array of N 2D points from the first image. The point coordinates should be
 * floating-point (single or double precision).
 * @param points2 Array of the second image points of the same size and format as points1.
 * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
 * Note that this function assumes that points1 and points2 are feature points from cameras with the
 * same camera intrinsic matrix.
 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
 * that performs a change of basis from the first camera's coordinate system to the second camera's
 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter
 * description below.
 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
 * length.
 * @param distanceThresh Threshold distance which is used to filter out far-away points (i.e. infinite
 * points).
 *
 * This function differs from the one above in that it outputs the triangulated 3D points that are
 * used for the cheirality check.
 */
+ (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t distanceThresh:(double)distanceThresh NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:distanceThresh:));
//
// void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
//
/**
 * For points in an image of a stereo pair, computes the corresponding epilines in the other image.
 *
 * @param points Input points. `$$N \times 1$$` or `$$1 \times N$$` matrix of type CV_32FC2 or
 * vector\<Point2f\> .
 * @param whichImage Index of the image (1 or 2) that contains the points.
 * @param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify .
 * @param lines Output vector of the epipolar lines corresponding to the points in the other image.
 * Each line `$$ax + by + c=0$$` is encoded by 3 numbers `$$(a, b, c)$$` .
 *
 * For every point in one of the two images of a stereo pair, the function finds the equation of the
 * corresponding epipolar line in the other image.
 *
 * From the fundamental matrix definition (see #findFundamentalMat ), line `$$l^{(2)}_i$$` in the second
 * image for the point `$$p^{(1)}_i$$` in the first image (when whichImage=1 ) is computed as:
 *
 * `$$l^{(2)}_i = F p^{(1)}_i$$`
 *
 * And vice versa, when whichImage=2, `$$l^{(1)}_i$$` is computed from `$$p^{(2)}_i$$` as:
 *
 * `$$l^{(1)}_i = F^T p^{(2)}_i$$`
 *
 * Line coefficients are defined up to a scale. They are normalized so that `$$a_i^2+b_i^2=1$$` .
 */
+ (void)computeCorrespondEpilines:(Mat*)points whichImage:(int)whichImage F:(Mat*)F lines:(Mat*)lines NS_SWIFT_NAME(computeCorrespondEpilines(points:whichImage:F:lines:));
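//
// Swift sketch (illustrative; assumes the opencv2 framework's `Calib3d` wrapper class and a
// fundamental matrix F estimated beforehand, e.g. by findFundamentalMat):
//
//     let F = Mat()                  // 3x3 fundamental matrix, estimated earlier
//     let points1 = MatOfPoint2f()   // points in the first image
//     let lines = Mat()
//     Calib3d.computeCorrespondEpilines(points: points1, whichImage: 1, F: F, lines: lines)
//     // Each output entry is (a, b, c) with a^2 + b^2 = 1, encoding the epipolar line
//     // ax + by + c = 0 in the second image.
//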
//
// void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
//
/**
 * This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
 * their observations with a stereo camera.
 *
 * @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
 * given in the world's coordinate system into the first image.
 * @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
 * given in the world's coordinate system into the second image.
 * @param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
 * it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
 * @param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
 * version, it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
 * @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
 * returned in the world's coordinate system.
 *
 * NOTE:
 * Keep in mind that all input data should be of float type in order for this function to work.
 *
 * NOTE:
 * If the projection matrices from REF: stereoRectify are used, then the returned points are
 * represented in the first camera's rectified coordinate system.
 *
 * @sa
 * reprojectImageTo3D
 */
+ (void)triangulatePoints:(Mat*)projMatr1 projMatr2:(Mat*)projMatr2 projPoints1:(Mat*)projPoints1 projPoints2:(Mat*)projPoints2 points4D:(Mat*)points4D NS_SWIFT_NAME(triangulatePoints(projMatr1:projMatr2:projPoints1:projPoints2:points4D:));
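//
// Illustrative usage sketch (not part of the original header): `P1`, `P2` are
// placeholder 3x4 float projection matrices and `pts1`, `pts2` placeholder
// 2xN float point Mats.
//
//   Mat *points4D = [Mat new];
//   [Calib3d triangulatePoints:P1 projMatr2:P2 projPoints1:pts1
//                  projPoints2:pts2 points4D:points4D];
//   // divide each column of `points4D` by its 4th component to get (X, Y, Z)
//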
//
// void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
//
/**
 * Refines coordinates of corresponding points.
 *
 * @param F 3x3 fundamental matrix.
 * @param points1 1xN array containing the first set of points.
 * @param points2 1xN array containing the second set of points.
 * @param newPoints1 The optimized points1.
 * @param newPoints2 The optimized points2.
 *
 * The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
 * For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
 * computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
 * error `$$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2$$` (where `$$d(a,b)$$` is the
 * geometric distance between points `$$a$$` and `$$b$$` ) subject to the epipolar constraint
 * `$$newPoints2^T \cdot F \cdot newPoints1 = 0$$`.
 */
+ (void)correctMatches:(Mat*)F points1:(Mat*)points1 points2:(Mat*)points2 newPoints1:(Mat*)newPoints1 newPoints2:(Mat*)newPoints2 NS_SWIFT_NAME(correctMatches(F:points1:points2:newPoints1:newPoints2:));
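//
// Illustrative usage sketch (not part of the original header): `F`, `pts1`,
// and `pts2` are placeholder Mats.
//
//   Mat *newPts1 = [Mat new];
//   Mat *newPts2 = [Mat new];
//   [Calib3d correctMatches:F points1:pts1 points2:pts2
//                newPoints1:newPts1 newPoints2:newPts2];
//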
//
// void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
//
/**
 * Filters off small noise blobs (speckles) in the disparity map
 *
 * @param img The input 16-bit signed disparity image
 * @param newVal The disparity value used to paint-off the speckles
 * @param maxSpeckleSize The maximum blob size, in pixels, that is still considered a speckle. Larger
 * blobs are not affected by the algorithm
 * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
 * blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
 * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
 * account when specifying this parameter value.
 * @param buf The optional temporary buffer to avoid memory allocation within the function.
 */
+ (void)filterSpeckles:(Mat*)img newVal:(double)newVal maxSpeckleSize:(int)maxSpeckleSize maxDiff:(double)maxDiff buf:(Mat*)buf NS_SWIFT_NAME(filterSpeckles(img:newVal:maxSpeckleSize:maxDiff:buf:));
/**
 * Filters off small noise blobs (speckles) in the disparity map
 *
 * @param img The input 16-bit signed disparity image
 * @param newVal The disparity value used to paint-off the speckles
 * @param maxSpeckleSize The maximum blob size, in pixels, that is still considered a speckle. Larger
 * blobs are not affected by the algorithm
 * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
 * blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
 * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
 * account when specifying this parameter value.
 */
+ (void)filterSpeckles:(Mat*)img newVal:(double)newVal maxSpeckleSize:(int)maxSpeckleSize maxDiff:(double)maxDiff NS_SWIFT_NAME(filterSpeckles(img:newVal:maxSpeckleSize:maxDiff:));
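//
// Illustrative usage sketch (not part of the original header): `disp` is a
// placeholder CV_16S disparity Mat from StereoBM/StereoSGBM, whose values are
// scaled by 16, hence maxDiff is scaled by 16 as well.
//
//   // paint over blobs of up to 200 px whose internal jumps exceed 2 disparities
//   [Calib3d filterSpeckles:disp newVal:0 maxSpeckleSize:200 maxDiff:2 * 16];
//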
//
// Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
//
+ (Rect2i*)getValidDisparityROI:(Rect2i*)roi1 roi2:(Rect2i*)roi2 minDisparity:(int)minDisparity numberOfDisparities:(int)numberOfDisparities blockSize:(int)blockSize NS_SWIFT_NAME(getValidDisparityROI(roi1:roi2:minDisparity:numberOfDisparities:blockSize:));
//
// void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
//
+ (void)validateDisparity:(Mat*)disparity cost:(Mat*)cost minDisparity:(int)minDisparity numberOfDisparities:(int)numberOfDisparities disp12MaxDisp:(int)disp12MaxDisp NS_SWIFT_NAME(validateDisparity(disparity:cost:minDisparity:numberOfDisparities:disp12MaxDisp:));
+ (void)validateDisparity:(Mat*)disparity cost:(Mat*)cost minDisparity:(int)minDisparity numberOfDisparities:(int)numberOfDisparities NS_SWIFT_NAME(validateDisparity(disparity:cost:minDisparity:numberOfDisparities:));
//
// void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
//
/**
 * Reprojects a disparity image to 3D space.
 *
 * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
 * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
 * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
 * REF: StereoSGBM and possibly other algorithms, it should be divided by 16 (and scaled to float) before
 * being used here.
 * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
 * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
 * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
 * camera's rectified coordinate system.
 * @param Q `$$4 \times 4$$` perspective transformation matrix that can be obtained with
 * REF: stereoRectify.
 * @param handleMissingValues Indicates whether the function should handle missing values (i.e.
 * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
 * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
 * to 3D points with a very large Z value (currently set to 10000).
 * @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
 * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
 *
 * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
 * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
 * computes:
 *
 * `$$\begin{bmatrix}
 * X \\
 * Y \\
 * Z \\
 * W
 * \end{bmatrix} = Q \begin{bmatrix}
 * x \\
 * y \\
 * \texttt{disparity} (x,y) \\
 * 1
 * \end{bmatrix}.$$`
 *
 * @sa
 * To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
 */
+ (void)reprojectImageTo3D:(Mat*)disparity _3dImage:(Mat*)_3dImage Q:(Mat*)Q handleMissingValues:(BOOL)handleMissingValues ddepth:(int)ddepth NS_SWIFT_NAME(reprojectImageTo3D(disparity:_3dImage:Q:handleMissingValues:ddepth:));
/**
 * Reprojects a disparity image to 3D space.
 *
 * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
 * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
 * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
 * REF: StereoSGBM and possibly other algorithms, it should be divided by 16 (and scaled to float) before
 * being used here.
 * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
 * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
 * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
 * camera's rectified coordinate system.
 * @param Q `$$4 \times 4$$` perspective transformation matrix that can be obtained with
 * REF: stereoRectify.
 * @param handleMissingValues Indicates whether the function should handle missing values (i.e.
 * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
 * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
 * to 3D points with a very large Z value (currently set to 10000).
 *
 * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
 * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
 * computes:
 *
 * `$$\begin{bmatrix}
 * X \\
 * Y \\
 * Z \\
 * W
 * \end{bmatrix} = Q \begin{bmatrix}
 * x \\
 * y \\
 * \texttt{disparity} (x,y) \\
 * 1
 * \end{bmatrix}.$$`
 *
 * @sa
 * To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
 */
+ (void)reprojectImageTo3D:(Mat*)disparity _3dImage:(Mat*)_3dImage Q:(Mat*)Q handleMissingValues:(BOOL)handleMissingValues NS_SWIFT_NAME(reprojectImageTo3D(disparity:_3dImage:Q:handleMissingValues:));
/**
 * Reprojects a disparity image to 3D space.
 *
 * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
 * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
 * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
 * REF: StereoSGBM and possibly other algorithms, it should be divided by 16 (and scaled to float) before
 * being used here.
 * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
 * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
 * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
 * camera's rectified coordinate system.
 * @param Q `$$4 \times 4$$` perspective transformation matrix that can be obtained with
 * REF: stereoRectify.
 *
 * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
 * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
 * computes:
 *
 * `$$\begin{bmatrix}
 * X \\
 * Y \\
 * Z \\
 * W
 * \end{bmatrix} = Q \begin{bmatrix}
 * x \\
 * y \\
 * \texttt{disparity} (x,y) \\
 * 1
 * \end{bmatrix}.$$`
 *
 * @sa
 * To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
 */
+ (void)reprojectImageTo3D:(Mat*)disparity _3dImage:(Mat*)_3dImage Q:(Mat*)Q NS_SWIFT_NAME(reprojectImageTo3D(disparity:_3dImage:Q:));
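//
// Illustrative usage sketch (not part of the original header): `disp16` and
// `Q` are placeholders for a CV_16S disparity Mat from StereoBM and the
// 4x4 matrix from stereoRectify; the convertTo call assumes the Mat binding's
// convertTo:rtype:alpha: method.
//
//   Mat *dispF = [Mat new];
//   [disp16 convertTo:dispF rtype:CV_32F alpha:1.0 / 16.0];  // undo the x16 fixed-point scale
//   Mat *xyz = [Mat new];
//   [Calib3d reprojectImageTo3D:dispF _3dImage:xyz Q:Q];
//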
//
// double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
//
/**
 * Calculates the Sampson Distance between two points.
 *
 * The function cv::sampsonDistance calculates and returns the first-order approximation of the geometric error as:
 * `$$
 * sd( \texttt{pt1} , \texttt{pt2} )=
 * \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
 * {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
 * ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
 * ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
 * ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
 * $$`
 * The fundamental matrix may be calculated using the #findFundamentalMat function. See CITE: HartleyZ00 11.4.3 for details.
 * @param pt1 first homogeneous 2d point
 * @param pt2 second homogeneous 2d point
 * @param F fundamental matrix
 * @return The computed Sampson distance.
 */
+ (double)sampsonDistance:(Mat*)pt1 pt2:(Mat*)pt2 F:(Mat*)F NS_SWIFT_NAME(sampsonDistance(pt1:pt2:F:));
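//
// Illustrative usage sketch (not part of the original header): `p1h` and
// `p2h` are placeholder 3x1 homogeneous point Mats and `F` a fundamental
// matrix estimated beforehand.
//
//   double err = [Calib3d sampsonDistance:p1h pt2:p2h F:F];
//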
//
// int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
//
/**
 * Computes an optimal affine transformation between two 3D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * z\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12} & a_{13}\\
 * a_{21} & a_{22} & a_{23}\\
 * a_{31} & a_{32} & a_{33}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * Z\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * b_3\\
 * \end{bmatrix}
 * $$`
 *
 * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
 * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
 * @param out Output 3D affine transformation matrix `$$3 \times 4$$` of the form
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & a_{13} & b_1\\
 * a_{21} & a_{22} & a_{23} & b_2\\
 * a_{31} & a_{32} & a_{33} & b_3\\
 * \end{bmatrix}
 * $$`
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
 * an inlier.
 * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
 *
 * The function estimates an optimal 3D affine transformation between two 3D point sets using the
 * RANSAC algorithm.
 */
+ (int)estimateAffine3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold confidence:(double)confidence NS_SWIFT_NAME(estimateAffine3D(src:dst:out:inliers:ransacThreshold:confidence:));
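//
// Illustrative usage sketch (not part of the original header): `src3d` and
// `dst3d` are placeholder Nx3 float point Mats.
//
//   Mat *T = [Mat new];       // receives the 3x4 [A | b] matrix
//   Mat *inliers = [Mat new];
//   int ok = [Calib3d estimateAffine3D:src3d dst:dst3d out:T inliers:inliers
//                      ransacThreshold:3 confidence:0.99];
//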
/**
 * Computes an optimal affine transformation between two 3D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * z\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12} & a_{13}\\
 * a_{21} & a_{22} & a_{23}\\
 * a_{31} & a_{32} & a_{33}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * Z\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * b_3\\
 * \end{bmatrix}
 * $$`
 *
 * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
 * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
 * @param out Output 3D affine transformation matrix `$$3 \times 4$$` of the form
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & a_{13} & b_1\\
 * a_{21} & a_{22} & a_{23} & b_2\\
 * a_{31} & a_{32} & a_{33} & b_3\\
 * \end{bmatrix}
 * $$`
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
 * an inlier.
 *
 * The function estimates an optimal 3D affine transformation between two 3D point sets using the
 * RANSAC algorithm.
 */
+ (int)estimateAffine3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold NS_SWIFT_NAME(estimateAffine3D(src:dst:out:inliers:ransacThreshold:));
/**
 * Computes an optimal affine transformation between two 3D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * z\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12} & a_{13}\\
 * a_{21} & a_{22} & a_{23}\\
 * a_{31} & a_{32} & a_{33}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * Z\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * b_3\\
 * \end{bmatrix}
 * $$`
 *
 * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
 * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
 * @param out Output 3D affine transformation matrix `$$3 \times 4$$` of the form
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & a_{13} & b_1\\
 * a_{21} & a_{22} & a_{23} & b_2\\
 * a_{31} & a_{32} & a_{33} & b_3\\
 * \end{bmatrix}
 * $$`
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 *
 * The function estimates an optimal 3D affine transformation between two 3D point sets using the
 * RANSAC algorithm.
 */
+ (int)estimateAffine3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers NS_SWIFT_NAME(estimateAffine3D(src:dst:out:inliers:));
//
// Mat cv::estimateAffine3D(Mat src, Mat dst, double* scale = nullptr, bool force_rotation = true)
//
/**
 * Computes an optimal affine transformation between two 3D point sets.
 *
 * It computes `$$R,s,t$$` minimizing `$$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2$$`
 * where `$$R$$` is a 3x3 rotation matrix, `$$t$$` is a 3x1 translation vector and `$$s$$` is a
 * scalar scale factor. This is an implementation of the algorithm by Umeyama CITE: umeyama1991least .
 * The estimated affine transform has a uniform scale, i.e. it is a similarity transform, a subclass
 * of affine transformations with 7 degrees of freedom. The paired point sets need to comprise at
 * least 3 points each.
 *
 * @param src First input 3D point set.
 * @param dst Second input 3D point set.
 * @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
 * Else the pointed-to variable will be set to the optimal scale.
 * @param force_rotation If true, the returned rotation will never be a reflection.
 * This might be unwanted, e.g. when optimizing a transform between a right- and a
 * left-handed coordinate system.
 * @return 3D affine transformation matrix `$$3 \times 4$$` of the form
 * `$$T =
 * \begin{bmatrix}
 * R & t\\
 * \end{bmatrix}
 * $$`
 */
+ (Mat*)estimateAffine3D:(Mat*)src dst:(Mat*)dst scale:(double*)scale force_rotation:(BOOL)force_rotation NS_SWIFT_NAME(estimateAffine3D(src:dst:scale:force_rotation:));
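//
// Illustrative usage sketch (not part of the original header): `src3d` and
// `dst3d` are placeholder point Mats with at least 3 points each.
//
//   double scale = 0;
//   Mat *T = [Calib3d estimateAffine3D:src3d dst:dst3d scale:&scale
//                       force_rotation:YES];
//   // T holds the 3x4 [R | t]; `scale` receives the estimated uniform scale s
//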
/**
 * Computes an optimal affine transformation between two 3D point sets.
 *
 * It computes `$$R,s,t$$` minimizing `$$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2$$`
 * where `$$R$$` is a 3x3 rotation matrix, `$$t$$` is a 3x1 translation vector and `$$s$$` is a
 * scalar scale factor. This is an implementation of the algorithm by Umeyama CITE: umeyama1991least .
 * The estimated affine transform has a uniform scale, i.e. it is a similarity transform, a subclass
 * of affine transformations with 7 degrees of freedom. The paired point sets need to comprise at
 * least 3 points each.
 *
 * @param src First input 3D point set.
 * @param dst Second input 3D point set.
 * @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
 * Else the pointed-to variable will be set to the optimal scale.
 * @return 3D affine transformation matrix `$$3 \times 4$$` of the form
 * `$$T =
 * \begin{bmatrix}
 * R & t\\
 * \end{bmatrix}
 * $$`
 */
+ (Mat*)estimateAffine3D:(Mat*)src dst:(Mat*)dst scale:(double*)scale NS_SWIFT_NAME(estimateAffine3D(src:dst:scale:));
/**
 * Computes an optimal affine transformation between two 3D point sets.
 *
 * It computes `$$R,s,t$$` minimizing `$$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2$$`
 * where `$$R$$` is a 3x3 rotation matrix, `$$t$$` is a 3x1 translation vector and `$$s$$` is a
 * scalar scale factor. This is an implementation of the algorithm by Umeyama CITE: umeyama1991least .
 * The estimated affine transform has a uniform scale, i.e. it is a similarity transform, a subclass
 * of affine transformations with 7 degrees of freedom. The paired point sets need to comprise at
 * least 3 points each.
 *
 * @param src First input 3D point set.
 * @param dst Second input 3D point set.
 * @return 3D affine transformation matrix `$$3 \times 4$$` of the form
 * `$$T =
 * \begin{bmatrix}
 * R & t\\
 * \end{bmatrix}
 * $$`
 */
+ (Mat*)estimateAffine3D:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(estimateAffine3D(src:dst:));
//
// int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
//
/**
 * Computes an optimal translation between two 3D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * z\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * X\\
 * Y\\
 * Z\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * b_3\\
 * \end{bmatrix}
 * $$`
 *
 * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
 * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
 * @param out Output 3D translation vector `$$3 \times 1$$` of the form
 * `$$
 * \begin{bmatrix}
 * b_1 \\
 * b_2 \\
 * b_3 \\
 * \end{bmatrix}
 * $$`
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
 * an inlier.
 * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
 *
 * The function estimates an optimal 3D translation between two 3D point sets using the
 * RANSAC algorithm.
 *
 */
+ (int)estimateTranslation3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold confidence:(double)confidence NS_SWIFT_NAME(estimateTranslation3D(src:dst:out:inliers:ransacThreshold:confidence:));
/**
 * Computes an optimal translation between two 3D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * z\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * X\\
 * Y\\
 * Z\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * b_3\\
 * \end{bmatrix}
 * $$`
 *
 * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
 * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
 * @param out Output 3D translation vector `$$3 \times 1$$` of the form
 * `$$
 * \begin{bmatrix}
 * b_1 \\
 * b_2 \\
 * b_3 \\
 * \end{bmatrix}
 * $$`
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
 * an inlier.
 *
 * The function estimates an optimal 3D translation between two 3D point sets using the
 * RANSAC algorithm.
 *
 */
+ (int)estimateTranslation3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold NS_SWIFT_NAME(estimateTranslation3D(src:dst:out:inliers:ransacThreshold:));
/**
 * Computes an optimal translation between two 3D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * z\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * X\\
 * Y\\
 * Z\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * b_3\\
 * \end{bmatrix}
 * $$`
 *
 * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
 * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
 * @param out Output 3D translation vector `$$3 \times 1$$` of the form
 * `$$
 * \begin{bmatrix}
 * b_1 \\
 * b_2 \\
 * b_3 \\
 * \end{bmatrix}
 * $$`
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 *
 * The function estimates an optimal 3D translation between two 3D point sets using the
 * RANSAC algorithm.
 *
 */
+ (int)estimateTranslation3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers NS_SWIFT_NAME(estimateTranslation3D(src:dst:out:inliers:));
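//
// Illustrative usage sketch (not part of the original header): `src3d` and
// `dst3d` are placeholder Nx3 float point Mats.
//
//   Mat *tvec = [Mat new];    // receives the 3x1 translation vector
//   Mat *inliers = [Mat new];
//   int ok = [Calib3d estimateTranslation3D:src3d dst:dst3d out:tvec inliers:inliers];
//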
//
// Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
//
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 * @param maxIters The maximum number of robust method iterations.
 * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
 * @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
 * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence refineIters:(size_t)refineIters NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:));
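//
// Illustrative usage sketch (not part of the original header): `from2d` and
// `to2d` are placeholder Nx2 point Mats; the overload below uses the default
// method (RANSAC) and default thresholds.
//
//   Mat *inliers = [Mat new];
//   Mat *A = [Calib3d estimateAffine2D:from2d to:to2d inliers:inliers];
//   // A is the 2x3 [a | b] matrix, or empty if estimation failed
//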
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 * @param maxIters The maximum number of robust method iterations.
 * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:));
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 * @param maxIters The maximum number of robust method iterations.
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:maxIters:));
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:));
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:));
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:));
/**
 * Computes an optimal affine transformation between two 2D point sets.
 *
 * It computes
 * `$$
 * \begin{bmatrix}
 * x\\
 * y\\
 * \end{bmatrix}
 * =
 * \begin{bmatrix}
 * a_{11} & a_{12}\\
 * a_{21} & a_{22}\\
 * \end{bmatrix}
 * \begin{bmatrix}
 * X\\
 * Y\\
 * \end{bmatrix}
 * +
 * \begin{bmatrix}
 * b_1\\
 * b_2\\
 * \end{bmatrix}
 * $$`
 *
 * @param from First input 2D point set containing `$$(X,Y)$$`.
 * @param to Second input 2D point set containing `$$(x,y)$$`.
 *
 * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if the
 * transformation could not be estimated. The returned matrix has the following form:
 * `$$
 * \begin{bmatrix}
 * a_{11} & a_{12} & b_1\\
 * a_{21} & a_{22} & b_2\\
 * \end{bmatrix}
 * $$`
 *
 * The function estimates an optimal 2D affine transformation between two 2D point sets using the
 * selected robust algorithm.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to NS_SWIFT_NAME(estimateAffine2D(from:to:));
//
// Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params)
//
+ (Mat*)estimateAffine2D:(Mat*)pts1 pts2:(Mat*)pts2 inliers:(Mat*)inliers params:(UsacParams*)params NS_SWIFT_NAME(estimateAffine2D(pts1:pts2:inliers:params:));
//
// Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
//
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 * @param inliers Output vector indicating which points are inliers.
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 * @param maxIters The maximum number of robust method iterations.
 * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
 * @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
 * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence refineIters:(size_t)refineIters NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:));
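//
// Illustrative usage sketch (not part of the original header): `from2d` and
// `to2d` are placeholder Nx2 point Mats; `Calib3d.RANSAC` assumes the RANSAC
// constant is exposed as a class property on Calib3d, as in other generated
// bindings.
//
//   Mat *inliers = [Mat new];
//   Mat *S = [Calib3d estimateAffinePartial2D:from2d to:to2d inliers:inliers
//                                      method:Calib3d.RANSAC
//                       ransacReprojThreshold:3 maxIters:2000
//                                  confidence:0.99 refineIters:10];
//   // S encodes rotation theta, uniform scale s, and translation (t_x, t_y)
//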
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 * @param inliers Output vector indicating which points are inliers.
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 * @param maxIters The maximum number of robust method iterations.
 * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:));
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 * @param inliers Output vector indicating which points are inliers.
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 * @param maxIters The maximum number of robust method iterations.
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:maxIters:));
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 * @param inliers Output vector indicating which points are inliers.
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
 * a point as an inlier. Applies only to RANSAC.
 *
 * In this overload the remaining parameters take their defaults: maxIters = 2000,
 * confidence = 0.99 (anything between 0.95 and 0.99 is usually good enough; values too close to 1
 * can slow down the estimation significantly, while values lower than 0.8-0.9 can result in an
 * incorrectly estimated transformation) and refineIters = 10 (0 disables refinement, so the output
 * matrix is the direct output of the robust method).
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in the `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The LMeDS method does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:));
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 * @param inliers Output vector indicating which points are inliers.
 * @param method Robust method used to compute transformation. The following methods are possible:
 * - REF: RANSAC - RANSAC-based robust method
 * - REF: LMEDS - Least-Median robust method
 * RANSAC is the default method.
 *
 * In this overload the remaining parameters take their defaults: ransacReprojThreshold = 3
 * (maximum reprojection error in the RANSAC algorithm to consider a point an inlier; applies only
 * to RANSAC), maxIters = 2000, confidence = 0.99 (anything between 0.95 and 0.99 is usually good
 * enough; values too close to 1 can slow down the estimation significantly, while values lower
 * than 0.8-0.9 can result in an incorrectly estimated transformation) and refineIters = 10
 * (0 disables refinement, so the output matrix is the direct output of the robust method).
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in the `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The LMeDS method does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:));
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 * @param inliers Output vector indicating which points are inliers.
 *
 * In this overload the robust method defaults to REF: RANSAC (the RANSAC-based robust method;
 * REF: LMEDS, the Least-Median robust method, can be selected via the fuller overloads), with
 * ransacReprojThreshold = 3 (maximum reprojection error in the RANSAC algorithm to consider a
 * point an inlier; applies only to RANSAC), maxIters = 2000, confidence = 0.99 (anything between
 * 0.95 and 0.99 is usually good enough; values too close to 1 can slow down the estimation
 * significantly, while values lower than 0.8-0.9 can result in an incorrectly estimated
 * transformation) and refineIters = 10 (0 disables refinement, so the output matrix is the direct
 * output of the robust method).
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in the `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The LMeDS method does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:));
/**
 * Computes an optimal limited affine transformation with 4 degrees of freedom between
 * two 2D point sets.
 *
 * @param from First input 2D point set.
 * @param to Second input 2D point set.
 *
 * In this overload no inlier mask is returned, and the robust method defaults to REF: RANSAC
 * (the RANSAC-based robust method; REF: LMEDS, the Least-Median robust method, can be selected
 * via the fuller overloads), with ransacReprojThreshold = 3 (maximum reprojection error in the
 * RANSAC algorithm to consider a point an inlier; applies only to RANSAC), maxIters = 2000,
 * confidence = 0.99 (anything between 0.95 and 0.99 is usually good enough; values too close to 1
 * can slow down the estimation significantly, while values lower than 0.8-0.9 can result in an
 * incorrectly estimated transformation) and refineIters = 10 (0 disables refinement, so the output
 * matrix is the direct output of the robust method).
 *
 * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
 * empty matrix if the transformation could not be estimated.
 *
 * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
 * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
 * estimation.
 *
 * The computed transformation is then refined further (using only inliers) with the
 * Levenberg-Marquardt method to reduce the re-projection error even more.
 *
 * The estimated transformation matrix is:
 * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
 * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
 * \end{bmatrix} $$`
 * where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
 * translations in the `$$ x, y $$` axes respectively.
 *
 * NOTE:
 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
 * distinguish inliers from outliers. The LMeDS method does not need any threshold but it works
 * correctly only when more than 50% of the points are inliers.
 *
 * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
 */
+ (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to NS_SWIFT_NAME(estimateAffinePartial2D(from:to:));
//
// int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
//
/**
 * Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
 *
 * @param H The input homography matrix between two images.
 * @param K The input camera intrinsic matrix.
 * @param rotations Array of rotation matrices.
 * @param translations Array of translation matrices.
 * @param normals Array of plane normal matrices.
 *
 * This function extracts relative camera motion between two views of a planar object and returns up to
 * four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
 * the homography matrix H is described in detail in CITE: Malis.
 *
 * If the homography H, induced by the plane, gives the constraint
 * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$` on the source image points
 * `$$p_i$$` and the destination image points `$$p'_i$$`, then the tuple of rotations[k] and
 * translations[k] is a change of basis from the source camera's coordinate system to the destination
 * camera's coordinate system. However, by decomposing H, one can only get the translation normalized
 * by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
 *
 * If point correspondences are available, at least two solutions may further be invalidated by
 * applying a positive depth constraint, i.e. all points must be in front of the camera.
 */
+ (int)decomposeHomographyMat:(Mat*)H K:(Mat*)K rotations:(NSMutableArray<Mat*>*)rotations translations:(NSMutableArray<Mat*>*)translations normals:(NSMutableArray<Mat*>*)normals NS_SWIFT_NAME(decomposeHomographyMat(H:K:rotations:translations:normals:));
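//
// Illustrative Swift usage (a sketch; `H` and `K` are hypothetical 3x3 Mats):
//
//     let rotations = NSMutableArray()
//     let translations = NSMutableArray()
//     let normals = NSMutableArray()
//     let n = Calib3d.decomposeHomographyMat(
//         H: H, K: K, rotations: rotations,
//         translations: translations, normals: normals)
//     // n (up to 4) candidate decompositions are returned; rotations[i],
//     // translations[i] and normals[i] form the i-th solution tuple.
//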
//
// void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
//
/**
 * Filters homography decompositions based on additional information.
 *
 * @param rotations Vector of rotation matrices.
 * @param normals Vector of plane normal matrices.
 * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
 * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
 * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
 * @param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function
 *
 * This function is intended to filter the output of the #decomposeHomographyMat based on additional
 * information as described in CITE: Malis. The summary of the method: the #decomposeHomographyMat function
 * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
 * sets of points visible in the camera frame before and after the homography transformation is applied,
 * we can determine which are the true potential solutions and which are the opposites by verifying which
 * homographies are consistent with all visible reference points being in front of the camera. The inputs
 * are left unchanged; the filtered solution set is returned as indices into the existing one.
 */
+ (void)filterHomographyDecompByVisibleRefpoints:(NSArray<Mat*>*)rotations normals:(NSArray<Mat*>*)normals beforePoints:(Mat*)beforePoints afterPoints:(Mat*)afterPoints possibleSolutions:(Mat*)possibleSolutions pointsMask:(Mat*)pointsMask NS_SWIFT_NAME(filterHomographyDecompByVisibleRefpoints(rotations:normals:beforePoints:afterPoints:possibleSolutions:pointsMask:));
/**
 * Filters homography decompositions based on additional information.
 *
 * @param rotations Vector of rotation matrices.
 * @param normals Vector of plane normal matrices.
 * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
 * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
 * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
 *
 * This function is intended to filter the output of the #decomposeHomographyMat based on additional
 * information as described in CITE: Malis. The summary of the method: the #decomposeHomographyMat function
 * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
 * sets of points visible in the camera frame before and after the homography transformation is applied,
 * we can determine which are the true potential solutions and which are the opposites by verifying which
 * homographies are consistent with all visible reference points being in front of the camera. The inputs
 * are left unchanged; the filtered solution set is returned as indices into the existing one.
 */
+ (void)filterHomographyDecompByVisibleRefpoints:(NSArray<Mat*>*)rotations normals:(NSArray<Mat*>*)normals beforePoints:(Mat*)beforePoints afterPoints:(Mat*)afterPoints possibleSolutions:(Mat*)possibleSolutions NS_SWIFT_NAME(filterHomographyDecompByVisibleRefpoints(rotations:normals:beforePoints:afterPoints:possibleSolutions:));
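//
// Illustrative Swift usage (a sketch continuing the decomposition example above;
// the NSMutableArray bridging and the point Mats `beforePts`/`afterPts` are
// assumptions):
//
//     let rots = rotations.compactMap { $0 as? Mat }
//     let norms = normals.compactMap { $0 as? Mat }
//     let solutions = Mat()
//     Calib3d.filterHomographyDecompByVisibleRefpoints(
//         rotations: rots, normals: norms,
//         beforePoints: beforePts, afterPoints: afterPts,
//         possibleSolutions: solutions)
//     // `solutions` now holds the indices of decompositions consistent with all
//     // reference points lying in front of the camera.
//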
//
// void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
//
/**
 * Transforms an image to compensate for lens distortion.
 *
 * The function transforms an image to compensate for radial and tangential lens distortion.
 *
 * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
 * (with bilinear interpolation). See the former function for details of the transformation being
 * performed.
 *
 * Those pixels in the destination image for which there are no corresponding pixels in the source
 * image are filled with zeros (black).
 *
 * A particular subset of the source image that will be visible in the corrected image can be regulated
 * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
 * newCameraMatrix depending on your requirements.
 *
 * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
 * the resolution of the images differs from the resolution used at the calibration stage, `$$f_x,
 * f_y, c_x$$` and `$$c_y$$` need to be scaled accordingly, while the distortion coefficients remain
 * the same.
 *
 * @param src Input (distorted) image.
 * @param dst Output (corrected) image that has the same size and type as src .
 * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
 * cameraMatrix but you may additionally scale and shift the result by using a different matrix.
 */
+ (void)undistort:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs newCameraMatrix:(Mat*)newCameraMatrix NS_SWIFT_NAME(undistort(src:dst:cameraMatrix:distCoeffs:newCameraMatrix:));
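//
// Illustrative Swift usage (a sketch; `image`, `cameraMatrix` and `distCoeffs`
// are hypothetical Mats, e.g. from a prior #calibrateCamera run):
//
//     let corrected = Mat()
//     Calib3d.undistort(src: image, dst: corrected,
//                       cameraMatrix: cameraMatrix, distCoeffs: distCoeffs)
//     // `corrected` has the same size and type as `image`, with lens distortion
//     // removed; unmapped pixels come out black.
//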
/**
 * Transforms an image to compensate for lens distortion.
 *
 * The function transforms an image to compensate for radial and tangential lens distortion.
 *
 * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
 * (with bilinear interpolation). See the former function for details of the transformation being
 * performed.
 *
 * Those pixels in the destination image for which there are no corresponding pixels in the source
 * image are filled with zeros (black).
 *
 * A particular subset of the source image that will be visible in the corrected image can be regulated
 * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
 * newCameraMatrix depending on your requirements.
 *
 * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
 * the resolution of the images differs from the resolution used at the calibration stage, `$$f_x,
 * f_y, c_x$$` and `$$c_y$$` need to be scaled accordingly, while the distortion coefficients remain
 * the same.
 *
 * @param src Input (distorted) image.
 * @param dst Output (corrected) image that has the same size and type as src .
 * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 *
 * In this overload newCameraMatrix defaults to cameraMatrix; use the full overload to additionally
 * scale and shift the result with a different matrix.
 */
+ (void)undistort:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(undistort(src:dst:cameraMatrix:distCoeffs:));
//
// void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
//
/**
 * Computes the undistortion and rectification transformation map.
 *
 * The function computes the joint undistortion and rectification transformation and represents the
 * result in the form of maps for #remap. The undistorted image looks like the original, as if it were
 * captured with a camera using the camera matrix = newCameraMatrix and zero distortion. In case of a
 * monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
 * #getOptimalNewCameraMatrix for better control over scaling. In case of a stereo camera,
 * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
 *
 * Also, this new camera is oriented differently in the coordinate space, according to R. That, for
 * example, helps to align two heads of a stereo camera so that the epipolar lines on both images
 * become horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera).
 *
 * The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
 * is, for each pixel `$$(u, v)$$` in the destination (corrected and rectified) image, the function
 * computes the corresponding coordinates in the source image (that is, in the original image from
 * camera). The following process is applied:
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} }
 * \begin{array}{l}
 * x \leftarrow (u - {c'}_x)/{f'}_x \\
 * y \leftarrow (v - {c'}_y)/{f'}_y \\
 * {[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\
 * x' \leftarrow X/W \\
 * y' \leftarrow Y/W \\
 * r^2 \leftarrow x'^2 + y'^2 \\
 * x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
 * + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\
 * y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
 * + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
 * s\vecthree{x'''}{y'''}{1} =
 * \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
 * {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
 * {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
 * map_x(u,v) \leftarrow x''' f_x + c_x \\
 * map_y(u,v) \leftarrow y''' f_y + c_y
 * \end{array}
 * $$`
 * where `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * are the distortion coefficients.
 *
 * In case of a stereo camera, this function is called twice: once for each camera head, after
 * #stereoRectify, which in turn is called after #stereoCalibrate. But if the stereo camera
 * was not calibrated, it is still possible to compute the rectification transformations directly from
 * the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
 * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
 * space. R can be computed from H as
 * `$$\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}$$`
 * where cameraMatrix can be chosen arbitrarily.
 *
 * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
 * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
 * is assumed. In cvInitUndistortMap, R is assumed to be the identity matrix.
 * @param newCameraMatrix New camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{0}{0}{1}$$`.
 * @param size Undistorted image size.
 * @param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
 * @param map1 The first output map.
 * @param map2 The second output map.
 */
+ (void)initUndistortRectifyMap:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R newCameraMatrix:(Mat*)newCameraMatrix size:(Size2i*)size m1type:(int)m1type map1:(Mat*)map1 map2:(Mat*)map2 NS_SWIFT_NAME(initUndistortRectifyMap(cameraMatrix:distCoeffs:R:newCameraMatrix:size:m1type:map1:map2:));
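//
// Illustrative Swift usage (a sketch; the CvType.CV_32FC1 constant and the Size2i
// initializer shown are assumptions about the bindings):
//
//     let map1 = Mat(), map2 = Mat()
//     Calib3d.initUndistortRectifyMap(
//         cameraMatrix: cameraMatrix, distCoeffs: distCoeffs,
//         R: Mat(), newCameraMatrix: cameraMatrix,
//         size: Size2i(width: 640, height: 480),
//         m1type: CvType.CV_32FC1, map1: map1, map2: map2)
//     // An empty R means identity (no rectification). Feed map1/map2 to
//     // Imgproc.remap per frame: computing the maps once and remapping is cheaper
//     // than calling undistort on every frame.
//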
//
// void cv::initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
//
/**
 * Computes the projection and inverse-rectification transformation map. In essence, this is the inverse of
 * #initUndistortRectifyMap to accommodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
 *
 * The function computes the joint projection and inverse rectification transformation and represents the
 * result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
 * once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
 * is usually equal to cameraMatrix, or it can be computed by
 * #getOptimalNewCameraMatrix for better control over scaling. In case of a projector-camera pair,
 * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
 *
 * The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
 * this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
 * allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
 *
 * The function builds the maps for the inverse mapping algorithm that is used by #remap. That
 * is, for each pixel `$$(u, v)$$` in the destination (projected and inverse-rectified) image, the function
 * computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
 *
 * `$$
 * \begin{array}{l}
 * \text{newCameraMatrix}\\
 * x \leftarrow (u - {c'}_x)/{f'}_x \\
 * y \leftarrow (v - {c'}_y)/{f'}_y \\
 *
 * \\\text{Undistortion}
 * \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
 * r^2 \leftarrow x^2 + y^2 \\
 * \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
 * x' \leftarrow \frac{x}{\theta} \\
 * y' \leftarrow \frac{y}{\theta} \\
 *
 * \\\text{Rectification}\\
 * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
 * x'' \leftarrow X/W \\
 * y'' \leftarrow Y/W \\
 *
 * \\\text{cameraMatrix}\\
 * map_x(u,v) \leftarrow x'' f_x + c_x \\
 * map_y(u,v) \leftarrow y'' f_y + c_y
 * \end{array}
 * $$`
 * where `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * are the distortion coefficients vector distCoeffs.
 *
 * In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
 * This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
 * is not calibrated, it is still possible to compute the rectification transformations directly from
 * the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
 * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
 * space. R can be computed from H as
 * `$$\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}$$`
 * where cameraMatrix can be chosen arbitrarily.
 *
 * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
 * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
 * is assumed.
 * @param newCameraMatrix New camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}$$`.
 * @param size Distorted image size.
 * @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
 * @param map1 The first output map for #remap.
 * @param map2 The second output map for #remap.
 */
+ (void)initInverseRectificationMap:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R newCameraMatrix:(Mat*)newCameraMatrix size:(Size2i*)size m1type:(int)m1type map1:(Mat*)map1 map2:(Mat*)map2 NS_SWIFT_NAME(initInverseRectificationMap(cameraMatrix:distCoeffs:R:newCameraMatrix:size:m1type:map1:map2:));
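//
// Illustrative Swift usage (a sketch for the projector side of a stereo-rectified
// projector-camera pair; `projMatrix`, `projDistCoeffs`, `R2` and `P2` from a
// prior #stereoRectify run are assumptions):
//
//     let pmap1 = Mat(), pmap2 = Mat()
//     Calib3d.initInverseRectificationMap(
//         cameraMatrix: projMatrix, distCoeffs: projDistCoeffs,
//         R: R2, newCameraMatrix: P2,
//         size: Size2i(width: 1280, height: 800),
//         m1type: CvType.CV_32FC1, map1: pmap1, map2: pmap2)
//     // Remapping the pattern with pmap1/pmap2 pre-distorts it so that, once
//     // projected, it appears rectified to the camera.
//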
//
// Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
//
/**
 * Returns the default new camera matrix.
 *
 * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
 * centerPrincipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
 *
 * In the latter case, the new camera matrix will be:
 *
 * `$$\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,$$`
 *
 * where `$$f_x$$` and `$$f_y$$` are `$$(0,0)$$` and `$$(1,1)$$` elements of cameraMatrix, respectively.
 *
 * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
 * move the principal point. However, when you work with stereo, it is important to move the principal
 * points in both views to the same y-coordinate (which is required by most stereo correspondence
 * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
 * each view where the principal points are located at the center.
 *
 * @param cameraMatrix Input camera matrix.
 * @param imgsize Camera view image size in pixels.
 * @param centerPrincipalPoint Location of the principal point in the new camera matrix. The
 * parameter indicates whether this location should be at the image center or not.
 */
+ (Mat*)getDefaultNewCameraMatrix:(Mat*)cameraMatrix imgsize:(Size2i*)imgsize centerPrincipalPoint:(BOOL)centerPrincipalPoint NS_SWIFT_NAME(getDefaultNewCameraMatrix(cameraMatrix:imgsize:centerPrincipalPoint:));
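//
// Illustrative Swift usage (a sketch):
//
//     let centered = Calib3d.getDefaultNewCameraMatrix(
//         cameraMatrix: cameraMatrix,
//         imgsize: Size2i(width: 640, height: 480),
//         centerPrincipalPoint: true)
//     // `centered` keeps f_x/f_y from cameraMatrix but moves the principal point
//     // to the image center, ready to pass as newCameraMatrix elsewhere.
//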
/**
 * Returns the default new camera matrix.
 *
 * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
 * centerPrincipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
 *
 * In the latter case, the new camera matrix will be:
 *
 * `$$\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,$$`
 *
 * where `$$f_x$$` and `$$f_y$$` are `$$(0,0)$$` and `$$(1,1)$$` elements of cameraMatrix, respectively.
 *
 * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
 * move the principal point. However, when you work with stereo, it is important to move the principal
 * points in both views to the same y-coordinate (which is required by most stereo correspondence
 * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
 * each view where the principal points are located at the center.
 *
 * @param cameraMatrix Input camera matrix.
 * @param imgsize Camera view image size in pixels.
 *
 * In this overload centerPrincipalPoint defaults to false, so the principal point is left unchanged.
 */
+ (Mat*)getDefaultNewCameraMatrix:(Mat*)cameraMatrix imgsize:(Size2i*)imgsize NS_SWIFT_NAME(getDefaultNewCameraMatrix(cameraMatrix:imgsize:));
/**
 * Returns the default new camera matrix.
 *
 * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
 * centerPrincipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
 *
 * In the latter case, the new camera matrix will be:
 *
 * `$$\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,$$`
 *
 * where `$$f_x$$` and `$$f_y$$` are `$$(0,0)$$` and `$$(1,1)$$` elements of cameraMatrix, respectively.
 *
 * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
 * move the principal point. However, when you work with stereo, it is important to move the principal
 * points in both views to the same y-coordinate (which is required by most stereo correspondence
 * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
 * each view where the principal points are located at the center.
 *
 * @param cameraMatrix Input camera matrix.
 *
 * In this overload imgsize defaults to an empty size and centerPrincipalPoint to false, so an exact
 * copy of cameraMatrix is returned.
 */
+ (Mat*)getDefaultNewCameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(getDefaultNewCameraMatrix(cameraMatrix:));
//
// void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
//
/**
 * Computes the ideal point coordinates from the observed point coordinates.
 *
 * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
 * sparse set of points instead of a raster image. The function also performs a reverse transformation
 * to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
 * planar object, it does, up to a translation vector, if the proper R is specified.
 *
 * For each observed point coordinate `$$(u, v)$$` the function computes:
 * `$$
 * \begin{array}{l}
 * x^{"} \leftarrow (u - c_x)/f_x \\
 * y^{"} \leftarrow (v - c_y)/f_y \\
 * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
 * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
 * x \leftarrow X/W \\
 * y \leftarrow Y/W \\
 * \text{only performed if P is specified:} \\
 * u' \leftarrow x {f'}_x + {c'}_x \\
 * v' \leftarrow y {f'}_y + {c'}_y
 * \end{array}
 * $$`
 *
 * where *undistort* is an approximate iterative algorithm that estimates the normalized original
 * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
 * coordinates do not depend on the camera matrix).
 *
 * The function can be used for either a stereo camera head or a monocular camera (when R is empty).
 * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
 * vector\<Point2f\> ).
 * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
 * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
 * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
 * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
 * @param P New camera matrix (3x3) or new projection matrix (3x4) `$$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}$$`. P1 or P2 computed by
 * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
 */
+ (void)undistortPoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R P:(Mat*)P NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:R:P:));
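//
// Illustrative Swift usage (a sketch; `pixelPts` is a hypothetical 1xN 2-channel
// Mat of observed pixel coordinates):
//
//     let normalized = Mat()
//     Calib3d.undistortPoints(src: pixelPts, dst: normalized,
//                             cameraMatrix: cameraMatrix, distCoeffs: distCoeffs)
//     // With R and P omitted, `normalized` holds ideal, normalized coordinates
//     // that no longer depend on the camera matrix.
//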
/**
 * Computes the ideal point coordinates from the observed point coordinates.
 *
 * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
 * sparse set of points instead of a raster image. The function also performs a reverse transformation
 * to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
 * planar object, it does, up to a translation vector, if the proper R is specified.
 *
 * For each observed point coordinate `$$(u, v)$$` the function computes:
 * `$$
 * \begin{array}{l}
 * x^{"} \leftarrow (u - c_x)/f_x \\
 * y^{"} \leftarrow (v - c_y)/f_y \\
 * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
 * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
 * x \leftarrow X/W \\
 * y \leftarrow Y/W \\
 * \text{only performed if P is specified:} \\
 * u' \leftarrow x {f'}_x + {c'}_x \\
 * v' \leftarrow y {f'}_y + {c'}_y
 * \end{array}
 * $$`
 *
 * where *undistort* is an approximate iterative algorithm that estimates the normalized original
 * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
 * coordinates do not depend on the camera matrix).
 *
 * The function can be used for either a stereo camera head or a monocular camera (when R is empty).
 * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
 * vector\<Point2f\> ).
 * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
 * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
 * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
 * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
 *
 * In this overload P defaults to empty, so the identity new camera matrix is used and dst contains
 * normalized point coordinates.
 */
+ (void)undistortPoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:R:));
/**
 * Computes the ideal point coordinates from the observed point coordinates.
 *
 * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
 * sparse set of points instead of a raster image. The function also performs a reverse transformation
 * to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
 * planar object, it does, up to a translation vector, if the proper R is specified.
 *
 * For each observed point coordinate `$$(u, v)$$` the function computes:
 * `$$
 * \begin{array}{l}
 * x^{"} \leftarrow (u - c_x)/f_x \\
 * y^{"} \leftarrow (v - c_y)/f_y \\
 * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
 * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
 * x \leftarrow X/W \\
 * y \leftarrow Y/W \\
 * \text{only performed if P is specified:} \\
 * u' \leftarrow x {f'}_x + {c'}_x \\
 * v' \leftarrow y {f'}_y + {c'}_y
 * \end{array}
 * $$`
 *
 * where *undistort* is an approximate iterative algorithm that estimates the normalized original
 * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
 * coordinates do not depend on the camera matrix).
 *
 * The function can be used for either a stereo camera head or a monocular camera (when R is empty).
 * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
 * vector\<Point2f\> ).
 * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
 * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
 * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Input vector of distortion coefficients
 * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
 *
 * In this overload R and P default to empty: the identity rectification transformation and the
 * identity new camera matrix are used, so dst contains normalized point coordinates.
 */
+ (void)undistortPoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:));
//
// void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
//
/**
 *
 * NOTE: The default version of #undistortPoints does 5 iterations to compute undistorted points.
 */
+ (void)undistortPointsIter:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R P:(Mat*)P criteria:(TermCriteria*)criteria NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:R:P:criteria:));
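//
// Illustrative Swift usage (a sketch; the TermCriteria initializer shown and the
// raw type value 3, standing for MAX_ITER + EPS, are assumptions about the
// bindings):
//
//     let crit = TermCriteria(type: 3, maxCount: 20, epsilon: 1e-6)
//     Calib3d.undistortPoints(src: pixelPts, dst: normalized,
//                             cameraMatrix: cameraMatrix, distCoeffs: distCoeffs,
//                             R: Mat(), P: Mat(), criteria: crit)
//     // More iterations and a tighter epsilon than the 5-iteration default can
//     // help with strong distortion near the image border.
//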
//
// void cv::undistortImagePoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1 = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 0.01))
//
/**
 * Computes undistorted image point positions.
 *
 * @param src Observed point positions, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
 * CV_64FC2) (or vector\<Point2f\> ).
 * @param dst Output undistorted point positions (1xN/Nx1 2-channel or vector\<Point2f\> ).
 * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Distortion coefficients
 * @param arg1 Termination criteria for the iterative undistortion (by default, 5 iterations or an
 * epsilon of 0.01, per the signature above).
 */
+ (void)undistortImagePoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs arg1:(TermCriteria*)arg1 NS_SWIFT_NAME(undistortImagePoints(src:dst:cameraMatrix:distCoeffs:arg1:));
/**
 * Computes undistorted image point positions.
 *
 * @param src Observed point positions, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
 * CV_64FC2) (or vector\<Point2f\> ).
 * @param dst Output undistorted point positions (1xN/Nx1 2-channel or vector\<Point2f\> ).
 * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
 * @param distCoeffs Distortion coefficients
 */
+ (void)undistortImagePoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(undistortImagePoints(src:dst:cameraMatrix:distCoeffs:));
//
// void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
//
+ (void)projectPoints:(Mat*)objectPoints imagePoints:(Mat*)imagePoints rvec:(Mat*)rvec tvec:(Mat*)tvec K:(Mat*)K D:(Mat*)D alpha:(double)alpha jacobian:(Mat*)jacobian NS_SWIFT_NAME(projectPoints(objectPoints:imagePoints:rvec:tvec:K:D:alpha:jacobian:));
+ (void)projectPoints:(Mat*)objectPoints imagePoints:(Mat*)imagePoints rvec:(Mat*)rvec tvec:(Mat*)tvec K:(Mat*)K D:(Mat*)D alpha:(double)alpha NS_SWIFT_NAME(projectPoints(objectPoints:imagePoints:rvec:tvec:K:D:alpha:));
+ (void)projectPoints:(Mat*)objectPoints imagePoints:(Mat*)imagePoints rvec:(Mat*)rvec tvec:(Mat*)tvec K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(projectPoints(objectPoints:imagePoints:rvec:tvec:K:D:));
//
// void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
//
/**
 * Distorts 2D points using the fisheye model.
 *
 * @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
 * the number of points in the view.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param alpha The skew coefficient.
 * @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
 *
 * Note that the function assumes the camera intrinsic matrix of the undistorted points to be the
 * identity. This means that if you want to distort image points you have to multiply them by
 * `$$K^{-1}$$` first.
 */
+ (void)distortPoints:(Mat*)undistorted distorted:(Mat*)distorted K:(Mat*)K D:(Mat*)D alpha:(double)alpha NS_SWIFT_NAME(distortPoints(undistorted:distorted:K:D:alpha:));
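//
// Illustrative Swift usage (a sketch; `normPts` is a hypothetical Mat of points
// already normalized by K^{-1}, as the note above requires):
//
//     let distorted = Mat()
//     Calib3d.distortPoints(undistorted: normPts, distorted: distorted,
//                           K: K, D: D)
//     // `distorted` holds the corresponding pixel coordinates under the fisheye
//     // model described by K and D (skew coefficient alpha defaults to 0).
//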
/**
 * Distorts 2D points using the fisheye model.
 *
 * @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
 * the number of points in the view.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
 *
 * Note that the function assumes the camera intrinsic matrix of the undistorted points to be the
 * identity. This means that if you want to distort image points you have to multiply them by
 * `$$K^{-1}$$` first.
 */
+ (void)distortPoints:(Mat*)undistorted distorted:(Mat*)distorted K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(distortPoints(undistorted:distorted:K:D:));
//
// void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat(), TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8))
//
/**
 * Undistorts 2D points using the fisheye model.
 *
 * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
 * number of points in the view.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 * @param criteria Termination criteria
 * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
 */
+ (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D R:(Mat*)R P:(Mat*)P criteria:(TermCriteria*)criteria NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:R:P:criteria:));
/**
 * Undistorts 2D points using the fisheye model.
 *
 * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
 * number of points in the view.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
 */
+ (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D R:(Mat*)R P:(Mat*)P NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:R:P:));
/**
 * Undistorts 2D points using the fisheye model.
 *
 * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
 * number of points in the view.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
 */
+ (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D R:(Mat*)R NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:R:));
/**
 * Undistorts 2D points using the fisheye model.
 *
 * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
 * number of points in the view.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
 *
 * In this overload the rectification transformation R and the new camera matrix P default to empty
 * (identities).
 */
+ (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:));
//
// void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
//
/**
 * Computes undistortion and rectification maps for an image transform by #remap. If D is empty,
 * zero distortion is used; if R or P is empty, identity matrices are used.
 *
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 * @param size Undistorted image size.
 * @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
 * for details.
 * @param map1 The first output map.
 * @param map2 The second output map.
 */
+ (void)initUndistortRectifyMap:(Mat*)K D:(Mat*)D R:(Mat*)R P:(Mat*)P size:(Size2i*)size m1type:(int)m1type map1:(Mat*)map1 map2:(Mat*)map2 NS_SWIFT_NAME(initUndistortRectifyMap(K:D:R:P:size:m1type:map1:map2:));
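//
// Illustrative Swift usage (a sketch; K and D come from a fisheye calibration):
//
//     let fmap1 = Mat(), fmap2 = Mat()
//     Calib3d.initUndistortRectifyMap(
//         K: K, D: D, R: Mat(), P: K,
//         size: Size2i(width: 1920, height: 1080),
//         m1type: CvType.CV_16SC2, map1: fmap1, map2: fmap2)
//     // Passing K as P keeps the original focal length and principal point; the
//     // maps then drive Imgproc.remap exactly as in the pinhole case above.
//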
//
// void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
//
/**
 * Transforms an image to compensate for fisheye lens distortion.
 *
 * @param distorted image with fisheye lens distortion.
 * @param undistorted Output image with compensated fisheye lens distortion.
 * @param K Camera intrinsic matrix `$$cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
 * may additionally scale and shift the result by using a different matrix.
 * @param new_size the new size
 *
 * The function transforms an image to compensate for radial and tangential lens distortion.
 *
 * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
 * (with bilinear interpolation). See the former function for details of the transformation being
 * performed.
 *
 * See below for the results of undistortImage.
 * - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
 * k_4, k_5, k_6) of distortion were optimized under calibration)
 * - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
 * k_3, k_4) of fisheye distortion were optimized under calibration)
 * - c\) original image was captured with fisheye lens
 *
 * Pictures a) and b) are almost the same. But if we consider points located far from the image
 * center, we can notice that in image a) these points are distorted.
 *
 * ![image](pics/fisheye_undistorted.jpg)
 */
+ (void)undistortImage:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D Knew:(Mat*)Knew new_size:(Size2i*)new_size NS_SWIFT_NAME(undistortImage(distorted:undistorted:K:D:Knew:new_size:));
/**
 * Transforms an image to compensate for fisheye lens distortion.
 *
 * @param distorted image with fisheye lens distortion.
 * @param undistorted Output image with compensated fisheye lens distortion.
 * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
 * may additionally scale and shift the result by using a different matrix.
 *
 * The function transforms an image to compensate for radial and tangential lens distortion.
 *
 * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
 * (with bilinear interpolation). See the former function for details of the transformation being
 * performed.
 *
 * See below for the results of undistortImage.
 * - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
 * k_4, k_5, k_6) of distortion were optimized under calibration)
 * - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
 * k_3, k_4) of fisheye distortion were optimized under calibration)
 * - c\) original image was captured with fisheye lens
 *
 * Pictures a) and b) are almost the same. But if we consider points located far from the image
 * center, we can notice that in image a) these points are distorted.
 *
 * ![image](pics/fisheye_undistorted.jpg)
 */
+ (void)undistortImage:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D Knew:(Mat*)Knew NS_SWIFT_NAME(undistortImage(distorted:undistorted:K:D:Knew:));
/**
 * Transforms an image to compensate for fisheye lens distortion.
 *
 * @param distorted image with fisheye lens distortion.
 * @param undistorted Output image with compensated fisheye lens distortion.
 * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 *
 * The function transforms an image to compensate for radial and tangential lens distortion.
 *
 * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
 * (with bilinear interpolation). See the former function for details of the transformation being
 * performed.
 *
 * See below for the results of undistortImage.
 * - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
 * k_4, k_5, k_6) of distortion were optimized under calibration)
 * - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
 * k_3, k_4) of fisheye distortion were optimized under calibration)
 * - c\) original image was captured with fisheye lens
 *
 * Pictures a) and b) are almost the same. But if we consider points located far from the image
 * center, we can notice that in image a) these points are distorted.
 *
 * ![image](pics/fisheye_undistorted.jpg)
 */
+ (void)undistortImage:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(undistortImage(distorted:undistorted:K:D:));
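// A minimal usage sketch (class name Calib3d assumed). With this overload, Knew
// defaults to the identity, which tends to crop the result heavily; a Knew from
// estimateNewCameraMatrixForUndistortRectify (below) usually preserves more of
// the field of view. `fisheyeImage` stands in for a real input Mat.
//
//     Mat *undistorted = [Mat new];
//     [Calib3d undistortImage:fisheyeImage undistorted:undistorted K:K D:D];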
//
// void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
//
/**
 * Estimates new camera intrinsic matrix for undistortion or rectification.
 *
 * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
 * @param image_size Size of the image
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 * @param balance Sets the new focal length in range between the min focal length and the max focal
 * length. Balance is in the range [0, 1].
 * @param new_size the new size
 * @param fov_scale Divisor for new focal length.
 */
+ (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P balance:(double)balance new_size:(Size2i*)new_size fov_scale:(double)fov_scale NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:balance:new_size:fov_scale:));
/**
 * Estimates new camera intrinsic matrix for undistortion or rectification.
 *
 * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
 * @param image_size Size of the image
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 * @param balance Sets the new focal length in range between the min focal length and the max focal
 * length. Balance is in the range [0, 1].
 * @param new_size the new size
 */
+ (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P balance:(double)balance new_size:(Size2i*)new_size NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:balance:new_size:));
/**
 * Estimates new camera intrinsic matrix for undistortion or rectification.
 *
 * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
 * @param image_size Size of the image
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 * @param balance Sets the new focal length in range between the min focal length and the max focal
 * length. Balance is in the range [0, 1].
 */
+ (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P balance:(double)balance NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:balance:));
/**
 * Estimates new camera intrinsic matrix for undistortion or rectification.
 *
 * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
 * @param image_size Size of the image
 * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
 * 1-channel or 1x1 3-channel
 * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
 */
+ (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:));
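// A minimal usage sketch (class name Calib3d assumed): estimating a new camera
// matrix with a 50/50 balance between the minimum and maximum focal length, then
// using it as Knew for undistortImage above. An empty Mat for R selects the identity.
//
//     Mat *newK = [Mat new];
//     [Calib3d estimateNewCameraMatrixForUndistortRectify:K D:D image_size:size
//                                                       R:[Mat new] P:newK balance:0.5];
//     [Calib3d undistortImage:fisheyeImage undistorted:undistorted K:K D:D Knew:newK];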
//
// double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
//
/**
 * Performs camera calibration
 *
 * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
 * coordinate space.
 * @param imagePoints vector of vectors of the projections of calibration pattern points.
 * imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
 * objectPoints[i].size() for each i.
 * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
 * @param K Output 3x3 floating-point camera intrinsic matrix
 * `$$\cameramatrix{A}$$` . If
 * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
 * initialized before calling the function.
 * @param D Output vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
 * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
 * the next output parameter description) brings the calibration pattern from the model coordinate
 * space (in which object points are specified) to the world coordinate space, that is, a real
 * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
 * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
 * of intrinsic optimization.
 * - REF: fisheye::CALIB_CHECK_COND The functions will check the validity of the condition number.
 * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
 * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
 * are set to zero and stay zero.
 * - REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
 * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
 * - REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
 * optimization. It is `$$\max(width,height)/\pi$$` or the provided `$$f_x$$`, `$$f_y$$` when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
 * @param criteria Termination criteria for the iterative optimization algorithm.
 */
+ (double)calibrate:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints image_size:(Size2i*)image_size K:(Mat*)K D:(Mat*)D rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrate(objectPoints:imagePoints:image_size:K:D:rvecs:tvecs:flags:criteria:));
/**
 * Performs camera calibration
 *
 * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
 * coordinate space.
 * @param imagePoints vector of vectors of the projections of calibration pattern points.
 * imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
 * objectPoints[i].size() for each i.
 * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
 * @param K Output 3x3 floating-point camera intrinsic matrix
 * `$$\cameramatrix{A}$$` . If
 * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
 * initialized before calling the function.
 * @param D Output vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
 * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
 * the next output parameter description) brings the calibration pattern from the model coordinate
 * space (in which object points are specified) to the world coordinate space, that is, a real
 * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
 * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
 * of intrinsic optimization.
 * - REF: fisheye::CALIB_CHECK_COND The functions will check the validity of the condition number.
 * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
 * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
 * are set to zero and stay zero.
 * - REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
 * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
 * - REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
 * optimization. It is `$$\max(width,height)/\pi$$` or the provided `$$f_x$$`, `$$f_y$$` when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
 */
+ (double)calibrate:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints image_size:(Size2i*)image_size K:(Mat*)K D:(Mat*)D rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(calibrate(objectPoints:imagePoints:image_size:K:D:rvecs:tvecs:flags:));
/**
 * Performs camera calibration
 *
 * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
 * coordinate space.
 * @param imagePoints vector of vectors of the projections of calibration pattern points.
 * imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
 * objectPoints[i].size() for each i.
 * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
 * @param K Output 3x3 floating-point camera intrinsic matrix
 * `$$\cameramatrix{A}$$` . If
 * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
 * initialized before calling the function.
 * @param D Output vector of distortion coefficients `$$\distcoeffsfisheye$$`.
 * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
 * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
 * the next output parameter description) brings the calibration pattern from the model coordinate
 * space (in which object points are specified) to the world coordinate space, that is, a real
 * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 */
+ (double)calibrate:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints image_size:(Size2i*)image_size K:(Mat*)K D:(Mat*)D rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(calibrate(objectPoints:imagePoints:image_size:K:D:rvecs:tvecs:));
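// A minimal usage sketch (class name Calib3d assumed). objectPoints holds one Mat
// of 3D pattern points per view and imagePoints the matching detected corners; the
// exact spelling of the flag constants in these bindings is an assumption, so the
// flags value is left as a placeholder.
//
//     Mat *K = [Mat new], *D = [Mat new];
//     NSMutableArray<Mat*> *rvecs = [NSMutableArray array];
//     NSMutableArray<Mat*> *tvecs = [NSMutableArray array];
//     int flags = ...;   // e.g. CALIB_RECOMPUTE_EXTRINSIC | CALIB_FIX_SKEW
//     double rms = [Calib3d calibrate:objectPoints imagePoints:imagePoints
//                          image_size:imageSize K:K D:D rvecs:rvecs tvecs:tvecs
//                               flags:flags];
//     // The returned double is the reprojection error, as with calibrateCamera.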
//
// void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
//
/**
 * Stereo rectification for the fisheye camera model
 *
 * @param K1 First camera intrinsic matrix.
 * @param D1 First camera distortion parameters.
 * @param K2 Second camera intrinsic matrix.
 * @param D2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix between the coordinate systems of the first and the second
 * cameras.
 * @param tvec Translation vector between coordinate systems of the cameras.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
 * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 * @param newImageSize New image resolution after rectification. The same size should be passed to
 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
 * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
 * preserve details in the original image, especially when there is significant radial distortion.
 * @param balance Sets the new focal length in range between the min focal length and the max focal
 * length. Balance is in the range [0, 1].
 * @param fov_scale Divisor for new focal length.
 */
+ (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags newImageSize:(Size2i*)newImageSize balance:(double)balance fov_scale:(double)fov_scale NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:newImageSize:balance:fov_scale:));
/**
 * Stereo rectification for the fisheye camera model
 *
 * @param K1 First camera intrinsic matrix.
 * @param D1 First camera distortion parameters.
 * @param K2 Second camera intrinsic matrix.
 * @param D2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix between the coordinate systems of the first and the second
 * cameras.
 * @param tvec Translation vector between coordinate systems of the cameras.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
 * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 * @param newImageSize New image resolution after rectification. The same size should be passed to
 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
 * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
 * preserve details in the original image, especially when there is significant radial distortion.
 * @param balance Sets the new focal length in range between the min focal length and the max focal
 * length. Balance is in the range [0, 1].
 */
+ (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags newImageSize:(Size2i*)newImageSize balance:(double)balance NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:newImageSize:balance:));
/**
 * Stereo rectification for the fisheye camera model
 *
 * @param K1 First camera intrinsic matrix.
 * @param D1 First camera distortion parameters.
 * @param K2 Second camera intrinsic matrix.
 * @param D2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix between the coordinate systems of the first and the second
 * cameras.
 * @param tvec Translation vector between coordinate systems of the cameras.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
 * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 * @param newImageSize New image resolution after rectification. The same size should be passed to
 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
 * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
 * preserve details in the original image, especially when there is significant radial distortion.
 */
+ (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags newImageSize:(Size2i*)newImageSize NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:newImageSize:));
/**
 * Stereo rectification for the fisheye camera model
 *
 * @param K1 First camera intrinsic matrix.
 * @param D1 First camera distortion parameters.
 * @param K2 Second camera intrinsic matrix.
 * @param D2 Second camera distortion parameters.
 * @param imageSize Size of the image used for stereo calibration.
 * @param R Rotation matrix between the coordinate systems of the first and the second
 * cameras.
 * @param tvec Translation vector between coordinate systems of the cameras.
 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
 * camera.
 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
 * camera.
 * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
 * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
 * the function makes the principal points of each camera have the same pixel coordinates in the
 * rectified views. If the flag is not set, the function may still shift the images in the
 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
 * useful image area.
 */
+ (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:));
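// A minimal usage sketch (class name Calib3d assumed): rectifying a calibrated
// fisheye stereo pair. R and tvec are the inter-camera rotation and translation
// from stereoCalibrate below; the exact spelling of CALIB_ZERO_DISPARITY in these
// bindings is an assumption, so the flags value is left as a placeholder.
//
//     Mat *R1 = [Mat new], *R2 = [Mat new];
//     Mat *P1 = [Mat new], *P2 = [Mat new], *Q = [Mat new];
//     int flags = ...;   // typically fisheye::CALIB_ZERO_DISPARITY
//     [Calib3d stereoRectify:K1 D1:D1 K2:K2 D2:D2 imageSize:imageSize R:R tvec:tvec
//                         R1:R1 R2:R2 P1:P1 P2:P2 Q:Q flags:flags];
//     // R1/P1 and R2/P2 can then be passed to initUndistortRectifyMap per camera.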
//
// double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
//
/**
 * Performs stereo calibration
 *
 * @param objectPoints Vector of vectors of the calibration pattern points.
 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
 * observed by the first camera.
 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
 * observed by the second camera.
 * @param K1 Input/output first camera intrinsic matrix:
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}$$` , `$$j = 0,\, 1$$` . If
 * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
 * some or all of the matrix components must be initialized.
 * @param D1 Input/output vector of distortion coefficients `$$\distcoeffsfisheye$$` of 4 elements.
 * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
 * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
 * similar to D1 .
 * @param imageSize Size of the image used only to initialize camera intrinsic matrix.
 * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
 * @param T Output translation vector between the coordinate systems of the cameras.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only the R, T matrices
 * are estimated.
 * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center (imageSize is used), and focal distances are computed in a least-squares fashion.
 * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
 * of intrinsic optimization.
 * - REF: fisheye::CALIB_CHECK_COND The functions will check the validity of the condition number.
 * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
 * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
 * zero.
 * @param criteria Termination criteria for the iterative optimization algorithm.
 */
+ (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 K1:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:K1:D1:K2:D2:imageSize:R:T:flags:criteria:));
/**
 * Performs stereo calibration
 *
 * @param objectPoints Vector of vectors of the calibration pattern points.
 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
 * observed by the first camera.
 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
 * observed by the second camera.
 * @param K1 Input/output first camera intrinsic matrix:
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}$$` , `$$j = 0,\, 1$$` . If
 * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
 * some or all of the matrix components must be initialized.
 * @param D1 Input/output vector of distortion coefficients `$$\distcoeffsfisheye$$` of 4 elements.
 * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
 * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
 * similar to D1 .
 * @param imageSize Size of the image used only to initialize camera intrinsic matrix.
 * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
 * @param T Output translation vector between the coordinate systems of the cameras.
 * @param flags Different flags that may be zero or a combination of the following values:
 * - REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only the R, T matrices
 * are estimated.
 * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
 * center (imageSize is used), and focal distances are computed in a least-squares fashion.
 * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
 * of intrinsic optimization.
 * - REF: fisheye::CALIB_CHECK_COND The functions will check the validity of the condition number.
 * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
 * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
 * zero.
 */
+ (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 K1:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T flags:(int)flags NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:K1:D1:K2:D2:imageSize:R:T:flags:));
/**
 * Performs stereo calibration
 *
 * @param objectPoints Vector of vectors of the calibration pattern points.
 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
 * observed by the first camera.
 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
 * observed by the second camera.
 * @param K1 Input/output first camera intrinsic matrix:
 * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}$$` , `$$j = 0,\, 1$$` . If
 * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
 * some or all of the matrix components must be initialized.
 * @param D1 Input/output vector of distortion coefficients `$$\distcoeffsfisheye$$` of 4 elements.
 * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
 * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
 * similar to D1 .
 * @param imageSize Size of the image used only to initialize camera intrinsic matrix.
 * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
 * @param T Output translation vector between the coordinate systems of the cameras.
 */
+ (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 K1:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:K1:D1:K2:D2:imageSize:R:T:));
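// A minimal usage sketch (class name Calib3d assumed): calibrating a fisheye
// stereo rig. In this overload the flags default to fisheye::CALIB_FIX_INTRINSIC
// (per the C++ signature above), so K1/D1/K2/D2 should come from prior per-camera
// fisheye calibration.
//
//     Mat *R = [Mat new], *T = [Mat new];
//     double rms = [Calib3d stereoCalibrate:objectPoints imagePoints1:imagePoints1
//                                imagePoints2:imagePoints2 K1:K1 D1:D1 K2:K2 D2:D2
//                                   imageSize:imageSize R:R T:T];
//     // R and T then feed stereoRectify above.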
@end

NS_ASSUME_NONNULL_END