- //
- // This file is auto-generated. Please don't modify it!
- //
- #pragma once
- #ifdef __cplusplus
- //#import "opencv.hpp"
- #import "opencv2/calib3d.hpp"
- #else
- #define CV_EXPORTS
- #endif
- #import <Foundation/Foundation.h>
- @class CirclesGridFinderParameters;
- @class Double3;
- @class Mat;
- @class Point2d;
- @class Rect2i;
- @class Scalar;
- @class Size2i;
- @class TermCriteria;
- @class UsacParams;
- // C++: enum HandEyeCalibrationMethod (cv.HandEyeCalibrationMethod)
- typedef NS_ENUM(int, HandEyeCalibrationMethod) {
- CALIB_HAND_EYE_TSAI = 0,
- CALIB_HAND_EYE_PARK = 1,
- CALIB_HAND_EYE_HORAUD = 2,
- CALIB_HAND_EYE_ANDREFF = 3,
- CALIB_HAND_EYE_DANIILIDIS = 4
- };
- // C++: enum LocalOptimMethod (cv.LocalOptimMethod)
- typedef NS_ENUM(int, LocalOptimMethod) {
- LOCAL_OPTIM_NULL = 0,
- LOCAL_OPTIM_INNER_LO = 1,
- LOCAL_OPTIM_INNER_AND_ITER_LO = 2,
- LOCAL_OPTIM_GC = 3,
- LOCAL_OPTIM_SIGMA = 4
- };
- // C++: enum NeighborSearchMethod (cv.NeighborSearchMethod)
- typedef NS_ENUM(int, NeighborSearchMethod) {
- NEIGH_FLANN_KNN = 0,
- NEIGH_GRID = 1,
- NEIGH_FLANN_RADIUS = 2
- };
- // C++: enum RobotWorldHandEyeCalibrationMethod (cv.RobotWorldHandEyeCalibrationMethod)
- typedef NS_ENUM(int, RobotWorldHandEyeCalibrationMethod) {
- CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0,
- CALIB_ROBOT_WORLD_HAND_EYE_LI = 1
- };
- // C++: enum SamplingMethod (cv.SamplingMethod)
- typedef NS_ENUM(int, SamplingMethod) {
- SAMPLING_UNIFORM = 0,
- SAMPLING_PROGRESSIVE_NAPSAC = 1,
- SAMPLING_NAPSAC = 2,
- SAMPLING_PROSAC = 3
- };
- // C++: enum ScoreMethod (cv.ScoreMethod)
- typedef NS_ENUM(int, ScoreMethod) {
- SCORE_METHOD_RANSAC = 0,
- SCORE_METHOD_MSAC = 1,
- SCORE_METHOD_MAGSAC = 2,
- SCORE_METHOD_LMEDS = 3
- };
- // C++: enum SolvePnPMethod (cv.SolvePnPMethod)
- typedef NS_ENUM(int, SolvePnPMethod) {
- SOLVEPNP_ITERATIVE = 0,
- SOLVEPNP_EPNP = 1,
- SOLVEPNP_P3P = 2,
- SOLVEPNP_DLS = 3,
- SOLVEPNP_UPNP = 4,
- SOLVEPNP_AP3P = 5,
- SOLVEPNP_IPPE = 6,
- SOLVEPNP_IPPE_SQUARE = 7,
- SOLVEPNP_SQPNP = 8,
- SOLVEPNP_MAX_COUNT = 8+1
- };
- // C++: enum UndistortTypes (cv.UndistortTypes)
- typedef NS_ENUM(int, UndistortTypes) {
- PROJ_SPHERICAL_ORTHO = 0,
- PROJ_SPHERICAL_EQRECT = 1
- };
- NS_ASSUME_NONNULL_BEGIN
- // C++: class Calib3d
- /**
- * The Calib3d module
- *
- * Member classes: `UsacParams`, `CirclesGridFinderParameters`, `StereoMatcher`, `StereoBM`, `StereoSGBM`
- *
- * Member enums: `SolvePnPMethod`, `HandEyeCalibrationMethod`, `RobotWorldHandEyeCalibrationMethod`, `SamplingMethod`, `LocalOptimMethod`, `ScoreMethod`, `NeighborSearchMethod`, `GridType`, `UndistortTypes`
- */
- CV_EXPORTS @interface Calib3d : NSObject
- #pragma mark - Class Constants
- @property (class, readonly) int CV_ITERATIVE NS_SWIFT_NAME(CV_ITERATIVE);
- @property (class, readonly) int CV_EPNP NS_SWIFT_NAME(CV_EPNP);
- @property (class, readonly) int CV_P3P NS_SWIFT_NAME(CV_P3P);
- @property (class, readonly) int CV_DLS NS_SWIFT_NAME(CV_DLS);
- @property (class, readonly) int CvLevMarq_DONE NS_SWIFT_NAME(CvLevMarq_DONE);
- @property (class, readonly) int CvLevMarq_STARTED NS_SWIFT_NAME(CvLevMarq_STARTED);
- @property (class, readonly) int CvLevMarq_CALC_J NS_SWIFT_NAME(CvLevMarq_CALC_J);
- @property (class, readonly) int CvLevMarq_CHECK_ERR NS_SWIFT_NAME(CvLevMarq_CHECK_ERR);
- @property (class, readonly) int LMEDS NS_SWIFT_NAME(LMEDS);
- @property (class, readonly) int RANSAC NS_SWIFT_NAME(RANSAC);
- @property (class, readonly) int RHO NS_SWIFT_NAME(RHO);
- @property (class, readonly) int USAC_DEFAULT NS_SWIFT_NAME(USAC_DEFAULT);
- @property (class, readonly) int USAC_PARALLEL NS_SWIFT_NAME(USAC_PARALLEL);
- @property (class, readonly) int USAC_FM_8PTS NS_SWIFT_NAME(USAC_FM_8PTS);
- @property (class, readonly) int USAC_FAST NS_SWIFT_NAME(USAC_FAST);
- @property (class, readonly) int USAC_ACCURATE NS_SWIFT_NAME(USAC_ACCURATE);
- @property (class, readonly) int USAC_PROSAC NS_SWIFT_NAME(USAC_PROSAC);
- @property (class, readonly) int USAC_MAGSAC NS_SWIFT_NAME(USAC_MAGSAC);
- @property (class, readonly) int CALIB_CB_ADAPTIVE_THRESH NS_SWIFT_NAME(CALIB_CB_ADAPTIVE_THRESH);
- @property (class, readonly) int CALIB_CB_NORMALIZE_IMAGE NS_SWIFT_NAME(CALIB_CB_NORMALIZE_IMAGE);
- @property (class, readonly) int CALIB_CB_FILTER_QUADS NS_SWIFT_NAME(CALIB_CB_FILTER_QUADS);
- @property (class, readonly) int CALIB_CB_FAST_CHECK NS_SWIFT_NAME(CALIB_CB_FAST_CHECK);
- @property (class, readonly) int CALIB_CB_EXHAUSTIVE NS_SWIFT_NAME(CALIB_CB_EXHAUSTIVE);
- @property (class, readonly) int CALIB_CB_ACCURACY NS_SWIFT_NAME(CALIB_CB_ACCURACY);
- @property (class, readonly) int CALIB_CB_LARGER NS_SWIFT_NAME(CALIB_CB_LARGER);
- @property (class, readonly) int CALIB_CB_MARKER NS_SWIFT_NAME(CALIB_CB_MARKER);
- @property (class, readonly) int CALIB_CB_SYMMETRIC_GRID NS_SWIFT_NAME(CALIB_CB_SYMMETRIC_GRID);
- @property (class, readonly) int CALIB_CB_ASYMMETRIC_GRID NS_SWIFT_NAME(CALIB_CB_ASYMMETRIC_GRID);
- @property (class, readonly) int CALIB_CB_CLUSTERING NS_SWIFT_NAME(CALIB_CB_CLUSTERING);
- @property (class, readonly) int CALIB_NINTRINSIC NS_SWIFT_NAME(CALIB_NINTRINSIC);
- @property (class, readonly) int CALIB_USE_INTRINSIC_GUESS NS_SWIFT_NAME(CALIB_USE_INTRINSIC_GUESS);
- @property (class, readonly) int CALIB_FIX_ASPECT_RATIO NS_SWIFT_NAME(CALIB_FIX_ASPECT_RATIO);
- @property (class, readonly) int CALIB_FIX_PRINCIPAL_POINT NS_SWIFT_NAME(CALIB_FIX_PRINCIPAL_POINT);
- @property (class, readonly) int CALIB_ZERO_TANGENT_DIST NS_SWIFT_NAME(CALIB_ZERO_TANGENT_DIST);
- @property (class, readonly) int CALIB_FIX_FOCAL_LENGTH NS_SWIFT_NAME(CALIB_FIX_FOCAL_LENGTH);
- @property (class, readonly) int CALIB_FIX_K1 NS_SWIFT_NAME(CALIB_FIX_K1);
- @property (class, readonly) int CALIB_FIX_K2 NS_SWIFT_NAME(CALIB_FIX_K2);
- @property (class, readonly) int CALIB_FIX_K3 NS_SWIFT_NAME(CALIB_FIX_K3);
- @property (class, readonly) int CALIB_FIX_K4 NS_SWIFT_NAME(CALIB_FIX_K4);
- @property (class, readonly) int CALIB_FIX_K5 NS_SWIFT_NAME(CALIB_FIX_K5);
- @property (class, readonly) int CALIB_FIX_K6 NS_SWIFT_NAME(CALIB_FIX_K6);
- @property (class, readonly) int CALIB_RATIONAL_MODEL NS_SWIFT_NAME(CALIB_RATIONAL_MODEL);
- @property (class, readonly) int CALIB_THIN_PRISM_MODEL NS_SWIFT_NAME(CALIB_THIN_PRISM_MODEL);
- @property (class, readonly) int CALIB_FIX_S1_S2_S3_S4 NS_SWIFT_NAME(CALIB_FIX_S1_S2_S3_S4);
- @property (class, readonly) int CALIB_TILTED_MODEL NS_SWIFT_NAME(CALIB_TILTED_MODEL);
- @property (class, readonly) int CALIB_FIX_TAUX_TAUY NS_SWIFT_NAME(CALIB_FIX_TAUX_TAUY);
- @property (class, readonly) int CALIB_USE_QR NS_SWIFT_NAME(CALIB_USE_QR);
- @property (class, readonly) int CALIB_FIX_TANGENT_DIST NS_SWIFT_NAME(CALIB_FIX_TANGENT_DIST);
- @property (class, readonly) int CALIB_FIX_INTRINSIC NS_SWIFT_NAME(CALIB_FIX_INTRINSIC);
- @property (class, readonly) int CALIB_SAME_FOCAL_LENGTH NS_SWIFT_NAME(CALIB_SAME_FOCAL_LENGTH);
- @property (class, readonly) int CALIB_ZERO_DISPARITY NS_SWIFT_NAME(CALIB_ZERO_DISPARITY);
- @property (class, readonly) int CALIB_USE_LU NS_SWIFT_NAME(CALIB_USE_LU);
- @property (class, readonly) int CALIB_USE_EXTRINSIC_GUESS NS_SWIFT_NAME(CALIB_USE_EXTRINSIC_GUESS);
- @property (class, readonly) int FM_7POINT NS_SWIFT_NAME(FM_7POINT);
- @property (class, readonly) int FM_8POINT NS_SWIFT_NAME(FM_8POINT);
- @property (class, readonly) int FM_LMEDS NS_SWIFT_NAME(FM_LMEDS);
- @property (class, readonly) int FM_RANSAC NS_SWIFT_NAME(FM_RANSAC);
- @property (class, readonly) int CALIB_RECOMPUTE_EXTRINSIC NS_SWIFT_NAME(CALIB_RECOMPUTE_EXTRINSIC);
- @property (class, readonly) int CALIB_CHECK_COND NS_SWIFT_NAME(CALIB_CHECK_COND);
- @property (class, readonly) int CALIB_FIX_SKEW NS_SWIFT_NAME(CALIB_FIX_SKEW);
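- //
- // Usage note (editorial, not part of the generated API): the CALIB_* and
- // CALIB_CB_* constants above are bit flags, meant to be OR-ed together and
- // passed in the `flags` argument of the calibration / pattern-detection
- // functions. A sketch:
- //
- //   int calibFlags = Calib3d.CALIB_FIX_ASPECT_RATIO | Calib3d.CALIB_ZERO_TANGENT_DIST;
- //   int cbFlags = Calib3d.CALIB_CB_ADAPTIVE_THRESH | Calib3d.CALIB_CB_FAST_CHECK;
- //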
- #pragma mark - Methods
- //
- // void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
- //
- /**
- * Converts a rotation matrix to a rotation vector or vice versa.
- *
- * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
- * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
- * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
- * derivatives of the output array components with respect to the input array components.
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}$$`
- *
- * The inverse transformation can also be done easily, since
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}$$`
- *
- * A rotation vector is a convenient and maximally compact representation of a rotation matrix (since
- * any rotation matrix has just 3 degrees of freedom). The representation is used in global 3D geometry
- * optimization procedures such as REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP.
- *
- * NOTE: More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
- * can be found in:
- * - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
- *
- * NOTE: Useful information on SE(3) and Lie Groups can be found in:
- * - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
- * - Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
- * - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
- */
- + (void)Rodrigues:(Mat*)src dst:(Mat*)dst jacobian:(Mat*)jacobian NS_SWIFT_NAME(Rodrigues(src:dst:jacobian:));
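- //
- // A minimal usage sketch (editorial, not part of the generated header),
- // converting an axis-angle rotation vector into a 3x3 rotation matrix. It
- // assumes the opencv2 framework's Objective-C Mat API (initWithRows:cols:type:
- // and put:col:data:); the 90-degree rotation about the z axis is an invented
- // example value.
- //
- //   Mat *rvec = [[Mat alloc] initWithRows:3 cols:1 type:CV_64F];
- //   [rvec put:0 col:0 data:@[@0.0, @0.0, @(M_PI / 2)]];  // 90 deg about z
- //   Mat *R = [[Mat alloc] init];
- //   [Calib3d Rodrigues:rvec dst:R];  // R now holds the 3x3 rotation matrix
- //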
- /**
- * Converts a rotation matrix to a rotation vector or vice versa.
- *
- * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
- * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}$$`
- *
- * The inverse transformation can also be done easily, since
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}$$`
- *
- * A rotation vector is a convenient and maximally compact representation of a rotation matrix (since
- * any rotation matrix has just 3 degrees of freedom). The representation is used in global 3D geometry
- * optimization procedures such as REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP.
- *
- * NOTE: More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
- * can be found in:
- * - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
- *
- * NOTE: Useful information on SE(3) and Lie Groups can be found in:
- * - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
- * - Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
- * - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
- */
- + (void)Rodrigues:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(Rodrigues(src:dst:));
- //
- // Mat cv::findHomography(Mat srcPoints, Mat dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
- //
- /**
- * Finds a perspective transformation between two planes.
- *
- * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- * or vector\<Point2f\> .
- * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- * a vector\<Point2f\> .
- * @param method Method used to compute a homography matrix. The following methods are possible:
- * - **0** - a regular method using all the points, i.e., the least squares method
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * - REF: RHO - PROSAC-based robust method
- * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
- * (used in the RANSAC and RHO methods only). That is, if
- * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
- * then the point `$$i$$` is considered an outlier. If srcPoints and dstPoints are measured in pixels,
- * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
- * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
- * mask values are ignored.
- * @param maxIters The maximum number of RANSAC iterations.
- * @param confidence Confidence level, between 0 and 1.
- *
- * The function finds and returns the perspective transformation `$$H$$` between the source and the
- * destination planes:
- *
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
- *
- * so that the back-projection error
- *
- * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
- *
- * is minimized. If the parameter method is set to the default value 0, the function uses all the point
- * pairs to compute an initial homography estimate with a simple least-squares scheme.
- *
- * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
- * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- * random subsets of the corresponding point pairs (of four pairs each; collinear pairs are discarded), estimate the homography matrix
- * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- * the mask of inliers/outliers.
- *
- * Regardless of the method, robust or not, the computed homography matrix is refined further (using
- * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- * re-projection error even more.
- *
- * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold, but it works
- * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
- * noise is rather small, use the default method (method=0).
- *
- * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- * determined only up to scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
- * cannot be estimated, an empty one will be returned.
- *
- * @sa
- * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- * perspectiveTransform
- */
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold mask:(Mat*)mask maxIters:(int)maxIters confidence:(double)confidence NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:mask:maxIters:confidence:));
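- //
- // A minimal usage sketch (editorial, not part of the generated header),
- // estimating a RANSAC homography from two CV_32FC2 Mats of matched points;
- // srcMat and dstMat are assumed to have been filled by the caller.
- //
- //   Mat *inlierMask = [[Mat alloc] init];
- //   Mat *H = [Calib3d findHomography:srcMat
- //                          dstPoints:dstMat
- //                             method:Calib3d.RANSAC
- //              ransacReprojThreshold:3.0
- //                               mask:inlierMask
- //                           maxIters:2000
- //                         confidence:0.995];
- //   if ([H empty]) { /* no homography could be estimated */ }
- //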
- /**
- * Finds a perspective transformation between two planes.
- *
- * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- * or vector\<Point2f\> .
- * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- * a vector\<Point2f\> .
- * @param method Method used to compute a homography matrix. The following methods are possible:
- * - **0** - a regular method using all the points, i.e., the least squares method
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * - REF: RHO - PROSAC-based robust method
- * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
- * (used in the RANSAC and RHO methods only). That is, if
- * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
- * then the point `$$i$$` is considered an outlier. If srcPoints and dstPoints are measured in pixels,
- * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
- * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
- * mask values are ignored.
- * @param maxIters The maximum number of RANSAC iterations.
- *
- * The function finds and returns the perspective transformation `$$H$$` between the source and the
- * destination planes:
- *
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
- *
- * so that the back-projection error
- *
- * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
- *
- * is minimized. If the parameter method is set to the default value 0, the function uses all the point
- * pairs to compute an initial homography estimate with a simple least-squares scheme.
- *
- * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
- * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- * random subsets of the corresponding point pairs (of four pairs each; collinear pairs are discarded), estimate the homography matrix
- * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- * the mask of inliers/outliers.
- *
- * Regardless of the method, robust or not, the computed homography matrix is refined further (using
- * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- * re-projection error even more.
- *
- * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold, but it works
- * correctly only when more than 50% of the points are inliers. Finally, if there are no outliers and the
- * noise is rather small, use the default method (method=0).
- *
- * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- * determined only up to scale; thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
- * cannot be estimated, an empty one will be returned.
- *
- * @sa
- * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- * perspectiveTransform
- */
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold mask:(Mat*)mask maxIters:(int)maxIters NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:mask:maxIters:));
- /**
- * Finds a perspective transformation between two planes.
- *
- * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- * or vector\<Point2f\> .
- * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- * a vector\<Point2f\> .
- * @param method Method used to compute a homography matrix. The following methods are possible:
- * - **0** - a regular method using all the points, i.e., the least squares method
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * - REF: RHO - PROSAC-based robust method
- * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
- * (used in the RANSAC and RHO methods only). That is, if
- * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
- * then the point `$$i$$` is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
- * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
- * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
- * mask values are ignored.
- *
- * The function finds and returns the perspective transformation `$$H$$` between the source and the
- * destination planes:
- *
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
- *
- * so that the back-projection error
- *
- * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
- *
- * is minimized. If the parameter method is set to the default value 0, the function uses all the point
- * pairs to compute an initial homography estimate with a simple least-squares scheme.
- *
- * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
- * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- * the mask of inliers/outliers.
- *
- * Regardless of the method, robust or not, the computed homography matrix is refined further (using
- * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- * re-projection error even more.
- *
- * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- * noise is rather small, use the default method (method=0).
- *
- * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- * determined up to a scale. Thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
- * cannot be estimated, an empty one will be returned.
- *
- * @sa
- * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- * perspectiveTransform
- */
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold mask:(Mat*)mask NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:mask:));
- /**
- * Finds a perspective transformation between two planes.
- *
- * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- * or vector\<Point2f\> .
- * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- * a vector\<Point2f\> .
- * @param method Method used to compute a homography matrix. The following methods are possible:
- * - **0** - a regular method using all the points, i.e., the least squares method
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * - REF: RHO - PROSAC-based robust method
- * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
- * (used in the RANSAC and RHO methods only). That is, if
- * `$$\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}$$`
- * then the point `$$i$$` is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
- * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
- *
- * The function finds and returns the perspective transformation `$$H$$` between the source and the
- * destination planes:
- *
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
- *
- * so that the back-projection error
- *
- * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
- *
- * is minimized. If the parameter method is set to the default value 0, the function uses all the point
- * pairs to compute an initial homography estimate with a simple least-squares scheme.
- *
- * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
- * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- * the mask of inliers/outliers.
- *
- * Regardless of the method, robust or not, the computed homography matrix is refined further (using
- * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- * re-projection error even more.
- *
- * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- * noise is rather small, use the default method (method=0).
- *
- * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- * determined up to a scale. Thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
- * cannot be estimated, an empty one will be returned.
- *
- * @sa
- * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- * perspectiveTransform
- */
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:ransacReprojThreshold:));
- /**
- * Finds a perspective transformation between two planes.
- *
- * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- * or vector\<Point2f\> .
- * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- * a vector\<Point2f\> .
- * @param method Method used to compute a homography matrix. The following methods are possible:
- * - **0** - a regular method using all the points, i.e., the least squares method
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * - REF: RHO - PROSAC-based robust method
- *
- * The function finds and returns the perspective transformation `$$H$$` between the source and the
- * destination planes:
- *
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
- *
- * so that the back-projection error
- *
- * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
- *
- * is minimized. If the parameter method is set to the default value 0, the function uses all the point
- * pairs to compute an initial homography estimate with a simple least-squares scheme.
- *
- * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
- * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- * the mask of inliers/outliers.
- *
- * Regardless of the method, robust or not, the computed homography matrix is refined further (using
- * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- * re-projection error even more.
- *
- * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- * noise is rather small, use the default method (method=0).
- *
- * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- * determined up to a scale. Thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
- * cannot be estimated, an empty one will be returned.
- *
- * @sa
- * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- * perspectiveTransform
- */
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints method:(int)method NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:method:));
- /**
- * Finds a perspective transformation between two planes.
- *
- * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- * or vector\<Point2f\> .
- * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- * a vector\<Point2f\> .
- *
- * The function finds and returns the perspective transformation `$$H$$` between the source and the
- * destination planes:
- *
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$`
- *
- * so that the back-projection error
- *
- * `$$\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2$$`
- *
- * is minimized. If the parameter method is set to the default value 0, the function uses all the point
- * pairs to compute an initial homography estimate with a simple least-squares scheme.
- *
- * However, if not all of the point pairs ( `$$srcPoints_i$$`, `$$dstPoints_i$$` ) fit the rigid perspective
- * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- * the mask of inliers/outliers.
- *
- * Regardless of the method, robust or not, the computed homography matrix is refined further (using
- * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- * re-projection error even more.
- *
- * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- * noise is rather small, use the default method (method=0).
- *
- * The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- * determined up to a scale. Thus, it is normalized so that `$$h_{33}=1$$`. Note that whenever an `$$H$$` matrix
- * cannot be estimated, an empty one will be returned.
- *
- * @sa
- * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- * perspectiveTransform
- */
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:));
- //
- // Mat cv::findHomography(Mat srcPoints, Mat dstPoints, Mat& mask, UsacParams params)
- //
- + (Mat*)findHomography:(Mat*)srcPoints dstPoints:(Mat*)dstPoints mask:(Mat*)mask params:(UsacParams*)params NS_SWIFT_NAME(findHomography(srcPoints:dstPoints:mask:params:));
- //
- // Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
- //
- /**
- * Computes an RQ decomposition of 3x3 matrices.
- *
- * @param src 3x3 input matrix.
- * @param mtxR Output 3x3 upper-triangular matrix.
- * @param mtxQ Output 3x3 orthogonal matrix.
- * @param Qx Optional output 3x3 rotation matrix around x-axis.
- * @param Qy Optional output 3x3 rotation matrix around y-axis.
- * @param Qz Optional output 3x3 rotation matrix around z-axis.
- *
- * The function computes an RQ decomposition using the given rotations. This function is used in
- * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- * and a rotation matrix.
- *
- * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- * sequence of rotations about the three principal axes that results in the same orientation of an
- * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
- * are only one of the possible solutions.
- */
- + (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ Qx:(Mat*)Qx Qy:(Mat*)Qy Qz:(Mat*)Qz NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:Qx:Qy:Qz:));
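- /*
-  * Usage sketch (illustrative, not part of the generated header), assuming the
-  * wrapper's Mat put:col:data: accessor and that Double3 exposes its components
-  * as v0/v1/v2:
-  *
-  *   // A 30-degree rotation about the z-axis. The input is already orthogonal,
-  *   // so mtxR should come out close to the identity and mtxQ close to M.
-  *   double a = 30.0 * M_PI / 180.0;
-  *   Mat *M = [[Mat alloc] initWithRows:3 cols:3 type:CV_64F];
-  *   [M put:0 col:0 data:@[@(cos(a)), @(-sin(a)), @0.0,
-  *                         @(sin(a)), @(cos(a)),  @0.0,
-  *                         @0.0,      @0.0,       @1.0]];
-  *   Mat *mtxR = [Mat new], *mtxQ = [Mat new];
-  *   Double3 *eulerDeg = [Calib3d RQDecomp3x3:M mtxR:mtxR mtxQ:mtxQ];
-  *   // eulerDeg holds the three Euler angles in degrees; for this input the
-  *   // z-angle (v2) should have magnitude close to 30.
-  */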
- /**
- * Computes an RQ decomposition of 3x3 matrices.
- *
- * @param src 3x3 input matrix.
- * @param mtxR Output 3x3 upper-triangular matrix.
- * @param mtxQ Output 3x3 orthogonal matrix.
- * @param Qx Optional output 3x3 rotation matrix around x-axis.
- * @param Qy Optional output 3x3 rotation matrix around y-axis.
- *
- * The function computes an RQ decomposition using the given rotations. This function is used in
- * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- * and a rotation matrix.
- *
- * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- * sequence of rotations about the three principal axes that results in the same orientation of an
- * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
- * are only one of the possible solutions.
- */
- + (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ Qx:(Mat*)Qx Qy:(Mat*)Qy NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:Qx:Qy:));
- /**
- * Computes an RQ decomposition of 3x3 matrices.
- *
- * @param src 3x3 input matrix.
- * @param mtxR Output 3x3 upper-triangular matrix.
- * @param mtxQ Output 3x3 orthogonal matrix.
- * @param Qx Optional output 3x3 rotation matrix around x-axis.
- *
- * The function computes an RQ decomposition using the given rotations. This function is used in
- * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- * and a rotation matrix.
- *
- * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- * sequence of rotations about the three principal axes that results in the same orientation of an
- * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
- * are only one of the possible solutions.
- */
- + (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ Qx:(Mat*)Qx NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:Qx:));
- /**
- * Computes an RQ decomposition of 3x3 matrices.
- *
- * @param src 3x3 input matrix.
- * @param mtxR Output 3x3 upper-triangular matrix.
- * @param mtxQ Output 3x3 orthogonal matrix.
- *
- * The function computes an RQ decomposition using the given rotations. This function is used in
- * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- * and a rotation matrix.
- *
- * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- * sequence of rotations about the three principal axes that results in the same orientation of an
- * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
- * are only one of the possible solutions.
- */
- + (Double3*)RQDecomp3x3:(Mat*)src mtxR:(Mat*)mtxR mtxQ:(Mat*)mtxQ NS_SWIFT_NAME(RQDecomp3x3(src:mtxR:mtxQ:));
- //
- // void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
- //
- /**
- * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
- *
- * @param projMatrix 3x4 input projection matrix P.
- * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
- * @param rotMatrix Output 3x3 external rotation matrix R.
- * @param transVect Output 4x1 translation vector T.
- * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
- * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
- * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
- * @param eulerAngles Optional three-element vector containing three Euler angles of rotation in
- * degrees.
- *
- * The function computes a decomposition of a projection matrix into a calibration and a rotation
- * matrix and the position of a camera.
- *
- * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
- * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
- *
- * The function is based on RQDecomp3x3 .
- */
- + (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX rotMatrixY:(Mat*)rotMatrixY rotMatrixZ:(Mat*)rotMatrixZ eulerAngles:(Mat*)eulerAngles NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:rotMatrixY:rotMatrixZ:eulerAngles:));
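- /*
-  * Usage sketch (illustrative, not part of the generated header). A projection
-  * matrix P = K [R | t] for a camera translated one unit along x (identity
-  * rotation, made-up intrinsics) is decomposed back into its factors:
-  *
-  *   Mat *P = [[Mat alloc] initWithRows:3 cols:4 type:CV_64F];
-  *   [P put:0 col:0 data:@[@800.0, @0.0,   @320.0, @-800.0,
-  *                         @0.0,   @800.0, @240.0, @0.0,
-  *                         @0.0,   @0.0,   @1.0,   @0.0]];
-  *   Mat *K = [Mat new], *R = [Mat new], *t = [Mat new];
-  *   [Calib3d decomposeProjectionMatrix:P cameraMatrix:K rotMatrix:R transVect:t];
-  *   // K ~ [800 0 320; 0 800 240; 0 0 1], R ~ identity, and t is the camera
-  *   // position as a homogeneous 4x1 vector, here ~ (1, 0, 0, 1) up to scale.
-  */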
- /**
- * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
- *
- * @param projMatrix 3x4 input projection matrix P.
- * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
- * @param rotMatrix Output 3x3 external rotation matrix R.
- * @param transVect Output 4x1 translation vector T.
- * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
- * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
- * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
- *
- * The function computes a decomposition of a projection matrix into a calibration and a rotation
- * matrix and the position of a camera.
- *
- * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
- * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
- *
- * The function is based on RQDecomp3x3 .
- */
- + (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX rotMatrixY:(Mat*)rotMatrixY rotMatrixZ:(Mat*)rotMatrixZ NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:rotMatrixY:rotMatrixZ:));
- /**
- * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
- *
- * @param projMatrix 3x4 input projection matrix P.
- * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
- * @param rotMatrix Output 3x3 external rotation matrix R.
- * @param transVect Output 4x1 translation vector T.
- * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
- * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
- *
- * The function computes a decomposition of a projection matrix into a calibration and a rotation
- * matrix and the position of a camera.
- *
- * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
- * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
- *
- * The function is based on RQDecomp3x3 .
- */
- + (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX rotMatrixY:(Mat*)rotMatrixY NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:rotMatrixY:));
- /**
- * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
- *
- * @param projMatrix 3x4 input projection matrix P.
- * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
- * @param rotMatrix Output 3x3 external rotation matrix R.
- * @param transVect Output 4x1 translation vector T.
- * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
- *
- * The function computes a decomposition of a projection matrix into a calibration and a rotation
- * matrix and the position of a camera.
- *
- * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
- * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
- *
- * The function is based on RQDecomp3x3 .
- */
- + (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect rotMatrixX:(Mat*)rotMatrixX NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:rotMatrixX:));
- /**
- * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
- *
- * @param projMatrix 3x4 input projection matrix P.
- * @param cameraMatrix Output 3x3 camera intrinsic matrix `$$\cameramatrix{A}$$`.
- * @param rotMatrix Output 3x3 external rotation matrix R.
- * @param transVect Output 4x1 translation vector T.
- *
- * The function computes a decomposition of a projection matrix into a calibration and a rotation
- * matrix and the position of a camera.
- *
- * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
- * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
- *
- * The function is based on RQDecomp3x3 .
- */
- + (void)decomposeProjectionMatrix:(Mat*)projMatrix cameraMatrix:(Mat*)cameraMatrix rotMatrix:(Mat*)rotMatrix transVect:(Mat*)transVect NS_SWIFT_NAME(decomposeProjectionMatrix(projMatrix:cameraMatrix:rotMatrix:transVect:));
- //
- // void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
- //
- /**
- * Computes partial derivatives of the matrix product for each multiplied matrix.
- *
- * @param A First multiplied matrix.
- * @param B Second multiplied matrix.
- * @param dABdA First output derivative matrix d(A\*B)/dA of size
- * `$$\texttt{A.rows*B.cols} \times \texttt{A.rows*A.cols}$$` .
- * @param dABdB Second output derivative matrix d(A\*B)/dB of size
- * `$$\texttt{A.rows*B.cols} \times \texttt{B.rows*B.cols}$$` .
- *
- * The function computes partial derivatives of the elements of the matrix product `$$A*B$$` with regard to
- * the elements of each of the two input matrices. The function is used to compute the Jacobian
- * matrices in #stereoCalibrate but can also be used in any other similar optimization function.
- */
- + (void)matMulDeriv:(Mat*)A B:(Mat*)B dABdA:(Mat*)dABdA dABdB:(Mat*)dABdB NS_SWIFT_NAME(matMulDeriv(A:B:dABdA:dABdB:));
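- /*
-  * Usage sketch (illustrative, not part of the generated header), assuming the
-  * wrapper's Mat eye:cols:type: factory:
-  *
-  *   Mat *A = [Mat eye:2 cols:3 type:CV_64F];
-  *   Mat *B = [Mat eye:3 cols:4 type:CV_64F];
-  *   Mat *dABdA = [Mat new], *dABdB = [Mat new];
-  *   [Calib3d matMulDeriv:A B:B dABdA:dABdA dABdB:dABdB];
-  *   // A*B is 2x4, so dABdA is (2*4)x(2*3) = 8x6 and dABdB is
-  *   // (2*4)x(3*4) = 8x12; each row corresponds to one entry of A*B
-  *   // flattened row-major, each column to one entry of A (resp. B).
-  */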
- //
- // void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
- //
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
- * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
- * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
- * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
- * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
- * @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 dt3dt1:(Mat*)dt3dt1 dt3dr2:(Mat*)dt3dr2 dt3dt2:(Mat*)dt3dt2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:dt3dt1:dt3dr2:dt3dt2:));
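- /*
-  * Usage sketch (illustrative, not part of the generated header): composing two
-  * 90-degree rotations about the z-axis with zero translations.
-  *
-  *   Mat *rvec1 = [[Mat alloc] initWithRows:3 cols:1 type:CV_64F];
-  *   [rvec1 put:0 col:0 data:@[@0.0, @0.0, @(M_PI / 2.0)]];
-  *   Mat *rvec2 = [rvec1 clone];
-  *   Mat *tvec1 = [Mat zeros:3 cols:1 type:CV_64F];
-  *   Mat *tvec2 = [Mat zeros:3 cols:1 type:CV_64F];
-  *   Mat *rvec3 = [Mat new], *tvec3 = [Mat new];
-  *   [Calib3d composeRT:rvec1 tvec1:tvec1 rvec2:rvec2 tvec2:tvec2
-  *                rvec3:rvec3 tvec3:tvec3];
-  *   // rvec3 is approximately [0, 0, pi] (a 180-degree rotation about z)
-  *   // and tvec3 stays at the origin.
-  */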
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
- * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
- * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
- * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
- * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 dt3dt1:(Mat*)dt3dt1 dt3dr2:(Mat*)dt3dr2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:dt3dt1:dt3dr2:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
- * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
- * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
- * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 dt3dt1:(Mat*)dt3dt1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:dt3dt1:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
- * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
- * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 dt3dr1:(Mat*)dt3dr1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:dt3dr1:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
- * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 dr3dt2:(Mat*)dr3dt2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:dr3dt2:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 dr3dr2:(Mat*)dr3dr2 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:dr3dr2:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 dr3dt1:(Mat*)dr3dt1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:dr3dt1:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 dr3dr1:(Mat*)dr3dr1 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:dr3dr1:));
- /**
- * Combines two rotation-and-shift transformations.
- *
- * @param rvec1 First rotation vector.
- * @param tvec1 First translation vector.
- * @param rvec2 Second rotation vector.
- * @param tvec2 Second translation vector.
- * @param rvec3 Output rotation vector of the superposition.
- * @param tvec3 Output translation vector of the superposition.
- *
- * The functions compute:
- *
- * `$$\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,$$`
- *
- * where `$$\mathrm{rodrigues}$$` denotes a rotation vector to a rotation matrix transformation, and
- * `$$\mathrm{rodrigues}^{-1}$$` denotes the inverse transformation. See Rodrigues for details.
- *
- * Also, the functions can compute the derivatives of the output vectors with regard to the input
- * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- * function that contains a matrix multiplication.
- */
- + (void)composeRT:(Mat*)rvec1 tvec1:(Mat*)tvec1 rvec2:(Mat*)rvec2 tvec2:(Mat*)tvec2 rvec3:(Mat*)rvec3 tvec3:(Mat*)tvec3 NS_SWIFT_NAME(composeRT(rvec1:tvec1:rvec2:tvec2:rvec3:tvec3:));
- //
- // void cv::projectPoints(Mat objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, Mat distCoeffs, Mat& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
- //
- /**
- * Projects 3D points to an image plane.
- *
- * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
- * 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
- * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
- * basis from world to camera coordinate system, see REF: calibrateCamera for details.
- * @param tvec The translation vector, see parameter description above.
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$` . If the vector is empty, the zero distortion coefficients are assumed.
- * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
- * vector\<Point2f\> .
- * @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
- * points with respect to components of the rotation vector, translation vector, focal lengths,
- * coordinates of the principal point and the distortion coefficients. In the old interface different
- * components of the jacobian are returned via different output parameters.
- * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
- * function assumes that the aspect ratio (`$$f_x / f_y$$`) is fixed and correspondingly adjusts the
- * jacobian matrix.
- *
- * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
- * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
- * derivatives of image point coordinates (as functions of all the input parameters) with respect to
- * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
- * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
- * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
- * parameters.
- *
- * NOTE: By setting rvec = tvec = `$$[0, 0, 0]$$`, or by setting cameraMatrix to a 3x3 identity matrix,
- * or by passing zero distortion coefficients, one can get various useful partial cases of the
- * function. This means one can compute the distorted coordinates for a sparse set of points or apply
- * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
- */
- + (void)projectPoints:(Mat*)objectPoints rvec:(Mat*)rvec tvec:(Mat*)tvec cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imagePoints:(Mat*)imagePoints jacobian:(Mat*)jacobian aspectRatio:(double)aspectRatio NS_SWIFT_NAME(projectPoints(objectPoints:rvec:tvec:cameraMatrix:distCoeffs:imagePoints:jacobian:aspectRatio:));
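- /*
-  * Usage sketch (illustrative, not part of the generated header): projecting a
-  * single 3D point with an ideal pinhole camera (identity pose, no distortion).
-  * MatOfPoint3f/MatOfPoint2f and put:col:data: are assumed from the wrapper.
-  *
-  *   Mat *objectPoints = [[MatOfPoint3f alloc] initWithArray:
-  *       @[[[Point3f alloc] initWithX:0 y:0 z:1]]];
-  *   Mat *rvec = [Mat zeros:3 cols:1 type:CV_64F];
-  *   Mat *tvec = [Mat zeros:3 cols:1 type:CV_64F];
-  *   Mat *K = [Mat eye:3 cols:3 type:CV_64F];
-  *   [K put:0 col:0 data:@[@800.0, @0.0, @320.0]];   // fx, skew, cx
-  *   [K put:1 col:1 data:@[@800.0, @240.0]];         // fy, cy
-  *   MatOfPoint2f *imagePoints = [MatOfPoint2f new];
-  *   [Calib3d projectPoints:objectPoints rvec:rvec tvec:tvec
-  *             cameraMatrix:K distCoeffs:[Mat new] imagePoints:imagePoints];
-  *   // The point (0,0,1) lands on the principal point: (320, 240).
-  */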
- /**
- * Projects 3D points to an image plane.
- *
- * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
- * 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
- * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
- * basis from world to camera coordinate system, see REF: calibrateCamera for details.
- * @param tvec The translation vector, see parameter description above.
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$` . If the vector is empty, the zero distortion coefficients are assumed.
- * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
- * vector\<Point2f\> .
- * @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
- * points with respect to components of the rotation vector, translation vector, focal lengths,
- * coordinates of the principal point and the distortion coefficients. In the old interface different
- * components of the jacobian are returned via different output parameters.
- *
- * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
- * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
- * derivatives of image point coordinates (as functions of all the input parameters) with respect to
- * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
- * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
- * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
- * parameters.
- *
- * NOTE: By setting rvec = tvec = `$$[0, 0, 0]$$`, or by setting cameraMatrix to a 3x3 identity matrix,
- * or by passing zero distortion coefficients, one can get various useful partial cases of the
- * function. This means one can compute the distorted coordinates for a sparse set of points or apply
- * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
- */
- + (void)projectPoints:(Mat*)objectPoints rvec:(Mat*)rvec tvec:(Mat*)tvec cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imagePoints:(Mat*)imagePoints jacobian:(Mat*)jacobian NS_SWIFT_NAME(projectPoints(objectPoints:rvec:tvec:cameraMatrix:distCoeffs:imagePoints:jacobian:));
- /**
- * Projects 3D points to an image plane.
- *
- * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
- * 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
- * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
- * basis from world to camera coordinate system, see REF: calibrateCamera for details.
- * @param tvec The translation vector, see parameter description above.
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$` . If the vector is empty, the zero distortion coefficients are assumed.
- * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
- * vector\<Point2f\> .
- *
- * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
- * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
- * derivatives of image point coordinates (as functions of all the input parameters) with respect to
- * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
- * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
- * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
- * parameters.
- *
- * NOTE: By setting rvec = tvec = `$$[0, 0, 0]$$`, or by setting cameraMatrix to a 3x3 identity matrix,
- * or by passing zero distortion coefficients, one can get various useful partial cases of the
- * function. This means one can compute the distorted coordinates for a sparse set of points or apply
- * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
- */
- + (void)projectPoints:(Mat*)objectPoints rvec:(Mat*)rvec tvec:(Mat*)tvec cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imagePoints:(Mat*)imagePoints NS_SWIFT_NAME(projectPoints(objectPoints:rvec:tvec:cameraMatrix:distCoeffs:imagePoints:));
- //
- // bool cv::solvePnP(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
- //
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
- * coordinate frame to the camera coordinate frame, using different methods:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can be also passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- *
- * More information about the Perspective-n-Point problem is described in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - With REF: SOLVEPNP_SQPNP input points must be >= 3.
- */
- + (BOOL)solvePnP:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(int)flags NS_SWIFT_NAME(solvePnP(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:flags:));
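- /*
- * A minimal usage sketch for the overload above. It assumes these class methods are
- * exposed on a `Calib3d` class in the opencv2 Objective-C framework, that the
- * `MatOfPoint3f`/`Point3f` helper types are available, and that the flag constant
- * mirrors the C++ name; `K` (the 3x3 intrinsic matrix) and `imagePoints` (the 4
- * detected marker corners, in the same order) are assumed to be prepared elsewhere:
- *
- *     float s = 0.05f; // hypothetical squareLength, in meters
- *     // Object points in the exact order required by SOLVEPNP_IPPE_SQUARE:
- *     MatOfPoint3f *objectPoints = [[MatOfPoint3f alloc] initWithArray:@[
- *         [[Point3f alloc] initWithX:-s/2 y: s/2 z:0],
- *         [[Point3f alloc] initWithX: s/2 y: s/2 z:0],
- *         [[Point3f alloc] initWithX: s/2 y:-s/2 z:0],
- *         [[Point3f alloc] initWithX:-s/2 y:-s/2 z:0]]];
- *     Mat *rvec = [Mat new], *tvec = [Mat new];
- *     BOOL ok = [Calib3d solvePnP:objectPoints imagePoints:imagePoints
- *                    cameraMatrix:K distCoeffs:[Mat new] rvec:rvec tvec:tvec
- *               useExtrinsicGuess:NO flags:SOLVEPNP_IPPE_SQUARE];
- */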
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
- * coordinate frame to the camera coordinate frame, using different methods:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- *
- * More information about the Perspective-n-Point (PnP) problem can be found in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - With REF: SOLVEPNP_SQPNP input points must be >= 3.
- */
- + (BOOL)solvePnP:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(solvePnP(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:));
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
- * coordinate frame to the camera coordinate frame, using different methods:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- *
- * More information about the Perspective-n-Point (PnP) problem can be found in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - With REF: SOLVEPNP_SQPNP input points must be >= 3.
- */
- + (BOOL)solvePnP:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnP(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
- //
- // bool cv::solvePnPRansac(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
- //
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param iterationsCount Number of iterations.
- * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
- * is the maximum allowed distance between the observed and computed point projections for a point
- * to be considered an inlier.
- * @param confidence The probability that the algorithm produces a useful result.
- * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
- * @param flags Method for solving a PnP problem (see REF: solvePnP ).
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError confidence:(double)confidence inliers:(Mat*)inliers flags:(int)flags NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:confidence:inliers:flags:));
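- /*
- * A minimal RANSAC sketch for the overload above, under the same assumptions as the
- * solvePnP example (class assumed to be named `Calib3d`; `objectPoints`, `imagePoints`
- * and the intrinsic matrix `K` prepared elsewhere):
- *
- *     Mat *rvec = [Mat new], *tvec = [Mat new], *inliers = [Mat new];
- *     BOOL ok = [Calib3d solvePnPRansac:objectPoints imagePoints:imagePoints
- *                          cameraMatrix:K distCoeffs:[Mat new] rvec:rvec tvec:tvec
- *                     useExtrinsicGuess:NO iterationsCount:100 reprojectionError:8.0f
- *                            confidence:0.99 inliers:inliers flags:SOLVEPNP_ITERATIVE];
- *     // inliers now holds the row indices of the correspondences kept by RANSAC.
- */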
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param iterationsCount Number of iterations.
- * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
- * is the maximum allowed distance between the observed and computed point projections for a point
- * to be considered an inlier.
- * @param confidence The probability that the algorithm produces a useful result.
- * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError confidence:(double)confidence inliers:(Mat*)inliers NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:confidence:inliers:));
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param iterationsCount Number of iterations.
- * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
- * is the maximum allowed distance between the observed and computed point projections for a point
- * to be considered an inlier.
- * @param confidence The probability that the algorithm produces a useful result.
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError confidence:(double)confidence NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:confidence:));
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param iterationsCount Number of iterations.
- * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
- * is the maximum allowed distance between the observed and computed point projections for a point
- * to be considered an inlier.
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount reprojectionError:(float)reprojectionError NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:reprojectionError:));
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param iterationsCount Number of iterations.
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess iterationsCount:(int)iterationsCount NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:iterationsCount:));
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:));
- /**
- * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Output translation vector.
- *
- * The function estimates an object pose given a set of object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- * makes the function resistant to outliers.
- *
- * NOTE:
- * - An example of how to use solvePnPRansac for object detection can be found at
- * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- * - The default method used to estimate the camera pose for the Minimal Sample Sets step
- * is #SOLVEPNP_EPNP. Exceptions are:
- * - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- * - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- * - The method used to estimate the camera pose using all the inliers is defined by the
- * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- * the method #SOLVEPNP_EPNP will be used instead.
- */
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
- //
- // bool cv::solvePnPRansac(Mat objectPoints, Mat imagePoints, Mat& cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
- //
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec inliers:(Mat*)inliers params:(UsacParams*)params NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:inliers:params:));
- + (BOOL)solvePnPRansac:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec inliers:(Mat*)inliers NS_SWIFT_NAME(solvePnPRansac(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:inliers:));
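- /*
- * Sketch for the USAC-based overload above; a default-constructed UsacParams is
- * assumed to mirror the C++ default argument shown in the prototype. Note that in
- * this variant cameraMatrix is an in/out argument (Mat&) and may be refined:
- *
- *     BOOL ok = [Calib3d solvePnPRansac:objectPoints imagePoints:imagePoints
- *                          cameraMatrix:K distCoeffs:[Mat new] rvec:rvec tvec:tvec
- *                               inliers:inliers params:[[UsacParams alloc] init]];
- */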
- //
- // int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags)
- //
- /**
- * Finds an object pose from exactly 3 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
- * 1x3/3x1 3-channel. vector\<Point3f\> can also be passed here.
- * @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
- * vector\<Point2f\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
- * the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
- * @param tvecs Output translation vectors.
- * @param flags Method for solving a P3P problem:
- * - REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Cheng
- * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
- * - REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
- * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
- *
- * The function estimates the object pose given 3 object points, their corresponding image
- * projections, as well as the camera intrinsic matrix and the distortion coefficients.
- *
- * NOTE:
- * The solutions are sorted by reprojection errors (lowest to highest).
- */
- + (int)solveP3P:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(solveP3P(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:flags:));
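- /*
- * Sketch: solveP3P returns the number of solutions found (up to 4), sorted by
- * reprojection error, lowest first. Same assumptions as the earlier examples;
- * `objectPoints3` and `imagePoints3` are hypothetical Mats holding exactly 3
- * correspondences:
- *
- *     NSMutableArray<Mat*> *rvecs = [NSMutableArray array];
- *     NSMutableArray<Mat*> *tvecs = [NSMutableArray array];
- *     int n = [Calib3d solveP3P:objectPoints3 imagePoints:imagePoints3
- *                  cameraMatrix:K distCoeffs:[Mat new] rvecs:rvecs tvecs:tvecs
- *                         flags:SOLVEPNP_AP3P];
- *     // rvecs[0]/tvecs[0] is the pose with the lowest reprojection error.
- */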
- //
- // void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON))
- //
- /**
- * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- * where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
- * @param tvec Input/Output translation vector. Input values are used as an initial solution.
- * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
- *
- * The function refines the object pose given at least 3 object points, their corresponding image
- * projections, an initial solution for the rotation and translation vector,
- * as well as the camera intrinsic matrix and the distortion coefficients.
- * The function minimizes the projection error with respect to the rotation and the translation vectors, according
- * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
- */
- + (void)solvePnPRefineLM:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec criteria:(TermCriteria*)criteria NS_SWIFT_NAME(solvePnPRefineLM(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:criteria:));
- /**
- * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- * where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
- * @param tvec Input/Output translation vector. Input values are used as an initial solution.
- *
- * The function refines the object pose given at least 3 object points, their corresponding image
- * projections, an initial solution for the rotation and translation vector,
- * as well as the camera intrinsic matrix and the distortion coefficients.
- * The function minimizes the projection error with respect to the rotation and the translation vectors, according
- * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
- */
- + (void)solvePnPRefineLM:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPRefineLM(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
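- /*
- * Sketch: Levenberg-Marquardt refinement works in place, so rvec/tvec must already
- * hold an initial pose (e.g. from a solvePnP call above); this overload uses the
- * default termination criteria. Same assumptions as the earlier examples:
- *
- *     [Calib3d solvePnPRefineLM:objectPoints imagePoints:imagePoints
- *                  cameraMatrix:K distCoeffs:[Mat new] rvec:rvec tvec:tvec];
- *     // rvec/tvec now contain the refined pose.
- */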
- //
- // void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
- //
- /**
- * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- * where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
- * @param tvec Input/Output translation vector. Input values are used as an initial solution.
- * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
- * @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the `$$\alpha$$`
- * gain in the Damped Gauss-Newton formulation.
- *
- * The function refines the object pose given at least 3 object points, their corresponding image
- * projections, an initial solution for the rotation and translation vector,
- * as well as the camera intrinsic matrix and the distortion coefficients.
- * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
- * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
- */
- + (void)solvePnPRefineVVS:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec criteria:(TermCriteria*)criteria VVSlambda:(double)VVSlambda NS_SWIFT_NAME(solvePnPRefineVVS(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:criteria:VVSlambda:));
- /**
- * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- * where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
- * @param tvec Input/Output translation vector. Input values are used as an initial solution.
- * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
- *
- * The function refines the object pose given at least 3 object points, their corresponding image
- * projections, an initial solution for the rotation and translation vector,
- * as well as the camera intrinsic matrix and the distortion coefficients.
- * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
- * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
- */
- + (void)solvePnPRefineVVS:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec criteria:(TermCriteria*)criteria NS_SWIFT_NAME(solvePnPRefineVVS(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:criteria:));
- /**
- * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- * where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
- * @param tvec Input/Output translation vector. Input values are used as an initial solution.
- *
- * The function refines the object pose given at least 3 object points, their corresponding image
- * projections, an initial solution for the rotation and translation vector,
- * as well as the camera intrinsic matrix and the distortion coefficients.
- * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
- * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
- */
- + (void)solvePnPRefineVVS:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPRefineVVS(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:));
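- /*
- * Sketch: the calling pattern is identical to solvePnPRefineLM, but the pose is
- * refined with the virtual visual servoing scheme (default criteria and gain):
- *
- *     [Calib3d solvePnPRefineVVS:objectPoints imagePoints:imagePoints
- *                   cameraMatrix:K distCoeffs:[Mat new] rvec:rvec tvec:tvec];
- */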
- //
- // int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
- //
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- * pair), depending on the number of input points and the chosen method:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- * Only 1 solution is returned.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
- * the model coordinate system to the camera coordinate system.
- * @param tvecs Vector of output translation vectors.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- * and useExtrinsicGuess is set to true.
- * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- * and useExtrinsicGuess is set to true.
- * @param reprojectionError Optional vector of reprojection errors, that is the RMS error
- * (`$$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} $$`) between the input image points
- * and the 3D object points projected with the estimated pose.
- *
- * More information can be found in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- */
- + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags rvec:(Mat*)rvec tvec:(Mat*)tvec reprojectionError:(Mat*)reprojectionError NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:rvec:tvec:reprojectionError:));
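- /*
- * Sketch: solvePnPGeneric returns every candidate pose and, optionally, the RMS
- * reprojection error of each. Same assumptions as the earlier examples; the
- * SolvePnPMethod case name is assumed to mirror the C++ constant, and the object
- * points are assumed coplanar as SOLVEPNP_IPPE requires:
- *
- *     NSMutableArray<Mat*> *rvecs = [NSMutableArray array];
- *     NSMutableArray<Mat*> *tvecs = [NSMutableArray array];
- *     Mat *errs = [Mat new];
- *     int n = [Calib3d solvePnPGeneric:objectPoints imagePoints:imagePoints
- *                         cameraMatrix:K distCoeffs:[Mat new] rvecs:rvecs tvecs:tvecs
- *                    useExtrinsicGuess:NO flags:SOLVEPNP_IPPE rvec:[Mat new]
- *                                 tvec:[Mat new] reprojectionError:errs];
- *     // n poses returned; errs is an n x 1 vector of RMS reprojection errors.
- */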
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- * pair), depending on the number of input points and the chosen method:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- * Only 1 solution is returned.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
- * the model coordinate system to the camera coordinate system.
- * @param tvecs Vector of output translation vectors.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- * and useExtrinsicGuess is set to true.
- * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- * and useExtrinsicGuess is set to true.
- *
- * More information can be found in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- */
- + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:rvec:tvec:));
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- * couple), depending on the number of input points and the chosen method:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- * Only 1 solution is returned.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvecs Vector of output translation vectors.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- * and useExtrinsicGuess is set to true.
- *
- * More information is described in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- */
- + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags rvec:(Mat*)rvec NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:rvec:));
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- * couple), depending on the number of input points and the chosen method:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- * Only 1 solution is returned.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvecs Vector of output translation vectors.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- *
- * More information is described in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- */
- + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess flags:(SolvePnPMethod)flags NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:flags:));
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- * couple), depending on the number of input points and the chosen method:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- * Only 1 solution is returned.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvecs Vector of output translation vectors.
- * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- * the provided rvec and tvec values as initial approximations of the rotation and translation
- * vectors, respectively, and further optimizes them.
- *
- * More information is described in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- */
- + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:useExtrinsicGuess:));
- /**
- * Finds an object pose from 3D-2D point correspondences.
- *
- * @see `REF: calib3d_solvePnP`
- *
- * This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- * couple), depending on the number of input points and the chosen method:
- * - P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- * - REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- * - REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- * - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- * Only 1 solution is returned.
- *
- * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
- * 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
- * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- * where N is the number of points. vector\<Point2d\> can also be passed here.
- * @param cameraMatrix Input camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvecs Vector of output translation vectors.
- *
- * More information is described in REF: calib3d_solvePnP
- *
- * NOTE:
- * - An example of how to use solvePnP for planar augmented reality can be found at
- * opencv_source_code/samples/python/plane_ar.py
- * - If you are using Python:
- * - Numpy array slices won't work as input because solvePnP requires contiguous
- * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- * modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- * which requires 2-channel information.
- * - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- * - The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- * unstable and sometimes give completely wrong results. If you pass one of these two
- * flags, REF: SOLVEPNP_EPNP method will be used instead.
- * - The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- * - With REF: SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- * global solution to converge.
- * - With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- * - REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- * Number of input points must be 4. Object points must be defined in the following order:
- * - point 0: [-squareLength / 2, squareLength / 2, 0]
- * - point 1: [ squareLength / 2, squareLength / 2, 0]
- * - point 2: [ squareLength / 2, -squareLength / 2, 0]
- * - point 3: [-squareLength / 2, -squareLength / 2, 0]
- */
- + (int)solvePnPGeneric:(Mat*)objectPoints imagePoints:(Mat*)imagePoints cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(solvePnPGeneric(objectPoints:imagePoints:cameraMatrix:distCoeffs:rvecs:tvecs:));
- //
- // Mat cv::initCameraMatrix2D(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, double aspectRatio = 1.0)
- //
- /**
- * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
- *
- * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
- * coordinate space. In the old interface all the per-view vectors are concatenated. See
- * #calibrateCamera for details.
- * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
- * old interface all the per-view vectors are concatenated.
- * @param imageSize Image size in pixels used to initialize the principal point.
- * @param aspectRatio If it is zero or negative, both `$$f_x$$` and `$$f_y$$` are estimated independently.
- * Otherwise, `$$f_x = f_y * \texttt{aspectRatio}$$` .
- *
- * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
- * Currently, the function only supports planar calibration patterns, which are patterns where each
- * object point has z-coordinate = 0.
- */
- + (Mat*)initCameraMatrix2D:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize aspectRatio:(double)aspectRatio NS_SWIFT_NAME(initCameraMatrix2D(objectPoints:imagePoints:imageSize:aspectRatio:));
- /**
- * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
- *
- * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
- * coordinate space. In the old interface all the per-view vectors are concatenated. See
- * #calibrateCamera for details.
- * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
- * old interface all the per-view vectors are concatenated.
- * @param imageSize Image size in pixels used to initialize the principal point.
- *
- * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
- * Currently, the function only supports planar calibration patterns, which are patterns where each
- * object point has z-coordinate = 0.
- */
- + (Mat*)initCameraMatrix2D:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize NS_SWIFT_NAME(initCameraMatrix2D(objectPoints:imagePoints:imageSize:));
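- /*
-  * Usage sketch (not part of the original header), assuming the `Calib3d`
-  * wrapper class and per-view point Mats gathered beforehand (hypothetical
-  * `objPtsView0`/`imgPtsView0` etc., Nx1 CV_32FC3 and Nx1 CV_32FC2):
-  *
-  *     NSArray<Mat*> *objectPoints = @[objPtsView0, objPtsView1];
-  *     NSArray<Mat*> *imagePoints  = @[imgPtsView0, imgPtsView1];
-  *     Size2i *imageSize = [[Size2i alloc] initWithWidth:1280 height:720];
-  *     Mat *K = [Calib3d initCameraMatrix2D:objectPoints
-  *                              imagePoints:imagePoints
-  *                                imageSize:imageSize
-  *                              aspectRatio:1.0];
-  *     // K can then seed calibrateCamera with CALIB_USE_INTRINSIC_GUESS.
-  */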
- //
- // bool cv::findChessboardCorners(Mat image, Size patternSize, Mat& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
- //
- /**
- * Finds the positions of internal corners of the chessboard.
- *
- * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
- * @param patternSize Number of inner corners per chessboard row and column
- * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
- * @param corners Output array of detected corners.
- * @param flags Various operation flags that can be zero or a combination of the following values:
- * - REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
- * and white, rather than a fixed threshold level (computed from the average image brightness).
- * - REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
- * applying fixed or adaptive thresholding.
- * - REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
- * square-like shape) to filter out false quads extracted at the contour retrieval stage.
- * - REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
- * and shortcut the call if none is found. This can drastically speed up the call in the
- * degenerate condition when no chessboard is observed.
- *
- * The function attempts to determine whether the input image is a view of the chessboard pattern and
- * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
- * are found and they are placed in a certain order (row by row, left to right in every row).
- * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
- * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
- * squares touch each other. The detected coordinates are approximate, and to determine their positions
- * more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
- * different parameters if the returned coordinates are not accurate enough.
- *
- * Sample usage of detecting and drawing chessboard corners:
- *
- * Size patternsize(8,6); //interior number of corners
- * Mat gray = ....; //source image
- * vector<Point2f> corners; //this will be filled by the detected corners
- *
- * //CALIB_CB_FAST_CHECK saves a lot of time on images
- * //that do not contain any chessboard corners
- * bool patternfound = findChessboardCorners(gray, patternsize, corners,
- * CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
- * + CALIB_CB_FAST_CHECK);
- *
- * if(patternfound)
- * cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
- * TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
- *
- * drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
- *
- * NOTE: The function requires white space (like a square-thick border, the wider the better) around
- * the board to make the detection more robust in various environments. Otherwise, if there is no
- * border and the background is dark, the outer black squares cannot be segmented properly and so the
- * square grouping and ordering algorithm fails.
- *
- * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create a checkerboard.
- */
- + (BOOL)findChessboardCorners:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners flags:(int)flags NS_SWIFT_NAME(findChessboardCorners(image:patternSize:corners:flags:));
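- /*
-  * The same detection sequence as the C++ sample above, sketched against this
-  * Objective-C overload (assuming the `Calib3d` wrapper class and an 8-bit
-  * grayscale input `gray`; the raw values 1, 2 and 8 mirror
-  * CALIB_CB_ADAPTIVE_THRESH, CALIB_CB_NORMALIZE_IMAGE and CALIB_CB_FAST_CHECK
-  * in the C++ enum):
-  *
-  *     Size2i *patternSize = [[Size2i alloc] initWithWidth:8 height:6]; // interior corners
-  *     Mat *corners = [[Mat alloc] init]; // filled with the detected corners
-  *     BOOL found = [Calib3d findChessboardCorners:gray
-  *                                     patternSize:patternSize
-  *                                         corners:corners
-  *                                           flags:1 + 2 + 8];
-  *     [Calib3d drawChessboardCorners:img patternSize:patternSize
-  *                            corners:corners patternWasFound:found];
-  */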
- /**
- * Finds the positions of internal corners of the chessboard.
- *
- * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
- * @param patternSize Number of inner corners per chessboard row and column
- * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
- * @param corners Output array of detected corners.
- *
- * The function attempts to determine whether the input image is a view of the chessboard pattern and
- * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
- * are found and they are placed in a certain order (row by row, left to right in every row).
- * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
- * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
- * squares touch each other. The detected coordinates are approximate, and to determine their positions
- * more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
- * different parameters if the returned coordinates are not accurate enough.
- *
- * Sample usage of detecting and drawing chessboard corners:
- *
- * Size patternsize(8,6); //interior number of corners
- * Mat gray = ....; //source image
- * vector<Point2f> corners; //this will be filled by the detected corners
- *
- * //CALIB_CB_FAST_CHECK saves a lot of time on images
- * //that do not contain any chessboard corners
- * bool patternfound = findChessboardCorners(gray, patternsize, corners,
- * CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
- * + CALIB_CB_FAST_CHECK);
- *
- * if(patternfound)
- * cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
- * TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
- *
- * drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
- *
- * NOTE: The function requires white space (like a square-thick border, the wider the better) around
- * the board to make the detection more robust in various environments. Otherwise, if there is no
- * border and the background is dark, the outer black squares cannot be segmented properly and so the
- * square grouping and ordering algorithm fails.
- *
- * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create a checkerboard.
- */
- + (BOOL)findChessboardCorners:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners NS_SWIFT_NAME(findChessboardCorners(image:patternSize:corners:));
- //
- // bool cv::checkChessboard(Mat img, Size size)
- //
- + (BOOL)checkChessboard:(Mat*)img size:(Size2i*)size NS_SWIFT_NAME(checkChessboard(img:size:));
- //
- // bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
- //
- /**
- * Finds the positions of internal corners of the chessboard using a sector-based approach.
- *
- * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
- * @param patternSize Number of inner corners per chessboard row and column
- * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
- * @param corners Output array of detected corners.
- * @param flags Various operation flags that can be zero or a combination of the following values:
- * - REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
- * - REF: CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
- * - REF: CALIB_CB_ACCURACY Up sample input image to improve sub-pixel accuracy due to aliasing effects.
- * - REF: CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
- * - REF: CALIB_CB_MARKER The detected pattern must have a marker (see description).
- * This should be used if an accurate camera calibration is required.
- * @param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
- * Each entry stands for one corner of the pattern and can have one of the following values:
- * - 0 = no meta data attached
- * - 1 = left-top corner of a black cell
- * - 2 = left-top corner of a white cell
- * - 3 = left-top corner of a black cell with a white marker dot
- * - 4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers otherwise first corner)
- *
- * The function is analogous to #findChessboardCorners but uses a localized Radon
- * transformation approximated by box filters, which makes it more robust to all
- * sorts of noise and faster on larger images, and allows it to directly return
- * the sub-pixel position of the internal chessboard corners. The method is based
- * on the paper CITE: duda2018 "Accurate Detection and Localization of
- * Checkerboard Corners for Calibration", demonstrating that the returned
- * sub-pixel positions are more accurate than those returned by cornerSubPix,
- * allowing a precise camera calibration for demanding applications.
- *
- * In case the flags REF: CALIB_CB_LARGER or REF: CALIB_CB_MARKER are given,
- * the result can be recovered from the optional meta array. Both flags are
- * helpful when using calibration patterns exceeding the field of view of the
- * camera. These oversized patterns allow more accurate calibrations, as corners
- * as close as possible to the image borders can be utilized. For a
- * consistent coordinate system across all images, the optional marker (see image
- * below) can be used to move the origin of the board to the location where the
- * black circle is located.
- *
- * NOTE: The function requires a white border with roughly the same width as one
- * of the checkerboard fields around the whole board to improve the detection in
- * various environments. In addition, because of the localized Radon
- * transformation, it is beneficial to use round corners for the field corners
- * which are located on the outside of the board. The following figure illustrates
- * a sample checkerboard optimized for the detection. However, any other checkerboard
- * can be used as well.
- *
- * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create a checkerboard.
- * ![Checkerboard](pics/checkerboard_radon.png)
- */
- + (BOOL)findChessboardCornersSBWithMeta:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners flags:(int)flags meta:(Mat*)meta NS_SWIFT_NAME(findChessboardCornersSB(image:patternSize:corners:flags:meta:));
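- /*
-  * Sketch (not part of the original header) of a sector-based detection with
-  * the meta output, assuming the `Calib3d` wrapper class; the raw values 2 and
-  * 16 stand in for CALIB_CB_NORMALIZE_IMAGE and CALIB_CB_EXHAUSTIVE from the
-  * C++ enum:
-  *
-  *     Mat *corners = [[Mat alloc] init];
-  *     Mat *meta = [[Mat alloc] init]; // CV_8UC1 per-corner cell classification
-  *     BOOL found = [Calib3d findChessboardCornersSBWithMeta:gray
-  *                                               patternSize:patternSize
-  *                                                   corners:corners
-  *                                                     flags:2 + 16
-  *                                                      meta:meta];
-  */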
- //
- // bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
- //
- + (BOOL)findChessboardCornersSB:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners flags:(int)flags NS_SWIFT_NAME(findChessboardCornersSB(image:patternSize:corners:flags:));
- + (BOOL)findChessboardCornersSB:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners NS_SWIFT_NAME(findChessboardCornersSB(image:patternSize:corners:));
- //
- // Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
- //
- /**
- * Estimates the sharpness of a detected chessboard.
- *
- * Image sharpness and brightness are critical parameters for accurate
- * camera calibration. To access these parameters for filtering out
- * problematic calibration images, this method calculates edge profiles by traveling from
- * black to white chessboard cell centers. Based on this, the number of pixels
- * required to transit from black to white is calculated. The width of this
- * transition area is a good indication of how sharply the chessboard is imaged
- * and should be below ~3.0 pixels.
- *
- * @param image Gray image used to find chessboard corners
- * @param patternSize Size of a found chessboard pattern
- * @param corners Corners found by #findChessboardCornersSB
- * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
- * @param vertical By default edge responses for horizontal lines are calculated
- * @param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
- *
- * The optional sharpness array is of type CV_32FC1 and has one row for each
- * calculated profile, with the following five entries:
- * 0 = x coordinate of the underlying edge in the image
- * 1 = y coordinate of the underlying edge in the image
- * 2 = width of the transition area (sharpness)
- * 3 = signal strength in the black cell (min brightness)
- * 4 = signal strength in the white cell (max brightness)
- *
- * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
- */
- + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners rise_distance:(float)rise_distance vertical:(BOOL)vertical sharpness:(Mat*)sharpness NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:rise_distance:vertical:sharpness:));
- /**
- * Estimates the sharpness of a detected chessboard.
- *
- * Image sharpness and brightness are critical parameters for accurate
- * camera calibration. To access these parameters for filtering out
- * problematic calibration images, this method calculates edge profiles by traveling from
- * black to white chessboard cell centers. Based on this, the number of pixels
- * required to transit from black to white is calculated. The width of this
- * transition area is a good indication of how sharply the chessboard is imaged
- * and should be below ~3.0 pixels.
- *
- * @param image Gray image used to find chessboard corners
- * @param patternSize Size of a found chessboard pattern
- * @param corners Corners found by #findChessboardCornersSB
- * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
- * @param vertical By default edge responses for horizontal lines are calculated
- *
- * The optional sharpness array is of type CV_32FC1 and has one row for each
- * calculated profile, with the following five entries:
- * 0 = x coordinate of the underlying edge in the image
- * 1 = y coordinate of the underlying edge in the image
- * 2 = width of the transition area (sharpness)
- * 3 = signal strength in the black cell (min brightness)
- * 4 = signal strength in the white cell (max brightness)
- *
- * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
- */
- + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners rise_distance:(float)rise_distance vertical:(BOOL)vertical NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:rise_distance:vertical:));
- /**
- * Estimates the sharpness of a detected chessboard.
- *
- * Image sharpness and brightness are critical parameters for accurate
- * camera calibration. To access these parameters for filtering out
- * problematic calibration images, this method calculates edge profiles by traveling from
- * black to white chessboard cell centers. Based on this, the number of pixels
- * required to transit from black to white is calculated. The width of this
- * transition area is a good indication of how sharply the chessboard is imaged
- * and should be below ~3.0 pixels.
- *
- * @param image Gray image used to find chessboard corners
- * @param patternSize Size of a found chessboard pattern
- * @param corners Corners found by #findChessboardCornersSB
- * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
- *
- * The optional sharpness array is of type CV_32FC1 and has one row for each
- * calculated profile, with the following five entries:
- * 0 = x coordinate of the underlying edge in the image
- * 1 = y coordinate of the underlying edge in the image
- * 2 = width of the transition area (sharpness)
- * 3 = signal strength in the black cell (min brightness)
- * 4 = signal strength in the white cell (max brightness)
- *
- * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
- */
- + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners rise_distance:(float)rise_distance NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:rise_distance:));
- /**
- * Estimates the sharpness of a detected chessboard.
- *
- * Image sharpness and brightness are critical parameters for accurate
- * camera calibration. To access these parameters for filtering out
- * problematic calibration images, this method calculates edge profiles by traveling from
- * black to white chessboard cell centers. Based on this, the number of pixels
- * required to transit from black to white is calculated. The width of this
- * transition area is a good indication of how sharply the chessboard is imaged
- * and should be below ~3.0 pixels.
- *
- * @param image Gray image used to find chessboard corners
- * @param patternSize Size of a found chessboard pattern
- * @param corners Corners found by #findChessboardCornersSB
- *
- * The optional sharpness array is of type CV_32FC1 and has one row for each
- * calculated profile, with the following five entries:
- * 0 = x coordinate of the underlying edge in the image
- * 1 = y coordinate of the underlying edge in the image
- * 2 = width of the transition area (sharpness)
- * 3 = signal strength in the black cell (min brightness)
- * 4 = signal strength in the white cell (max brightness)
- *
- * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
- */
- + (Scalar*)estimateChessboardSharpness:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners NS_SWIFT_NAME(estimateChessboardSharpness(image:patternSize:corners:));
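- /*
-  * Reading the result (a sketch, assuming the `Calib3d` wrapper class, corners
-  * from findChessboardCornersSB above, and that Scalar exposes its components
-  * through its `val` array as in OpenCV's Objective-C bindings):
-  *
-  *     Scalar *s = [Calib3d estimateChessboardSharpness:gray
-  *                                          patternSize:patternSize
-  *                                              corners:corners];
-  *     double avgSharpness = s.val[0].doubleValue; // should stay below ~3.0 px
-  */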
- //
- // bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
- //
- + (BOOL)find4QuadCornerSubpix:(Mat*)img corners:(Mat*)corners region_size:(Size2i*)region_size NS_SWIFT_NAME(find4QuadCornerSubpix(img:corners:region_size:));
- //
- // void cv::drawChessboardCorners(Mat& image, Size patternSize, Mat corners, bool patternWasFound)
- //
- /**
- * Renders the detected chessboard corners.
- *
- * @param image Destination image. It must be an 8-bit color image.
- * @param patternSize Number of inner corners per chessboard row and column
- * (patternSize = cv::Size(points_per_row,points_per_column)).
- * @param corners Array of detected corners, the output of #findChessboardCorners.
- * @param patternWasFound Parameter indicating whether the complete board was found or not. The
- * return value of #findChessboardCorners should be passed here.
- *
- * The function draws individual chessboard corners detected either as red circles if the board was not
- * found, or as colored corners connected with lines if the board was found.
- */
- + (void)drawChessboardCorners:(Mat*)image patternSize:(Size2i*)patternSize corners:(Mat*)corners patternWasFound:(BOOL)patternWasFound NS_SWIFT_NAME(drawChessboardCorners(image:patternSize:corners:patternWasFound:));
- //
- // void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
- //
- /**
- * Draw axes of the world/object coordinate system from pose estimation. @see `+solvePnP:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:flags:`
- *
- * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
- * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
- * `$$\cameramatrix{A}$$`
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is empty, the zero distortion coefficients are assumed.
- * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Translation vector.
- * @param length Length of the painted axes in the same unit as tvec (usually in meters).
- * @param thickness Line thickness of the painted axes.
- *
- * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
- * OX is drawn in red, OY in green and OZ in blue.
- */
- + (void)drawFrameAxes:(Mat*)image cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec length:(float)length thickness:(int)thickness NS_SWIFT_NAME(drawFrameAxes(image:cameraMatrix:distCoeffs:rvec:tvec:length:thickness:));
- /**
- * Draw axes of the world/object coordinate system from pose estimation. @see `+solvePnP:imagePoints:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:flags:`
- *
- * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
- * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
- * `$$\cameramatrix{A}$$`
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is empty, the zero distortion coefficients are assumed.
- * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- * the model coordinate system to the camera coordinate system.
- * @param tvec Translation vector.
- * @param length Length of the painted axes in the same unit as tvec (usually in meters).
- *
- * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
- * OX is drawn in red, OY in green and OZ in blue.
- */
- + (void)drawFrameAxes:(Mat*)image cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec length:(float)length NS_SWIFT_NAME(drawFrameAxes(image:cameraMatrix:distCoeffs:rvec:tvec:length:));
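- /*
-  * Pose visualization sketch (not part of the original header): after a
-  * successful solvePnP-style estimation, overlay the object frame on the image;
-  * assumes the `Calib3d` wrapper class and a pose already stored in rvec/tvec.
-  *
-  *     [Calib3d drawFrameAxes:img
-  *               cameraMatrix:cameraMatrix
-  *                 distCoeffs:distCoeffs
-  *                       rvec:rvec
-  *                       tvec:tvec
-  *                     length:0.05f]; // 5 cm axes if tvec is in meters
-  */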
- //
- // bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags, _hidden_ blobDetector = cv::SimpleBlobDetector::create(), CirclesGridFinderParameters parameters)
- //
- /**
- * Finds centers in the grid of circles.
- *
- * @param image grid view of input circles; it must be an 8-bit grayscale or color image.
- * @param patternSize number of circles per row and column
- * ( patternSize = Size(points_per_row, points_per_column) ).
- * @param centers output array of detected centers.
- * @param flags various operation flags that can be one of the following values:
- * - REF: CALIB_CB_SYMMETRIC_GRID uses symmetric pattern of circles.
- * - REF: CALIB_CB_ASYMMETRIC_GRID uses asymmetric pattern of circles.
- * - REF: CALIB_CB_CLUSTERING uses a special algorithm for grid detection. It is more robust to
- * perspective distortions but much more sensitive to background clutter.
- * @param blobDetector feature detector that finds blobs, like dark circles on a light background.
- * If `blobDetector` is NULL then `image` represents a Point2f array of candidates.
- * @param parameters struct for finding circles in a grid pattern.
- *
- * The function attempts to determine whether the input image contains a grid of circles. If it does, the
- * function locates centers of the circles. The function returns a non-zero value if all of the centers
- * have been found and they have been placed in a certain order (row by row, left to right in every
- * row). Otherwise, if the function fails to find all the centers or reorder them, it returns 0.
- *
- * Sample usage of detecting and drawing the centers of circles:
- *
- * Size patternsize(7,7); //number of centers
- * Mat gray = ...; //source image
- * vector<Point2f> centers; //this will be filled by the detected centers
- *
- * bool patternfound = findCirclesGrid(gray, patternsize, centers);
- *
- * drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
- *
- * NOTE: The function requires white space (like a square-thick border, the wider the better) around
- * the board to make the detection more robust in various environments.
- */
- + (BOOL)findCirclesGrid:(Mat*)image patternSize:(Size2i*)patternSize centers:(Mat*)centers flags:(int)flags parameters:(CirclesGridFinderParameters*)parameters NS_SWIFT_NAME(findCirclesGrid(image:patternSize:centers:flags:parameters:));
- //
- // bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, _hidden_ blobDetector = cv::SimpleBlobDetector::create())
- //
- + (BOOL)findCirclesGrid:(Mat*)image patternSize:(Size2i*)patternSize centers:(Mat*)centers flags:(int)flags NS_SWIFT_NAME(findCirclesGrid(image:patternSize:centers:flags:));
- + (BOOL)findCirclesGrid:(Mat*)image patternSize:(Size2i*)patternSize centers:(Mat*)centers NS_SWIFT_NAME(findCirclesGrid(image:patternSize:centers:));
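- /*
-  * Objective-C rendition of the C++ sample above (a sketch, assuming the
-  * `Calib3d` wrapper class; the three-argument overload defaults to a
-  * symmetric grid):
-  *
-  *     Size2i *patternSize = [[Size2i alloc] initWithWidth:7 height:7]; // number of centers
-  *     Mat *centers = [[Mat alloc] init];
-  *     BOOL found = [Calib3d findCirclesGrid:gray patternSize:patternSize centers:centers];
-  *     [Calib3d drawChessboardCorners:img patternSize:patternSize
-  *                            corners:centers patternWasFound:found];
-  */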
- //
- // double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
- //
- /**
- * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- * pattern.
- *
- * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
- * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
- * vector contains as many elements as the number of pattern views. If the same calibration pattern
- * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
- * possible to use partially occluded patterns or even different patterns in different views; then,
- * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
- * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
- * In the old interface all the vectors of object points from different views are concatenated
- * together.
- * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
- * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
- * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
- * respectively. In the old interface all the vectors of image points from different views are
- * concatenated together.
- * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
- * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
- * `$$\cameramatrix{A}$$` . If REF: CALIB_USE_INTRINSIC_GUESS
- * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
- * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
- * @param distCoeffs Input/output vector of distortion coefficients
- * `$$\distcoeffs$$`.
- * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
- * (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
- * i-th translation vector (see the next output parameter description) brings the calibration pattern
- * from the object coordinate space (in which object points are specified) to the camera coordinate
- * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
- * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
- * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
- * space.
- * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
- * description above.
- * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
- * parameters. Order of deviations values:
- * `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
- * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
- * parameters. Order of deviations values: `$$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})$$` where M is
- * the number of pattern views. `$$R_i, T_i$$` are concatenated 1x3 vectors.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- * Note that if intrinsic parameters are known, there is no need to use this function just to
- * estimate extrinsic parameters. Use REF: solvePnP instead.
- * - REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- * optimization. It stays at the center or at a different location specified when
- * REF: CALIB_USE_INTRINSIC_GUESS is set too.
- * - REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
- * ratio fx/fy stays the same as in the input cameraMatrix . When
- * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
- * ignored, only their ratio is computed and used further.
- * - REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients `$$(p_1, p_2)$$` are set
- * to zeros and stay zero.
- * - REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
- * REF: CALIB_USE_INTRINSIC_GUESS is set.
- * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
- * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
- * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
- * backward compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the rational model and return 8 coefficients or more.
- * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- * backward compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the thin prism model and return 12 coefficients or more.
- * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- * backward compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the tilted sensor model and return 14 coefficients.
- * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * @param criteria Termination criteria for the iterative optimization algorithm.
- *
- * @return the overall RMS re-projection error.
- *
- * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
- * points and their corresponding 2D projections in each view must be specified. That may be achieved
- * by using an object with known geometry and easily detectable feature points. Such an object is
- * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
- * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
- * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
- * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
- * be used as long as initial cameraMatrix is provided.
- *
- * The algorithm performs the following steps:
- *
- * - Compute the initial intrinsic parameters (the option only available for planar calibration
- * patterns) or read them from the input parameters. The distortion coefficients are all set to
- * zeros initially unless some of CALIB_FIX_K? are specified.
- *
- * - Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
- * done using REF: solvePnP .
- *
- * - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- * that is, the total sum of squared distances between the observed feature points imagePoints and
- * the projected (using the current estimates for camera parameters and the poses) object points
- * objectPoints. See REF: projectPoints for details.
- *
- * NOTE:
- * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
- * and REF: calibrateCamera returns bad values (zero distortion coefficients, `$$c_x$$` and
- * `$$c_y$$` very far from the image center, and/or large differences between `$$f_x$$` and
- * `$$f_y$$` (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- * instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
- *
- * @sa
- * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- * undistort
- */
- + (double)calibrateCameraExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:criteria:));
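- //
- // Usage sketch (illustrative, not part of the generated interface): a minimal call to the
- // extended calibration, assuming the usual opencv2 framework types (Mat, Size2i, TermCriteria,
- // with these class methods hosted on the Calib3d class) and that objectPoints / imagePoints
- // were collected beforehand, e.g. with findChessboardCorners using
- // patternSize = cvSize(cols, rows) as the NOTE above warns. The numeric TermCriteria type
- // value (COUNT=1, EPS=2) mirrors the default shown in the C++ signature comments below.
- //
- //     Mat *cameraMatrix = [Mat new], *distCoeffs = [Mat new];
- //     NSMutableArray<Mat*> *rvecs = [NSMutableArray array];
- //     NSMutableArray<Mat*> *tvecs = [NSMutableArray array];
- //     Mat *stdDevIntrinsics = [Mat new], *stdDevExtrinsics = [Mat new];
- //     Mat *perViewErrors = [Mat new];
- //     Size2i *imageSize = [[Size2i alloc] initWithWidth:1920 height:1080];
- //     TermCriteria *criteria = [[TermCriteria alloc] initWithType:(1 + 2) // COUNT + EPS (assumed values)
- //                                                        maxCount:30
- //                                                         epsilon:DBL_EPSILON];
- //     double rms = [Calib3d calibrateCameraExtended:objectPoints
- //                                       imagePoints:imagePoints
- //                                         imageSize:imageSize
- //                                      cameraMatrix:cameraMatrix
- //                                        distCoeffs:distCoeffs
- //                                             rvecs:rvecs
- //                                             tvecs:tvecs
- //                           stdDeviationsIntrinsics:stdDevIntrinsics
- //                           stdDeviationsExtrinsics:stdDevExtrinsics
- //                                     perViewErrors:perViewErrors
- //                                             flags:0
- //                                          criteria:criteria];
- //     // rms is the overall RMS re-projection error; perViewErrors holds one
- //     // RMS value per view, useful for spotting bad calibration images.
- //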
- /**
- * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- * pattern.
- *
- * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
- * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
- * vector contains as many elements as the number of pattern views. If the same calibration pattern
- * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
- * possible to use partially occluded patterns or even different patterns in different views; then
- * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
- * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
- * In the old interface all the vectors of object points from different views are concatenated
- * together.
- * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
- * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() must be equal to
- * objectPoints.size(), and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
- * In the old interface all the vectors of image points from different views are
- * concatenated together.
- * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
- * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
- * `$$\cameramatrix{A}$$` . If REF: CALIB_USE_INTRINSIC_GUESS
- * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
- * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
- * @param distCoeffs Input/output vector of distortion coefficients
- * `$$\distcoeffs$$`.
- * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
- * (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
- * i-th translation vector (see the next output parameter description) brings the calibration pattern
- * from the object coordinate space (in which object points are specified) to the camera coordinate
- * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
- * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
- * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
- * space.
- * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
- * description above.
- * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
- * parameters. Order of deviations values:
- * `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- * s_4, \tau_x, \tau_y)$$`. If one of the parameters is not estimated, its deviation is set to zero.
- * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
- * parameters. Order of deviations values: `$$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})$$` where M is
- * the number of pattern views. `$$R_i, T_i$$` are concatenated 1x3 vectors.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- * Note that if intrinsic parameters are known, there is no need to use this function just to
- * estimate extrinsic parameters. Use REF: solvePnP instead.
- * - REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- * optimization. It stays at the center or at a different location specified when
- * REF: CALIB_USE_INTRINSIC_GUESS is set too.
- * - REF: CALIB_FIX_ASPECT_RATIO The function considers only fy as a free parameter. The
- * ratio fx/fy stays the same as in the input cameraMatrix . When
- * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
- * ignored, only their ratio is computed and used further.
- * - REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients `$$(p_1, p_2)$$` are set
- * to zeros and stay zero.
- * - REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
- * REF: CALIB_USE_INTRINSIC_GUESS is set.
- * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
- * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
- * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the rational model and return 8 coefficients or more.
- * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the thin prism model and return 12 coefficients or more.
- * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the tilted sensor model and return 14 coefficients.
- * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- *
- * @return the overall RMS re-projection error.
- *
- * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
- * points and their corresponding 2D projections in each view must be specified. That may be achieved
- * by using an object with known geometry and easily detectable feature points. Such an object is
- * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
- * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
- * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
- * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
- * be used as long as initial cameraMatrix is provided.
- *
- * The algorithm performs the following steps:
- *
- * - Compute the initial intrinsic parameters (the option only available for planar calibration
- * patterns) or read them from the input parameters. The distortion coefficients are all set to
- * zeros initially unless some of CALIB_FIX_K? are specified.
- *
- * - Estimate the initial camera pose as if the intrinsic parameters had already been known. This is
- * done using REF: solvePnP .
- *
- * - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- * that is, the total sum of squared distances between the observed feature points imagePoints and
- * the projected (using the current estimates for camera parameters and the poses) object points
- * objectPoints. See REF: projectPoints for details.
- *
- * NOTE:
- * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
- * and REF: calibrateCamera returns bad values (zero distortion coefficients, `$$c_x$$` and
- * `$$c_y$$` very far from the image center, and/or large differences between `$$f_x$$` and
- * `$$f_y$$` (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- * instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
- *
- * @sa
- * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- * undistort
- */
- + (double)calibrateCameraExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:));
- /**
- * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- * pattern.
- *
- * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
- * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
- * vector contains as many elements as the number of pattern views. If the same calibration pattern
- * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
- * possible to use partially occluded patterns or even different patterns in different views; then
- * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
- * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
- * In the old interface all the vectors of object points from different views are concatenated
- * together.
- * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
- * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() must be equal to
- * objectPoints.size(), and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
- * In the old interface all the vectors of image points from different views are
- * concatenated together.
- * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
- * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
- * `$$\cameramatrix{A}$$` . If REF: CALIB_USE_INTRINSIC_GUESS
- * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
- * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
- * @param distCoeffs Input/output vector of distortion coefficients
- * `$$\distcoeffs$$`.
- * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
- * (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
- * i-th translation vector (see the next output parameter description) brings the calibration pattern
- * from the object coordinate space (in which object points are specified) to the camera coordinate
- * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
- * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
- * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
- * space.
- * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
- * description above.
- * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
- * parameters. Order of deviations values:
- * `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- * s_4, \tau_x, \tau_y)$$`. If one of the parameters is not estimated, its deviation is set to zero.
- * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
- * parameters. Order of deviations values: `$$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})$$` where M is
- * the number of pattern views. `$$R_i, T_i$$` are concatenated 1x3 vectors.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- *
- * @return the overall RMS re-projection error.
- *
- * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
- * points and their corresponding 2D projections in each view must be specified. That may be achieved
- * by using an object with known geometry and easily detectable feature points. Such an object is
- * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
- * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
- * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
- * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
- * be used as long as initial cameraMatrix is provided.
- *
- * The algorithm performs the following steps:
- *
- * - Compute the initial intrinsic parameters (the option only available for planar calibration
- * patterns) or read them from the input parameters. The distortion coefficients are all set to
- * zeros initially unless some of CALIB_FIX_K? are specified.
- *
- * - Estimate the initial camera pose as if the intrinsic parameters had already been known. This is
- * done using REF: solvePnP .
- *
- * - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- * that is, the total sum of squared distances between the observed feature points imagePoints and
- * the projected (using the current estimates for camera parameters and the poses) object points
- * objectPoints. See REF: projectPoints for details.
- *
- * NOTE:
- * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
- * and REF: calibrateCamera returns bad values (zero distortion coefficients, `$$c_x$$` and
- * `$$c_y$$` very far from the image center, and/or large differences between `$$f_x$$` and
- * `$$f_y$$` (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- * instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
- *
- * @sa
- * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- * undistort
- */
- + (double)calibrateCameraExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:));
- //
- // double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
- //
- + (double)calibrateCamera:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:criteria:));
- + (double)calibrateCamera:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:));
- + (double)calibrateCamera:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(calibrateCamera(objectPoints:imagePoints:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:));
- //
- // double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
- //
- /**
- * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
- *
- * This function is an extension of #calibrateCamera with the object-releasing method
- * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
- * targets (calibration plates), this method can dramatically improve the precision of the estimated
- * camera parameters. Both the object-releasing method and standard method are supported by this
- * function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
- * #calibrateCamera is a wrapper for this function.
- *
- * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
- * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
- * the identical calibration board must be used in each view, it must be fully visible, and all
- * objectPoints[i] must be the same, with all points lying roughly in a plane. **The calibration
- * target has to be rigid, or at least static if the camera (rather than the calibration target) is
- * shifted for grabbing images.**
- * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
- * #calibrateCamera for details.
- * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
- * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
- * a switch for calibration method selection. To use the object-releasing method, pass a value in
- * the range [1, objectPoints[0].size()-2]; any value outside this range selects the standard
- * calibration method. Fixing the top-right corner point of the calibration board grid is usually
- * recommended when the object-releasing method is used. According to
- * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
- * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
- * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
- * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
- * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
- * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
- * for details.
- * @param tvecs Output vector of translation vectors estimated for each pattern view.
- * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
- * be scaled based on three fixed points. The returned coordinates are accurate only if the above
- * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
- * is ignored with the standard calibration method.
- * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
- * See #calibrateCamera for details.
- * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
- * See #calibrateCamera for details.
- * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
- * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
- * parameter is ignored with the standard calibration method.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of some predefined values. See
- * #calibrateCamera for details. If the object-releasing method is used, the calibration time may
- * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
- * potentially lower precision and stability in some rare cases.
- * @param criteria Termination criteria for the iterative optimization algorithm.
- *
- * @return the overall RMS re-projection error.
- *
- * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
- * #calibrateCamera for other detailed explanations.
- * @sa
- * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
- */
- + (double)calibrateCameraROExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics stdDeviationsObjPoints:(Mat*)stdDeviationsObjPoints perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:stdDeviationsIntrinsics:stdDeviationsExtrinsics:stdDeviationsObjPoints:perViewErrors:flags:criteria:));
- /**
- * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
- *
- * This function is an extension of #calibrateCamera with the object-releasing method
- * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
- * targets (calibration plates), this method can dramatically improve the precision of the estimated
- * camera parameters. Both the object-releasing method and standard method are supported by this
- * function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
- * #calibrateCamera is a wrapper for this function.
- *
- * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
- * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
- * the identical calibration board must be used in each view, it must be fully visible, and all
- * objectPoints[i] must be the same, with all points lying roughly in a plane. **The calibration
- * target has to be rigid, or at least static if the camera (rather than the calibration target) is
- * shifted for grabbing images.**
- * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
- * #calibrateCamera for details.
- * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
- * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
- * a switch for calibration method selection. To use the object-releasing method, pass a value in
- * the range [1, objectPoints[0].size()-2]; any value outside this range selects the standard
- * calibration method. Fixing the top-right corner point of the calibration board grid is usually
- * recommended when the object-releasing method is used. According to
- * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
- * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
- * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
- * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
- * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
- * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
- * for details.
- * @param tvecs Output vector of translation vectors estimated for each pattern view.
- * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
- * be scaled based on three fixed points. The returned coordinates are accurate only if the above
- * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
- * is ignored with the standard calibration method.
- * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
- * See #calibrateCamera for details.
- * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
- * See #calibrateCamera for details.
- * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
- * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
- * parameter is ignored with the standard calibration method.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of some predefined values. See
- * #calibrateCamera for details. If the object-releasing method is used, the calibration time may
- * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
- * potentially lower precision and stability in some rare cases.
- *
- * @return the overall RMS re-projection error.
- *
- * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
- * #calibrateCamera for other detailed explanations.
- * @sa
- * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
- */
- + (double)calibrateCameraROExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics stdDeviationsObjPoints:(Mat*)stdDeviationsObjPoints perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:stdDeviationsIntrinsics:stdDeviationsExtrinsics:stdDeviationsObjPoints:perViewErrors:flags:));
- /**
- * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
- *
- * This function is an extension of #calibrateCamera with the object-releasing method
- * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
- * targets (calibration plates), this method can dramatically improve the precision of the estimated
- * camera parameters. Both the object-releasing method and standard method are supported by this
- * function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
- * #calibrateCamera is a wrapper for this function.
- *
- * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
- * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
- * the identical calibration board must be used in each view, it must be fully visible, and all
- * objectPoints[i] must be the same, with all points lying roughly in a plane. **The calibration
- * target has to be rigid, or at least static if the camera (rather than the calibration target) is
- * shifted for grabbing images.**
- * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
- * #calibrateCamera for details.
- * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
- * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
- * a switch for calibration method selection. To use the object-releasing method, pass a value in
- * the range [1, objectPoints[0].size()-2]; any value outside this range selects the standard
- * calibration method. Fixing the top-right corner point of the calibration board grid is usually
- * recommended when the object-releasing method is used. According to
- * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
- * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
- * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
- * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
- * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
- * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
- * for details.
- * @param tvecs Output vector of translation vectors estimated for each pattern view.
- * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
- * be scaled based on three fixed points. The returned coordinates are accurate only if the above
- * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
- * is ignored with the standard calibration method.
- * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
- * See #calibrateCamera for details.
- * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
- * See #calibrateCamera for details.
- * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
- * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
- * parameter is ignored with the standard calibration method.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- *
- * @return the overall RMS re-projection error.
- *
- * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
- * #calibrateCamera for other detailed explanations.
- * @sa
- * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
- */
- + (double)calibrateCameraROExtended:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics stdDeviationsObjPoints:(Mat*)stdDeviationsObjPoints perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:stdDeviationsIntrinsics:stdDeviationsExtrinsics:stdDeviationsObjPoints:perViewErrors:));
- //
- // double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
- //
- + (double)calibrateCameraRO:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:flags:criteria:));
- + (double)calibrateCameraRO:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints flags:(int)flags NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:flags:));
- + (double)calibrateCameraRO:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints imageSize:(Size2i*)imageSize iFixedPoint:(int)iFixedPoint cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs newObjPoints:(Mat*)newObjPoints NS_SWIFT_NAME(calibrateCameraRO(objectPoints:imagePoints:imageSize:iFixedPoint:cameraMatrix:distCoeffs:rvecs:tvecs:newObjPoints:));
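- //
- // Hedged sketch of the object-releasing method (illustrative only; the board geometry below
- // is an assumption, and the input variables are reused from the calibrateCamera sketch above).
- // For a grid with `cols` inner corners per row, fixing the top-right corner means
- // iFixedPoint = cols - 1; any value outside [1, objectPoints[0].size()-2] selects the
- // standard method instead.
- //
- //     int cols = 9;                     // inner corners per row (assumed board)
- //     int iFixedPoint = cols - 1;       // top-right corner of objectPoints[0]
- //     Mat *newObjPoints = [Mat new];    // refined object points (RO method only)
- //     double rms = [Calib3d calibrateCameraRO:objectPoints
- //                                 imagePoints:imagePoints
- //                                   imageSize:imageSize
- //                                 iFixedPoint:iFixedPoint
- //                                cameraMatrix:cameraMatrix
- //                                  distCoeffs:distCoeffs
- //                                       rvecs:rvecs
- //                                       tvecs:tvecs
- //                                newObjPoints:newObjPoints];
- //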
- //
- // void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
- //
- /**
- * Computes useful camera characteristics from the camera intrinsic matrix.
- *
- * @param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or
- * #stereoCalibrate .
- * @param imageSize Input image size in pixels.
- * @param apertureWidth Physical width in mm of the sensor.
- * @param apertureHeight Physical height in mm of the sensor.
- * @param fovx Output field of view in degrees along the horizontal sensor axis.
- * @param fovy Output field of view in degrees along the vertical sensor axis.
- * @param focalLength Output focal length of the lens in mm.
- * @param principalPoint Output principal point in mm.
- * @param aspectRatio Output aspect ratio `$$f_y/f_x$$`.
- *
- * The function computes various useful camera characteristics from the previously estimated camera
- * matrix.
- *
- * NOTE:
- * Keep in mind that the unit 'mm' here stands for whatever unit of measure one chooses for
- * the chessboard pitch (it can thus be any value).
- */
- + (void)calibrationMatrixValues:(Mat*)cameraMatrix imageSize:(Size2i*)imageSize apertureWidth:(double)apertureWidth apertureHeight:(double)apertureHeight fovx:(double*)fovx fovy:(double*)fovy focalLength:(double*)focalLength principalPoint:(Point2d*)principalPoint aspectRatio:(double*)aspectRatio NS_SWIFT_NAME(calibrationMatrixValues(cameraMatrix:imageSize:apertureWidth:apertureHeight:fovx:fovy:focalLength:principalPoint:aspectRatio:));
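- //
- // Illustrative sketch (not part of the generated interface): recover the field of view and
- // physical focal length from a calibrated intrinsic matrix. The sensor dimensions here
- // (6.17 x 4.55 mm, a common 1/2.3" sensor) and the Point2d initializer are assumptions for
- // the example; per the NOTE above, any consistent unit works in place of mm.
- //
- //     double fovx = 0, fovy = 0, focalLength = 0, aspectRatio = 0;
- //     Point2d *principalPoint = [[Point2d alloc] initWithX:0 y:0];
- //     [Calib3d calibrationMatrixValues:cameraMatrix
- //                            imageSize:imageSize
- //                        apertureWidth:6.17
- //                       apertureHeight:4.55
- //                                 fovx:&fovx
- //                                 fovy:&fovy
- //                          focalLength:&focalLength
- //                       principalPoint:principalPoint
- //                          aspectRatio:&aspectRatio];
- //     // fovx/fovy are in degrees; focalLength and principalPoint come back in
- //     // the same physical unit as the aperture arguments.
- //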
- //
- // double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
- //
- /**
- * Calibrates a stereo camera setup. This function finds the intrinsic parameters
- * for each of the two cameras and the extrinsic parameters between the two cameras.
- *
- * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
- * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
- * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() must be
- * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() must
- * be equal for each i.
- * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- * observed by the first camera. The same structure as in REF: calibrateCamera.
- * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- * observed by the second camera. The same structure as in REF: calibrateCamera.
- * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
- * cameraMatrix1.
- * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
- * description for distCoeffs1.
- * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
- * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
- * points given in the first camera's coordinate system to points in the second camera's
- * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
- * from the first camera's coordinate system to the second camera's coordinate system. Due to its
- * duality, this tuple is equivalent to the position of the first camera with respect to the
- * second camera coordinate system.
- * @param T Output translation vector, see description above.
- * @param E Output essential matrix.
- * @param F Output fundamental matrix.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
- * matrices are estimated.
- * - REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
- * according to the specified flags. Initial values are provided by the user.
- * - REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
- * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
- * - REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
- * - REF: CALIB_FIX_FOCAL_LENGTH Fix `$$f^{(j)}_x$$` and `$$f^{(j)}_y$$` .
- * - REF: CALIB_FIX_ASPECT_RATIO Optimize `$$f^{(j)}_y$$`. Fix the ratio `$$f^{(j)}_x/f^{(j)}_y$$`.
- * - REF: CALIB_SAME_FOCAL_LENGTH Enforce `$$f^{(0)}_x=f^{(1)}_x$$` and `$$f^{(0)}_y=f^{(1)}_y$$`.
- * - REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
- * zero and keep them fixed.
- * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
- * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
- * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. For backward
- * compatibility, this extra flag should be explicitly specified to make the calibration
- * function use the rational model and return 8 coefficients. If the flag is not set, the
- * function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the thin prism model and return 12 coefficients. If the flag is not
- * set, the function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
- * set, the function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * @param criteria Termination criteria for the iterative optimization algorithm.
- *
- * The function estimates the transformation between the two cameras of a stereo pair. If one computes
- * the poses of an object relative to the first camera and to the second camera,
- * (`$$R_1$$`,`$$T_1$$`) and (`$$R_2$$`,`$$T_2$$`), respectively, for a stereo camera where the
- * relative position and orientation between the two cameras are fixed, then those poses are directly
- * related to each other. This means that, if the relative position and orientation (`$$R$$`,`$$T$$`) of the
- * two cameras is known, it is possible to compute (`$$R_2$$`,`$$T_2$$`) when (`$$R_1$$`,`$$T_1$$`) is
- * given. This is what the described function does. It computes (`$$R$$`,`$$T$$`) such that:
- *
- * `$$R_2=R R_1$$`
- * `$$T_2=R T_1 + T.$$`
- *
- * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
- * coordinate system when given the point's coordinate representation in the first camera's coordinate
- * system:
- *
- * `$$\begin{bmatrix}
- * X_2 \\
- * Y_2 \\
- * Z_2 \\
- * 1
- * \end{bmatrix} = \begin{bmatrix}
- * R & T \\
- * 0 & 1
- * \end{bmatrix} \begin{bmatrix}
- * X_1 \\
- * Y_1 \\
- * Z_1 \\
- * 1
- * \end{bmatrix}.$$`
- *
- *
- * Optionally, it computes the essential matrix E:
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R$$`
- *
- * where `$$T_i$$` are components of the translation vector `$$T$$` : `$$T=[T_0, T_1, T_2]^T$$` .
- * And the function can also compute the fundamental matrix F:
- *
- * `$$F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}$$`
- *
- * Besides the stereo-related information, the function can also perform a full calibration of each of
- * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
- * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
- * estimated with high accuracy for each of the cameras individually (for example, using
- * #calibrateCamera ), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to the
- * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
- * estimated at once, it makes sense to restrict some parameters, for example, pass
- * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
- * reasonable assumption.
- *
- * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
- * points in all the available views from both cameras. The function returns the final value of the
- * re-projection error.
- */
- + (double)stereoCalibrateExtended:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:perViewErrors:flags:criteria:));
- /**
- * Calibrates a stereo camera setup. This function finds the intrinsic parameters
- * for each of the two cameras and the extrinsic parameters between the two cameras.
- *
- * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
- * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
- * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() must be
- * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() must
- * be equal for each i.
- * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- * observed by the first camera. The same structure as in REF: calibrateCamera.
- * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- * observed by the second camera. The same structure as in REF: calibrateCamera.
- * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
- * cameraMatrix1.
- * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
- * description for distCoeffs1.
- * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
- * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
- * points given in the first camera's coordinate system to points in the second camera's
- * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
- * from the first camera's coordinate system to the second camera's coordinate system. Due to its
- * duality, this tuple is equivalent to the position of the first camera with respect to the
- * second camera coordinate system.
- * @param T Output translation vector, see description above.
- * @param E Output essential matrix.
- * @param F Output fundamental matrix.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
- * matrices are estimated.
- * - REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
- * according to the specified flags. Initial values are provided by the user.
- * - REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
- * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
- * - REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
- * - REF: CALIB_FIX_FOCAL_LENGTH Fix `$$f^{(j)}_x$$` and `$$f^{(j)}_y$$` .
- * - REF: CALIB_FIX_ASPECT_RATIO Optimize `$$f^{(j)}_y$$`. Fix the ratio `$$f^{(j)}_x/f^{(j)}_y$$`.
- * - REF: CALIB_SAME_FOCAL_LENGTH Enforce `$$f^{(0)}_x=f^{(1)}_x$$` and `$$f^{(0)}_y=f^{(1)}_y$$`.
- * - REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
- * zero and keep them fixed.
- * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
- * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
- * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. For backward
- * compatibility, this extra flag should be explicitly specified to make the calibration
- * function use the rational model and return 8 coefficients. If the flag is not set, the
- * function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the thin prism model and return 12 coefficients. If the flag is not
- * set, the function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
- * compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
- * set, the function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- *
- * The function estimates the transformation between the two cameras of a stereo pair. If one computes
- * the poses of an object relative to the first camera and to the second camera,
- * (`$$R_1$$`,`$$T_1$$`) and (`$$R_2$$`,`$$T_2$$`), respectively, for a stereo camera where the
- * relative position and orientation between the two cameras are fixed, then those poses are directly
- * related to each other. This means that, if the relative position and orientation (`$$R$$`,`$$T$$`) of the
- * two cameras is known, it is possible to compute (`$$R_2$$`,`$$T_2$$`) when (`$$R_1$$`,`$$T_1$$`) is
- * given. This is what the described function does. It computes (`$$R$$`,`$$T$$`) such that:
- *
- * `$$R_2=R R_1$$`
- * `$$T_2=R T_1 + T.$$`
- *
- * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
- * coordinate system when given the point's coordinate representation in the first camera's coordinate
- * system:
- *
- * `$$\begin{bmatrix}
- * X_2 \\
- * Y_2 \\
- * Z_2 \\
- * 1
- * \end{bmatrix} = \begin{bmatrix}
- * R & T \\
- * 0 & 1
- * \end{bmatrix} \begin{bmatrix}
- * X_1 \\
- * Y_1 \\
- * Z_1 \\
- * 1
- * \end{bmatrix}.$$`
- *
- *
- * Optionally, it computes the essential matrix E:
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R$$`
- *
- * where `$$T_i$$` are components of the translation vector `$$T$$` : `$$T=[T_0, T_1, T_2]^T$$` .
- * And the function can also compute the fundamental matrix F:
- *
- * `$$F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}$$`
- *
- * Besides the stereo-related information, the function can also perform a full calibration of each of
- * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
- * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
- * estimated with high accuracy for each of the cameras individually (for example, using
- * #calibrateCamera ), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to the
- * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
- * estimated at once, it makes sense to restrict some parameters, for example, pass
- * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
- * reasonable assumption.
- *
- * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
- * points in all the available views from both cameras. The function returns the final value of the
- * re-projection error.
- */
- + (double)stereoCalibrateExtended:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:perViewErrors:flags:));
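- //
- // A minimal usage sketch (not part of the generated header): calibrating a stereo pair whose
- // per-camera intrinsics were already estimated with calibrateCamera. The enclosing Calib3d
- // class and the CALIB_FIX_INTRINSIC constant are assumed to be available as in the stock
- // opencv2 framework, and objectPoints/imagePoints1/imagePoints2 are assumed to have been
- // collected beforehand (e.g. with findChessboardCorners):
- //
- //   NSArray<Mat*> *objectPoints, *imagePoints1, *imagePoints2;     // filled elsewhere
- //   Mat *cameraMatrix1, *distCoeffs1, *cameraMatrix2, *distCoeffs2; // from calibrateCamera
- //   Mat *R = [Mat new], *T = [Mat new], *E = [Mat new], *F = [Mat new];
- //   Mat *perViewErrors = [Mat new];
- //   Size2i *imageSize = [[Size2i alloc] initWithWidth:1280 height:720];
- //   double rms = [Calib3d stereoCalibrateExtended:objectPoints imagePoints1:imagePoints1
- //       imagePoints2:imagePoints2 cameraMatrix1:cameraMatrix1 distCoeffs1:distCoeffs1
- //       cameraMatrix2:cameraMatrix2 distCoeffs2:distCoeffs2 imageSize:imageSize
- //       R:R T:T E:E F:F perViewErrors:perViewErrors flags:CALIB_FIX_INTRINSIC];
- //   // rms is the overall re-projection error; perViewErrors holds per-view RMS values.
- //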
- /**
- * Calibrates a stereo camera setup. This function finds the intrinsic parameters
- * for each of the two cameras and the extrinsic parameters between the two cameras.
- *
- * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
- * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
- * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
- * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
- * be equal for each i.
- * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- * observed by the first camera. The same structure as in REF: calibrateCamera.
- * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- * observed by the second camera. The same structure as in REF: calibrateCamera.
- * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description
- * for cameraMatrix1.
- * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
- * description for distCoeffs1.
- * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
- * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
- * points given in the first camera's coordinate system to points in the second camera's
- * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
- * from the first camera's coordinate system to the second camera's coordinate system. Due to its
- * duality, this tuple is equivalent to the position of the first camera with respect to the
- * second camera coordinate system.
- * @param T Output translation vector, see description above.
- * @param E Output essential matrix.
- * @param F Output fundamental matrix.
- * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
- *
- * This overload runs with the default flags (REF: CALIB_FIX_INTRINSIC). The supported flags are:
- * - REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
- * matrices are estimated.
- * - REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
- * according to the specified flags. Initial values are provided by the user.
- * - REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
- * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
- * - REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
- * - REF: CALIB_FIX_FOCAL_LENGTH Fix `$$f^{(j)}_x$$` and `$$f^{(j)}_y$$` .
- * - REF: CALIB_FIX_ASPECT_RATIO Optimize `$$f^{(j)}_y$$`. Fix the ratio `$$f^{(j)}_x/f^{(j)}_y$$`.
- * - REF: CALIB_SAME_FOCAL_LENGTH Enforce `$$f^{(0)}_x=f^{(1)}_x$$` and `$$f^{(0)}_y=f^{(1)}_y$$` .
- * - REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
- * zero and keep them fixed.
- * - REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
- * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
- * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- * - REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
- * compatibility, this extra flag should be explicitly specified to make the calibration
- * function use the rational model and return 8 coefficients. If the flag is not set, the
- * function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- * backward compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the thin prism model and return 12 coefficients. If the flag is not
- * set, the function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficients from the
- * supplied distCoeffs matrix are used. Otherwise, they are set to 0.
- * - REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- * backward compatibility, this extra flag should be explicitly specified to make the
- * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
- * set, the function computes and returns only 5 distortion coefficients.
- * - REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficients from the
- * supplied distCoeffs matrix are used. Otherwise, they are set to 0.
- *
- * The function estimates the transformation between two cameras forming a stereo pair. If one computes
- * the poses of an object relative to the first camera and to the second camera,
- * ( `$$R_1$$`,`$$T_1$$` ) and (`$$R_2$$`,`$$T_2$$`), respectively, for a stereo camera where the
- * relative position and orientation between the two cameras are fixed, then those poses are
- * related to each other. This means that, given the relative position and orientation (`$$R$$`,`$$T$$`)
- * of the two cameras, it is possible to compute (`$$R_2$$`,`$$T_2$$`) when (`$$R_1$$`,`$$T_1$$`) is
- * given. This is what the described function does. It computes (`$$R$$`,`$$T$$`) such that:
- *
- * `$$R_2=R R_1$$`
- * `$$T_2=R T_1 + T.$$`
- *
- * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
- * coordinate system when given the point's coordinate representation in the first camera's coordinate
- * system:
- *
- * `$$\begin{bmatrix}
- * X_2 \\
- * Y_2 \\
- * Z_2 \\
- * 1
- * \end{bmatrix} = \begin{bmatrix}
- * R & T \\
- * 0 & 1
- * \end{bmatrix} \begin{bmatrix}
- * X_1 \\
- * Y_1 \\
- * Z_1 \\
- * 1
- * \end{bmatrix}.$$`
- *
- *
- * Optionally, it computes the essential matrix E:
- *
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R$$`
- *
- * where `$$T_i$$` are components of the translation vector `$$T$$` : `$$T=[T_0, T_1, T_2]^T$$` .
- * And the function can also compute the fundamental matrix F:
- *
- * `$$F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}$$`
- *
- * Besides the stereo-related information, the function can also perform a full calibration of each of
- * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
- * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
- * estimated with high accuracy for each of the cameras individually (for example, using
- * #calibrateCamera ), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to
- * the function along with the computed intrinsic parameters. Otherwise, if all the parameters are
- * estimated at once, it makes sense to restrict some of them, for example by passing the
- * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
- * reasonable assumption.
- *
- * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
- * points in all the available views from both cameras. The function returns the final value of the
- * re-projection error.
- */
- + (double)stereoCalibrateExtended:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:perViewErrors:));
- //
- // double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
- //
- + (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:flags:criteria:));
- + (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F flags:(int)flags NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:flags:));
- + (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T E:(Mat*)E F:(Mat*)F NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:E:F:));
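- //
- // A minimal usage sketch (not part of the generated header): the non-extended overload with an
- // explicit termination criterion. TermCriteria's initWithType:maxCount:epsilon: initializer and
- // the CALIB_USE_INTRINSIC_GUESS constant are assumed to match the stock opencv2 framework;
- // type 3 equals COUNT + EPS, mirroring the C++ default
- // TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6):
- //
- //   TermCriteria *criteria = [[TermCriteria alloc] initWithType:3 maxCount:100 epsilon:1e-8];
- //   double rms = [Calib3d stereoCalibrate:objectPoints imagePoints1:imagePoints1
- //       imagePoints2:imagePoints2 cameraMatrix1:cameraMatrix1 distCoeffs1:distCoeffs1
- //       cameraMatrix2:cameraMatrix2 distCoeffs2:distCoeffs2 imageSize:imageSize
- //       R:R T:T E:E F:F flags:CALIB_USE_INTRINSIC_GUESS criteria:criteria];
- //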
- //
- // void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0)
- //
- /**
- * Computes rectification transforms for each head of a calibrated stereo camera.
- *
- * @param cameraMatrix1 First camera intrinsic matrix.
- * @param distCoeffs1 First camera distortion parameters.
- * @param cameraMatrix2 Second camera intrinsic matrix.
- * @param distCoeffs2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param T Translation vector from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- * brings points given in the unrectified first camera's coordinate system to points in the rectified
- * first camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- * brings points given in the unrectified second camera's coordinate system to points in the rectified
- * second camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified first camera's image.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified second camera's image.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. If the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
- * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- * images are zoomed and shifted so that only valid pixels are visible (no black areas after
- * rectification). alpha=1 means that the rectified images are decimated and shifted so that all
- * the pixels from the original camera images are retained in the rectified images (no source
- * image pixels are lost). Any intermediate value yields an intermediate result between
- * those two extreme cases.
- * @param newImageSize New image resolution after rectification. The same size should be passed to
- * #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
- * is passed (default), it is set to the original imageSize . Setting it to a larger value can help
- * preserve details in the original image, especially when there is strong radial distortion.
- * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
- * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
- * (see the picture below).
- * @param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
- * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
- * (see the picture below).
- *
- * The function computes the rotation matrices for each camera that (virtually) make both camera image
- * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- * as input. As output, it provides two rotation matrices and also two projection matrices in the new
- * coordinates. The function distinguishes the following two cases:
- *
- * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- * corresponding epipolar lines in the left and right cameras are horizontal and have the same
- * y-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx_1 & 0 \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx_2 & T_x*f \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix} ,$$`
- *
- * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
- * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_1 & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_2 & T_y*f \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix},$$`
- *
- * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- * initialize the rectification map for each camera.
- *
- * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- * the corresponding image regions. This means that the images are well rectified, which is what most
- * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- * their interiors are all valid pixels.
- *
- * ![image](pics/stereo_undistort.jpg)
- */
- + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha newImageSize:(Size2i*)newImageSize validPixROI1:(Rect2i*)validPixROI1 validPixROI2:(Rect2i*)validPixROI2 NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:newImageSize:validPixROI1:validPixROI2:));
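- //
- // A minimal usage sketch (not part of the generated header): rectifying a calibrated pair and
- // feeding the result into remap tables. R and T come from a prior stereoCalibrate call; the
- // initUndistortRectifyMap selector and the CV_32FC1 type constant are assumed to be exposed as
- // in the stock opencv2 framework:
- //
- //   Mat *R1 = [Mat new], *R2 = [Mat new], *P1 = [Mat new], *P2 = [Mat new], *Q = [Mat new];
- //   Rect2i *roi1 = [Rect2i new], *roi2 = [Rect2i new];
- //   [Calib3d stereoRectify:cameraMatrix1 distCoeffs1:distCoeffs1 cameraMatrix2:cameraMatrix2
- //       distCoeffs2:distCoeffs2 imageSize:imageSize R:R T:T R1:R1 R2:R2 P1:P1 P2:P2 Q:Q
- //       flags:CALIB_ZERO_DISPARITY alpha:0 newImageSize:imageSize
- //       validPixROI1:roi1 validPixROI2:roi2];
- //   // The first three columns of P1/P2 are the new rectified camera matrices. R1/P1 (and
- //   // R2/P2) then parameterize the per-camera undistort-rectify maps:
- //   Mat *map1x = [Mat new], *map1y = [Mat new];
- //   [Calib3d initUndistortRectifyMap:cameraMatrix1 distCoeffs:distCoeffs1 R:R1
- //       newCameraMatrix:P1 size:imageSize m1type:CV_32FC1 map1:map1x map2:map1y];
- //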
- /**
- * Computes rectification transforms for each head of a calibrated stereo camera.
- *
- * @param cameraMatrix1 First camera intrinsic matrix.
- * @param distCoeffs1 First camera distortion parameters.
- * @param cameraMatrix2 Second camera intrinsic matrix.
- * @param distCoeffs2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param T Translation vector from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- * brings points given in the unrectified first camera's coordinate system to points in the rectified
- * first camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- * brings points given in the unrectified second camera's coordinate system to points in the rectified
- * second camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified first camera's image.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified second camera's image.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. If the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
- * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- * images are zoomed and shifted so that only valid pixels are visible (no black areas after
- * rectification). alpha=1 means that the rectified images are decimated and shifted so that all
- * the pixels from the original camera images are retained in the rectified images (no source
- * image pixels are lost). Any intermediate value yields an intermediate result between
- * those two extreme cases.
- * @param newImageSize New image resolution after rectification. The same size should be passed to
- * #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
- * is passed (default), it is set to the original imageSize . Setting it to a larger value can help
- * preserve details in the original image, especially when there is strong radial distortion.
- * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
- * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
- * (see the picture below).
- *
- * The function computes the rotation matrices for each camera that (virtually) make both camera image
- * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- * as input. As output, it provides two rotation matrices and also two projection matrices in the new
- * coordinates. The function distinguishes the following two cases:
- *
- * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- * corresponding epipolar lines in the left and right cameras are horizontal and have the same
- * y-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx_1 & 0 \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx_2 & T_x*f \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix} ,$$`
- *
- * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
- * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_1 & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_2 & T_y*f \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix},$$`
- *
- * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- * initialize the rectification map for each camera.
- *
- * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- * the corresponding image regions. This means that the images are well rectified, which is what most
- * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- * their interiors are all valid pixels.
- *
- * ![image](pics/stereo_undistort.jpg)
- */
- + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha newImageSize:(Size2i*)newImageSize validPixROI1:(Rect2i*)validPixROI1 NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:newImageSize:validPixROI1:));
- /**
- * Computes rectification transforms for each head of a calibrated stereo camera.
- *
- * @param cameraMatrix1 First camera intrinsic matrix.
- * @param distCoeffs1 First camera distortion parameters.
- * @param cameraMatrix2 Second camera intrinsic matrix.
- * @param distCoeffs2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param T Translation vector from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- * brings points given in the unrectified first camera's coordinate system to points in the rectified
- * first camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- * brings points given in the unrectified second camera's coordinate system to points in the rectified
- * second camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified first camera's image.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified second camera's image.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. If the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
- * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- * images are zoomed and shifted so that only valid pixels are visible (no black areas after
- * rectification). alpha=1 means that the rectified images are decimated and shifted so that all
- * the pixels from the original camera images are retained in the rectified images (no source
- * image pixels are lost). Any intermediate value yields an intermediate result between
- * those two extreme cases.
- * @param newImageSize New image resolution after rectification. The same size should be passed to
- * #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
- * is passed (default), it is set to the original imageSize . Setting it to a larger value can help
- * preserve details in the original image, especially when there is strong radial distortion.
- *
- * The function computes the rotation matrices for each camera that (virtually) make both camera image
- * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- * as input. As output, it provides two rotation matrices and also two projection matrices in the new
- * coordinates. The function distinguishes the following two cases:
- *
- * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- * corresponding epipolar lines in the left and right cameras are horizontal and have the same
- * y-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx_1 & 0 \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx_2 & T_x*f \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix} ,$$`
- *
- * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
- * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_1 & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_2 & T_y*f \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix},$$`
- *
- * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- * initialize the rectification map for each camera.
- *
- * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- * the corresponding image regions. This means that the images are well rectified, which is what most
- * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- * their interiors are all valid pixels.
- *
- * ![image](pics/stereo_undistort.jpg)
- */
- + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha newImageSize:(Size2i*)newImageSize NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:newImageSize:));
- /**
- * Computes rectification transforms for each head of a calibrated stereo camera.
- *
- * @param cameraMatrix1 First camera intrinsic matrix.
- * @param distCoeffs1 First camera distortion parameters.
- * @param cameraMatrix2 Second camera intrinsic matrix.
- * @param distCoeffs2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param T Translation vector from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- * brings points given in the unrectified first camera's coordinate system to points in the rectified
- * first camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- * brings points given in the unrectified second camera's coordinate system to points in the rectified
- * second camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified first camera's image.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified second camera's image.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. If the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
- * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- * images are zoomed and shifted so that only valid pixels are visible (no black areas after
- * rectification). alpha=1 means that the rectified images are decimated and shifted so that all
- * the pixels from the original camera images are retained in the rectified images (no source
- * image pixels are lost). Any intermediate value yields an intermediate result between
- * those two extreme cases.
- *
- * The function computes the rotation matrices for each camera that (virtually) make both camera image
- * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- * as input. As output, it provides two rotation matrices and also two projection matrices in the new
- * coordinates. The function distinguishes the following two cases:
- *
- * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- * corresponding epipolar lines in the left and right cameras are horizontal and have the same
- * y-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx_1 & 0 \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx_2 & T_x*f \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix} ,$$`
- *
- * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
- * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_1 & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_2 & T_y*f \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix},$$`
- *
- * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- * initialize the rectification map for each camera.
- *
- * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- * the corresponding image regions. This means that the images are well rectified, which is what most
- * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- * their interiors are all valid pixels.
- *
- * ![image](pics/stereo_undistort.jpg)
- */
- + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags alpha:(double)alpha NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:alpha:));
- /**
- * Computes rectification transforms for each head of a calibrated stereo camera.
- *
- * @param cameraMatrix1 First camera intrinsic matrix.
- * @param distCoeffs1 First camera distortion parameters.
- * @param cameraMatrix2 Second camera intrinsic matrix.
- * @param distCoeffs2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param T Translation vector from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- * brings points given in the unrectified first camera's coordinate system to points in the rectified
- * first camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- * brings points given in the unrectified second camera's coordinate system to points in the rectified
- * second camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified first camera's image.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified second camera's image.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. If the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- *
- * The function computes the rotation matrices for each camera that (virtually) make both camera image
- * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- * as input. As output, it provides two rotation matrices and also two projection matrices in the new
- * coordinates. The function distinguishes the following two cases:
- *
- * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- * corresponding epipolar lines in the left and right cameras are horizontal and have the same
- * y-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx_1 & 0 \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx_2 & T_x*f \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix} ,$$`
- *
- * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
- * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_1 & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_2 & T_y*f \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix},$$`
- *
- * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- * initialize the rectification map for each camera.
- *
- * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- * the corresponding image regions. This means that the images are well rectified, which is what most
- * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- * their interiors are all valid pixels.
- *
- * ![image](pics/stereo_undistort.jpg)
- */
- + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:flags:));
- /**
- * Computes rectification transforms for each head of a calibrated stereo camera.
- *
- * @param cameraMatrix1 First camera intrinsic matrix.
- * @param distCoeffs1 First camera distortion parameters.
- * @param cameraMatrix2 Second camera intrinsic matrix.
- * @param distCoeffs2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param T Translation vector from the coordinate system of the first camera to the second camera,
- * see REF: stereoCalibrate.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- * brings points given in the unrectified first camera's coordinate system to points in the rectified
- * first camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- * brings points given in the unrectified second camera's coordinate system to points in the rectified
- * second camera's coordinate system. In more technical terms, it performs a change of basis from the
- * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified first camera's image.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera, i.e. it projects points given in the rectified first camera coordinate system into the
- * rectified second camera's image.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- *
- * The function computes the rotation matrices for each camera that (virtually) make both camera image
- * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- * as input. As output, it provides two rotation matrices and also two projection matrices in the new
- * coordinates. The function distinguishes the following two cases:
- *
- * - **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- * corresponding epipolar lines in the left and right cameras are horizontal and have the same
- * y-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx_1 & 0 \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx_2 & T_x*f \\
- * 0 & f & cy & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix} ,$$`
- *
- * where `$$T_x$$` is a horizontal shift between the cameras and `$$cx_1=cx_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * - **Vertical stereo**: the first and the second camera views are shifted relative to each other
- * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- *
- * `$$\texttt{P1} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_1 & 0 \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix}$$`
- *
- * `$$\texttt{P2} = \begin{bmatrix}
- * f & 0 & cx & 0 \\
- * 0 & f & cy_2 & T_y*f \\
- * 0 & 0 & 1 & 0
- * \end{bmatrix},$$`
- *
- * where `$$T_y$$` is a vertical shift between the cameras and `$$cy_1=cy_2$$` if
- * REF: CALIB_ZERO_DISPARITY is set.
- *
- * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- * initialize the rectification map for each camera.
- *
- * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- * the corresponding image regions. This means that the images are well rectified, which is what most
- * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- * their interiors are all valid pixels.
- *
- * ![image](pics/stereo_undistort.jpg)
- */
- + (void)stereoRectify:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q NS_SWIFT_NAME(stereoRectify(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:imageSize:R:T:R1:R2:P1:P2:Q:));
- //
- // bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5)
- //
- /**
- * Computes a rectification transform for an uncalibrated stereo camera.
- *
- * @param points1 Array of feature points in the first image.
- * @param points2 The corresponding points in the second image. The same formats as in
- * #findFundamentalMat are supported.
- * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
- * #findFundamentalMat .
- * @param imgSize Size of the image.
- * @param H1 Output rectification homography matrix for the first image.
- * @param H2 Output rectification homography matrix for the second image.
- * @param threshold Optional threshold used to filter out the outliers. If the parameter is greater
- * than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
- * for which `$$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}$$` ) are
- * rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
- *
- * The function computes the rectification transformations without knowing intrinsic parameters of the
- * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
- * related difference from #stereoRectify is that the function outputs not the rectification
- * transformations in the object (3D) space, but the planar perspective transformations encoded by the
- * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
- *
- * NOTE:
- * While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
- * depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
- * it would be better to correct it before computing the fundamental matrix and calling this
- * function. For example, distortion coefficients can be estimated for each head of stereo camera
- * separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
- * just the point coordinates can be corrected with #undistortPoints .
- */
- + (BOOL)stereoRectifyUncalibrated:(Mat*)points1 points2:(Mat*)points2 F:(Mat*)F imgSize:(Size2i*)imgSize H1:(Mat*)H1 H2:(Mat*)H2 threshold:(double)threshold NS_SWIFT_NAME(stereoRectifyUncalibrated(points1:points2:F:imgSize:H1:H2:threshold:));
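- //
- // A minimal usage sketch (not part of the generated header): rectifying without intrinsics,
- // starting from point matches. points1/points2 hold corresponding features from the two
- // images and F comes from findFundamentalMat on the same pairs; warpPerspective (from the
- // Imgproc module) is only mentioned as the typical next step:
- //
- //   Mat *H1 = [Mat new], *H2 = [Mat new];
- //   BOOL ok = [Calib3d stereoRectifyUncalibrated:points1 points2:points2 F:F
- //       imgSize:imgSize H1:H1 H2:H2 threshold:5];
- //   // On success, warping image k by the homography Hk produces a rectified pair with
- //   // horizontal epipolar lines.
- //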
- /**
- * Computes a rectification transform for an uncalibrated stereo camera.
- *
- * @param points1 Array of feature points in the first image.
- * @param points2 The corresponding points in the second image. The same formats as in
- * #findFundamentalMat are supported.
- * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
- * #findFundamentalMat .
- * @param imgSize Size of the image.
- * @param H1 Output rectification homography matrix for the first image.
- * @param H2 Output rectification homography matrix for the second image.
- *
- * The function computes the rectification transformations without knowing intrinsic parameters of the
- * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
- * related difference from #stereoRectify is that the function outputs not the rectification
- * transformations in the object (3D) space, but the planar perspective transformations encoded by the
- * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
- *
- * NOTE:
- * While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
- * depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
- * it would be better to correct it before computing the fundamental matrix and calling this
- * function. For example, distortion coefficients can be estimated for each head of stereo camera
- * separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
- * just the point coordinates can be corrected with #undistortPoints .
- */
- + (BOOL)stereoRectifyUncalibrated:(Mat*)points1 points2:(Mat*)points2 F:(Mat*)F imgSize:(Size2i*)imgSize H1:(Mat*)H1 H2:(Mat*)H2 NS_SWIFT_NAME(stereoRectifyUncalibrated(points1:points2:F:imgSize:H1:H2:));
- //
- // float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags)
- //
- + (float)rectify3Collinear:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 cameraMatrix3:(Mat*)cameraMatrix3 distCoeffs3:(Mat*)distCoeffs3 imgpt1:(NSArray<Mat*>*)imgpt1 imgpt3:(NSArray<Mat*>*)imgpt3 imageSize:(Size2i*)imageSize R12:(Mat*)R12 T12:(Mat*)T12 R13:(Mat*)R13 T13:(Mat*)T13 R1:(Mat*)R1 R2:(Mat*)R2 R3:(Mat*)R3 P1:(Mat*)P1 P2:(Mat*)P2 P3:(Mat*)P3 Q:(Mat*)Q alpha:(double)alpha newImgSize:(Size2i*)newImgSize roi1:(Rect2i*)roi1 roi2:(Rect2i*)roi2 flags:(int)flags NS_SWIFT_NAME(rectify3Collinear(cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:cameraMatrix3:distCoeffs3:imgpt1:imgpt3:imageSize:R12:T12:R13:T13:R1:R2:R3:P1:P2:P3:Q:alpha:newImgSize:roi1:roi2:flags:));
- //
- // Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false)
- //
- /**
- * Returns the new camera intrinsic matrix based on the free scaling parameter.
- *
- * @param cameraMatrix Input camera intrinsic matrix.
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param imageSize Original image size.
- * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
- * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- * #stereoRectify for details.
- * @param newImgSize Image size after rectification. By default, it is set to imageSize .
- * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
- * undistorted image. See roi1, roi2 description in #stereoRectify .
- * @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the
- * principal point should be at the image center or not. By default, the principal point is chosen to
- * best fit a subset of the source image (determined by alpha) to the corrected image.
- * @return the new camera intrinsic matrix.
- *
- * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
- * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
- * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
- * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- * #initUndistortRectifyMap to produce the maps for #remap .
- */
- + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha newImgSize:(Size2i*)newImgSize validPixROI:(Rect2i*)validPixROI centerPrincipalPoint:(BOOL)centerPrincipalPoint NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:newImgSize:validPixROI:centerPrincipalPoint:));
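- /*
-  * Usage sketch (illustrative; the wrapper class name `Calib3d` and the companion calls
-  * initUndistortRectifyMap / remap are assumed from the stock opencv2 bindings):
-  *
-  *     Rect2i *roi = [Rect2i new];
-  *     Mat *newK = [Calib3d getOptimalNewCameraMatrix:K distCoeffs:dist imageSize:size
-  *                                               alpha:0.0 newImgSize:size validPixROI:roi];
-  *     // alpha = 0 keeps only valid pixels; alpha = 1 keeps every source pixel.
-  *     // Pass K, dist, newK and size on to initUndistortRectifyMap to build the remap maps.
-  */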
- /**
- * Returns the new camera intrinsic matrix based on the free scaling parameter.
- *
- * @param cameraMatrix Input camera intrinsic matrix.
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param imageSize Original image size.
- * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
- * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- * #stereoRectify for details.
- * @param newImgSize Image size after rectification. By default, it is set to imageSize .
- * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
- * undistorted image. See roi1, roi2 description in #stereoRectify .
- * In this overload centerPrincipalPoint defaults to false: the principal point is chosen to
- * best fit a subset of the source image (determined by alpha) to the corrected image.
- * @return the new camera intrinsic matrix.
- *
- * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
- * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
- * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
- * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- * #initUndistortRectifyMap to produce the maps for #remap .
- */
- + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha newImgSize:(Size2i*)newImgSize validPixROI:(Rect2i*)validPixROI NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:newImgSize:validPixROI:));
- /**
- * Returns the new camera intrinsic matrix based on the free scaling parameter.
- *
- * @param cameraMatrix Input camera intrinsic matrix.
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param imageSize Original image size.
- * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
- * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- * #stereoRectify for details.
- * @param newImgSize Image size after rectification. By default, it is set to imageSize .
- * In this overload the optional validPixROI rectangle (the all-good-pixels region in the
- * undistorted image; see the roi1, roi2 description in #stereoRectify) is not returned, and
- * centerPrincipalPoint defaults to false: the principal point is chosen to best fit a subset
- * of the source image (determined by alpha) to the corrected image.
- * @return the new camera intrinsic matrix.
- *
- * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
- * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
- * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
- * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- * #initUndistortRectifyMap to produce the maps for #remap .
- */
- + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha newImgSize:(Size2i*)newImgSize NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:newImgSize:));
- /**
- * Returns the new camera intrinsic matrix based on the free scaling parameter.
- *
- * @param cameraMatrix Input camera intrinsic matrix.
- * @param distCoeffs Input vector of distortion coefficients
- * `$$\distcoeffs$$`. If the vector is NULL/empty, the zero distortion coefficients are
- * assumed.
- * @param imageSize Original image size.
- * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
- * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- * #stereoRectify for details.
- * In this overload newImgSize defaults to imageSize, the optional validPixROI rectangle (see the
- * roi1, roi2 description in #stereoRectify) is not returned, and centerPrincipalPoint defaults
- * to false: the principal point is chosen to best fit a subset of the source image (determined
- * by alpha) to the corrected image.
- * @return the new camera intrinsic matrix.
- *
- * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
- * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
- * When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
- * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- * #initUndistortRectifyMap to produce the maps for #remap .
- */
- + (Mat*)getOptimalNewCameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs imageSize:(Size2i*)imageSize alpha:(double)alpha NS_SWIFT_NAME(getOptimalNewCameraMatrix(cameraMatrix:distCoeffs:imageSize:alpha:));
- //
- // void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
- //
- /**
- * Computes Hand-Eye calibration: `$$_{}^{g}\textrm{T}_c$$`
- *
- * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from gripper frame to robot base frame.
- * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from gripper frame to robot base frame.
- * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from calibration target frame to camera frame.
- * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from calibration target frame to camera frame.
- * @param R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
- * @param t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
- * @param method One of the implemented Hand-Eye calibration methods; see cv::HandEyeCalibrationMethod
- *
- * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
- * rotation then the translation (separable solutions) and the following methods are implemented:
- * - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/EyeCalibration \cite Tsai89
- * - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
- * - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
- *
- * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- * with the following implemented methods:
- * - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
- * - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
- *
- * The following picture describes the Hand-Eye calibration problem, where the transformation between a
- * camera ("eye") mounted on a robot gripper ("hand") and the gripper itself has to be estimated. This
- * configuration is called eye-in-hand.
- *
- * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
- * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
- * the suitable transformations to the function, see below.
- *
- * ![](pics/hand-eye_figure.png)
- *
- * The calibration procedure is the following:
- * - a static calibration pattern is used to estimate the transformation between the target frame
- * and the camera frame
- * - the robot gripper is moved in order to acquire several poses
- * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- * instance the robot kinematics
- * `$$
- * \begin{bmatrix}
- * X_b\\
- * Y_b\\
- * Z_b\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * $$`
- * - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
- * for instance a pose estimation method (PnP) from 2D-3D point correspondences
- * `$$
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_t\\
- * Y_t\\
- * Z_t\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * The Hand-Eye calibration procedure returns the following homogeneous transformation
- * `$$
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}$$` equation:
- * - for an eye-in-hand configuration
- * `$$
- * \begin{align*}
- * ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- * \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- *
- * (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
- * \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- *
- * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- * \end{align*}
- * $$`
- *
- * - for an eye-to-hand configuration
- * `$$
- * \begin{align*}
- * ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- * \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- *
- * (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
- * \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- *
- * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- * \end{align*}
- * $$`
- *
- * \note
- * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
- * \note
- * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
- * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
- */
- + (void)calibrateHandEye:(NSArray<Mat*>*)R_gripper2base t_gripper2base:(NSArray<Mat*>*)t_gripper2base R_target2cam:(NSArray<Mat*>*)R_target2cam t_target2cam:(NSArray<Mat*>*)t_target2cam R_cam2gripper:(Mat*)R_cam2gripper t_cam2gripper:(Mat*)t_cam2gripper method:(HandEyeCalibrationMethod)method NS_SWIFT_NAME(calibrateHandEye(R_gripper2base:t_gripper2base:R_target2cam:t_target2cam:R_cam2gripper:t_cam2gripper:method:));
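- /*
-  * Usage sketch for the eye-in-hand case (illustrative; the wrapper class name `Calib3d`
-  * and the enum spelling CALIB_HAND_EYE_TSAI are assumptions based on the stock opencv2
-  * bindings; the pose arrays are expected to be collected beforehand as described above):
-  *
-  *     // R_gripper2base / t_gripper2base: one entry per robot pose, from forward kinematics.
-  *     // R_target2cam / t_target2cam: one entry per pose, e.g. from solvePnP on the pattern.
-  *     Mat *R_cam2gripper = [Mat new], *t_cam2gripper = [Mat new];
-  *     [Calib3d calibrateHandEye:R_gripper2base t_gripper2base:t_gripper2base
-  *                  R_target2cam:R_target2cam t_target2cam:t_target2cam
-  *                 R_cam2gripper:R_cam2gripper t_cam2gripper:t_cam2gripper
-  *                        method:CALIB_HAND_EYE_TSAI];
-  */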
- /**
- * Computes Hand-Eye calibration: `$$_{}^{g}\textrm{T}_c$$`
- *
- * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from gripper frame to robot base frame.
- * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the robot base frame (`$$_{}^{b}\textrm{T}_g$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from gripper frame to robot base frame.
- * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from calibration target frame to camera frame.
- * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the target frame to the camera frame (`$$_{}^{c}\textrm{T}_t$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from calibration target frame to camera frame.
- * @param R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
- * @param t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the camera frame to the gripper frame (`$$_{}^{g}\textrm{T}_c$$`).
- *
- * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
- * rotation then the translation (separable solutions) and the following methods are implemented:
- * - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/EyeCalibration \cite Tsai89
- * - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
- * - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
- *
- * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- * with the following implemented methods:
- * - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
- * - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
- *
- * The following picture describes the Hand-Eye calibration problem, where the transformation between a
- * camera ("eye") mounted on a robot gripper ("hand") and the gripper itself has to be estimated. This
- * configuration is called eye-in-hand.
- *
- * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
- * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
- * the suitable transformations to the function, see below.
- *
- * ![](pics/hand-eye_figure.png)
- *
- * The calibration procedure is the following:
- * - a static calibration pattern is used to estimate the transformation between the target frame
- * and the camera frame
- * - the robot gripper is moved in order to acquire several poses
- * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- * instance the robot kinematics
- * `$$
- * \begin{bmatrix}
- * X_b\\
- * Y_b\\
- * Z_b\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * $$`
- * - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
- * for instance a pose estimation method (PnP) from 2D-3D point correspondences
- * `$$
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_t\\
- * Y_t\\
- * Z_t\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * The Hand-Eye calibration procedure returns the following homogeneous transformation
- * `$$
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}$$` equation:
- * - for an eye-in-hand configuration
- * `$$
- * \begin{align*}
- * ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- * \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- *
- * (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
- * \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- *
- * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- * \end{align*}
- * $$`
- *
- * - for an eye-to-hand configuration
- * `$$
- * \begin{align*}
- * ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- * \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- *
- * (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
- * \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- *
- * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- * \end{align*}
- * $$`
- *
- * \note
- * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
- * \note
- * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
- * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
- */
- + (void)calibrateHandEye:(NSArray<Mat*>*)R_gripper2base t_gripper2base:(NSArray<Mat*>*)t_gripper2base R_target2cam:(NSArray<Mat*>*)R_target2cam t_target2cam:(NSArray<Mat*>*)t_target2cam R_cam2gripper:(Mat*)R_cam2gripper t_cam2gripper:(Mat*)t_cam2gripper NS_SWIFT_NAME(calibrateHandEye(R_gripper2base:t_gripper2base:R_target2cam:t_target2cam:R_cam2gripper:t_cam2gripper:));
- //
- // void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
- //
- /**
- * Computes Robot-World/Hand-Eye calibration: `$$_{}^{w}\textrm{T}_b$$` and `$$_{}^{c}\textrm{T}_g$$`
- *
- * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from world frame to the camera frame.
- * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from world frame to the camera frame.
- * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from robot base frame to the gripper frame.
- * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from robot base frame to the gripper frame.
- * @param R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
- * @param t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
- * @param R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
- * @param t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
- * @param method One of the implemented Robot-World/Hand-Eye calibration methods; see cv::RobotWorldHandEyeCalibrationMethod
- *
- * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
- * rotation then the translation (separable solutions):
- * - M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
- *
- * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- * with the following implemented method:
- * - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
- *
- * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
- * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
- *
- * ![](pics/robot-world_hand-eye_figure.png)
- *
- * The calibration procedure is the following:
- * - a static calibration pattern is used to estimate the transformation between the target frame
- * and the camera frame
- * - the robot gripper is moved in order to acquire several poses
- * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- * instance the robot kinematics
- * `$$
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_b\\
- * Y_b\\
- * Z_b\\
- * 1
- * \end{bmatrix}
- * $$`
- * - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
- * for instance a pose estimation method (PnP) from 2D-3D point correspondences
- * `$$
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_w\\
- * Y_w\\
- * Z_w\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
- * `$$
- * \begin{bmatrix}
- * X_w\\
- * Y_w\\
- * Z_w\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_b\\
- * Y_b\\
- * Z_b\\
- * 1
- * \end{bmatrix}
- * $$`
- * `$$
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}$$` equation, with:
- * - `$$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w$$`
- * - `$$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b$$`
- * - `$$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g$$`
- * - `$$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b$$`
- *
- * \note
- * At least 3 measurements are required (the input vectors' size must be greater than or equal to 3).
- */
- + (void)calibrateRobotWorldHandEye:(NSArray<Mat*>*)R_world2cam t_world2cam:(NSArray<Mat*>*)t_world2cam R_base2gripper:(NSArray<Mat*>*)R_base2gripper t_base2gripper:(NSArray<Mat*>*)t_base2gripper R_base2world:(Mat*)R_base2world t_base2world:(Mat*)t_base2world R_gripper2cam:(Mat*)R_gripper2cam t_gripper2cam:(Mat*)t_gripper2cam method:(RobotWorldHandEyeCalibrationMethod)method NS_SWIFT_NAME(calibrateRobotWorldHandEye(R_world2cam:t_world2cam:R_base2gripper:t_base2gripper:R_base2world:t_base2world:R_gripper2cam:t_gripper2cam:method:));
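- /*
-  * Usage sketch (illustrative; the wrapper class name `Calib3d` is an assumption; the
-  * per-pose input arrays are expected to be collected beforehand as described above):
-  *
-  *     Mat *R_base2world = [Mat new], *t_base2world = [Mat new];
-  *     Mat *R_gripper2cam = [Mat new], *t_gripper2cam = [Mat new];
-  *     [Calib3d calibrateRobotWorldHandEye:R_world2cam t_world2cam:t_world2cam
-  *                          R_base2gripper:R_base2gripper t_base2gripper:t_base2gripper
-  *                            R_base2world:R_base2world t_base2world:t_base2world
-  *                           R_gripper2cam:R_gripper2cam t_gripper2cam:t_gripper2cam];
-  */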
- /**
- * Computes Robot-World/Hand-Eye calibration: `$$_{}^{w}\textrm{T}_b$$` and `$$_{}^{c}\textrm{T}_g$$`
- *
- * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from world frame to the camera frame.
- * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the world frame to the camera frame (`$$_{}^{c}\textrm{T}_w$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from world frame to the camera frame.
- * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
- * This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
- * for all the transformations from robot base frame to the gripper frame.
- * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the gripper frame (`$$_{}^{g}\textrm{T}_b$$`).
- * This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
- * from robot base frame to the gripper frame.
- * @param R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
- * @param t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the robot base frame to the world frame (`$$_{}^{w}\textrm{T}_b$$`).
- * @param R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
- * @param t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
- * expressed in the gripper frame to the camera frame (`$$_{}^{c}\textrm{T}_g$$`).
- *
- * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
- * rotation then the translation (separable solutions):
- * - M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
- *
- * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- * with the following implemented method:
- * - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
- *
- * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
- * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
- *
- * ![](pics/robot-world_hand-eye_figure.png)
- *
- * The calibration procedure is the following:
- * - a static calibration pattern is used to estimate the transformation between the target frame
- * and the camera frame
- * - the robot gripper is moved in order to acquire several poses
- * - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- * instance the robot kinematics
- * `$$
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_b\\
- * Y_b\\
- * Z_b\\
- * 1
- * \end{bmatrix}
- * $$`
- * - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
- * for instance a pose estimation method (PnP) from 2D-3D point correspondences
- * `$$
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_w\\
- * Y_w\\
- * Z_w\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
- * `$$
- * \begin{bmatrix}
- * X_w\\
- * Y_w\\
- * Z_w\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_b\\
- * Y_b\\
- * Z_b\\
- * 1
- * \end{bmatrix}
- * $$`
- * `$$
- * \begin{bmatrix}
- * X_c\\
- * Y_c\\
- * Z_c\\
- * 1
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
- * 0_{1 \times 3} & 1
- * \end{bmatrix}
- * \begin{bmatrix}
- * X_g\\
- * Y_g\\
- * Z_g\\
- * 1
- * \end{bmatrix}
- * $$`
- *
- * This problem is also known as solving the `$$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}$$` equation, with:
- * - `$$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w$$`
- * - `$$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b$$`
- * - `$$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g$$`
- * - `$$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b$$`
- *
- * \note
- * At least 3 measurements are required (the input vectors' size must be greater than or equal to 3).
- */
- + (void)calibrateRobotWorldHandEye:(NSArray<Mat*>*)R_world2cam t_world2cam:(NSArray<Mat*>*)t_world2cam R_base2gripper:(NSArray<Mat*>*)R_base2gripper t_base2gripper:(NSArray<Mat*>*)t_base2gripper R_base2world:(Mat*)R_base2world t_base2world:(Mat*)t_base2world R_gripper2cam:(Mat*)R_gripper2cam t_gripper2cam:(Mat*)t_gripper2cam NS_SWIFT_NAME(calibrateRobotWorldHandEye(R_world2cam:t_world2cam:R_base2gripper:t_base2gripper:R_base2world:t_base2world:R_gripper2cam:t_gripper2cam:));
- //
- // void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
- //
- /**
- * Converts points from Euclidean to homogeneous space.
- *
- * @param src Input vector of N-dimensional points.
- * @param dst Output vector of N+1-dimensional points.
- *
- * The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
- * point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
- */
- + (void)convertPointsToHomogeneous:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(convertPointsToHomogeneous(src:dst:));
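- /*
-  * For example, the 2D point (2, 3) becomes the homogeneous point (2, 3, 1). A minimal
-  * sketch (assuming the wrapper class is named `Calib3d`):
-  *
-  *     Mat *dst = [Mat new];
-  *     [Calib3d convertPointsToHomogeneous:src dst:dst];
-  */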
- //
- // void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
- //
- /**
- * Converts points from homogeneous to Euclidean space.
- *
- * @param src Input vector of N-dimensional points.
- * @param dst Output vector of N-1-dimensional points.
- *
- * The function converts points from homogeneous to Euclidean space using perspective projection. That is,
- * each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
- * output point coordinates will be (0,0,0,...).
- */
- + (void)convertPointsFromHomogeneous:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(convertPointsFromHomogeneous(src:dst:));
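- /*
-  * For example, the homogeneous point (4, 6, 2) maps to (4/2, 6/2) = (2, 3). A minimal
-  * sketch (assuming the wrapper class is named `Calib3d`):
-  *
-  *     Mat *dst = [Mat new];
-  *     [Calib3d convertPointsFromHomogeneous:src dst:dst];
-  */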
- //
- // Mat cv::findFundamentalMat(Mat points1, Mat points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
- //
- /**
- * Calculates a fundamental matrix from the corresponding points in two images.
- *
- * @param points1 Array of N points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param method Method for computing a fundamental matrix.
- * - REF: FM_7POINT for a 7-point algorithm. `$$N = 7$$`
- * - REF: FM_8POINT for an 8-point algorithm. `$$N \ge 8$$`
- * - REF: FM_RANSAC for the RANSAC algorithm. `$$N \ge 8$$`
- * - REF: FM_LMEDS for the LMedS algorithm. `$$N \ge 8$$`
- * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
- * of confidence (probability) that the estimated matrix is correct.
- * @param mask Optional output mask; every element is set to 0 for outliers and to 1 for the other
- * points. The mask is computed only by the RANSAC and LMedS methods.
- * @param maxIters The maximum number of robust method iterations.
- *
- * The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T F [p_1; 1] = 0$$`
- *
- * where `$$F$$` is a fundamental matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively.
- *
- * The function calculates the fundamental matrix using one of four methods listed above and returns
- * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
- * algorithm, the function may return up to 3 solutions ( `$$9 \times 3$$` matrix that stores all 3
- * matrices sequentially).
- *
- * The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
- * epipolar lines corresponding to the specified points. It can also be passed to
- * #stereoRectifyUncalibrated to compute the rectification transformation:
- *
- * // Example. Estimation of fundamental matrix using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * Mat fundamental_matrix =
- * findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
- *
- */
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence maxIters:(int)maxIters mask:(Mat*)mask NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:maxIters:mask:));
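- /*
-  * The C++ example above, restated for these bindings (illustrative; the wrapper class
-  * name `Calib3d` and the constant spelling `Calib3d.FM_RANSAC` are assumptions):
-  *
-  *     Mat *mask = [Mat new];
-  *     Mat *F = [Calib3d findFundamentalMat:points1 points2:points2
-  *                                   method:Calib3d.FM_RANSAC ransacReprojThreshold:3
-  *                               confidence:0.99 maxIters:1000 mask:mask];
-  *     // mask marks each correspondence as inlier (1) or outlier (0).
-  */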
- /**
- * Calculates a fundamental matrix from the corresponding points in two images.
- *
- * @param points1 Array of N points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param method Method for computing a fundamental matrix.
- * - REF: FM_7POINT for a 7-point algorithm. `$$N = 7$$`
- * - REF: FM_8POINT for an 8-point algorithm. `$$N \ge 8$$`
- * - REF: FM_RANSAC for the RANSAC algorithm. `$$N \ge 8$$`
- * - REF: FM_LMEDS for the LMedS algorithm. `$$N \ge 8$$`
- * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
- * of confidence (probability) that the estimated matrix is correct.
- * @param maxIters The maximum number of robust method iterations.
- *
- * The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T F [p_1; 1] = 0$$`
- *
- * where `$$F$$` is a fundamental matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively.
- *
- * The function calculates the fundamental matrix using one of four methods listed above and returns
- * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
- * algorithm, the function may return up to 3 solutions ( `$$9 \times 3$$` matrix that stores all 3
- * matrices sequentially).
- *
- * The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
- * epipolar lines corresponding to the specified points. It can also be passed to
- * #stereoRectifyUncalibrated to compute the rectification transformation:
- *
- * // Example. Estimation of fundamental matrix using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * Mat fundamental_matrix =
- * findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
- *
- */
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence maxIters:(int)maxIters NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:maxIters:));
- //
- // Mat cv::findFundamentalMat(Mat points1, Mat points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
- //
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence mask:(Mat*)mask NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:mask:));
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold confidence:(double)confidence NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:confidence:));
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:ransacReprojThreshold:));
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 method:(int)method NS_SWIFT_NAME(findFundamentalMat(points1:points2:method:));
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 NS_SWIFT_NAME(findFundamentalMat(points1:points2:));
- //
- // Mat cv::findFundamentalMat(Mat points1, Mat points2, Mat& mask, UsacParams params)
- //
- + (Mat*)findFundamentalMat:(Mat*)points1 points2:(Mat*)points2 mask:(Mat*)mask params:(UsacParams*)params NS_SWIFT_NAME(findFundamentalMat(points1:points2:mask:params:));
- //
- // Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
- //
- /**
- * Calculates an essential matrix from the corresponding points in two images.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
- * for the other points. The array is computed only in the RANSAC and LMedS methods.
- * @param maxIters The maximum number of robust method iterations.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters mask:(Mat*)mask NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:threshold:maxIters:mask:));
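- /*
-  * Usage sketch (illustrative; the wrapper class name `Calib3d` and the constant
-  * spelling `Calib3d.RANSAC` are assumptions based on the stock opencv2 bindings):
-  *
-  *     Mat *mask = [Mat new];
-  *     Mat *E = [Calib3d findEssentialMat:points1 points2:points2 cameraMatrix:K
-  *                                 method:Calib3d.RANSAC prob:0.999 threshold:1.0
-  *                               maxIters:1000 mask:mask];
-  *     // E can then be passed to recoverPose to obtain the relative R, t between the views.
-  */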
- /**
- * Calculates an essential matrix from the corresponding points in two images.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * (The optional inlier/outlier mask, computed only by the RANSAC and LMedS methods, is not
- * returned by this overload.)
- * @param maxIters The maximum number of robust method iterations.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:threshold:maxIters:));
- /**
- * Calculates an essential matrix from the corresponding points in two images.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * (In this overload maxIters defaults to 1000 and the optional inlier/outlier mask is not returned.)
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:threshold:));
- /**
- * Calculates an essential matrix from the corresponding points in two images.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * (In this overload the RANSAC threshold defaults to 1.0 pixel, maxIters defaults to 1000, and
- * the optional inlier/outlier mask is not returned.)
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method prob:(double)prob NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:prob:));
- /**
- * Calculates an essential matrix from the corresponding points in two images.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * (In this overload prob defaults to 0.999, the RANSAC threshold defaults to 1.0 pixel, maxIters
- * defaults to 1000, and the optional inlier/outlier mask is not returned.)
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix method:(int)method NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:method:));
- /**
- * Calculates an essential matrix from the corresponding points in two images.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix:));
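- //
- // A minimal usage sketch for the overloads above, assuming these class methods
- // live on the `Calib3d` wrapper class of the opencv2 framework and that the
- // `CvType` values and `Calib3d.RANSAC` constant are exposed as class
- // properties, as in the generated bindings; the point data is placeholder.
- //
- //     // N >= 5 correspondences stored as Nx2 floating-point matrices
- //     Mat *points1 = [[Mat alloc] initWithRows:100 cols:2 type:CvType.CV_32F];
- //     Mat *points2 = [[Mat alloc] initWithRows:100 cols:2 type:CvType.CV_32F];
- //     // ... fill points1/points2 with tracked feature locations ...
- //
- //     // replace the identity with calibrated intrinsics
- //     Mat *K = [Mat eye:3 cols:3 type:CvType.CV_64F];
- //     Mat *E = [Calib3d findEssentialMat:points1 points2:points2 cameraMatrix:K
- //                                 method:Calib3d.RANSAC prob:0.999];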
- //
- // Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
- //
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- * @param pp principal point of the camera.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
- * for the other points. The array is computed only in the RANSAC and LMedS methods.
- * @param maxIters The maximum number of robust method iterations.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters mask:(Mat*)mask NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:threshold:maxIters:mask:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- * @param pp principal point of the camera.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param maxIters The maximum number of robust method iterations.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob threshold:(double)threshold maxIters:(int)maxIters NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:threshold:maxIters:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- * @param pp principal point of the camera.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:threshold:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- * @param pp principal point of the camera.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method prob:(double)prob NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:prob:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- * @param pp principal point of the camera.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp method:(int)method NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:method:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- * @param pp principal point of the camera.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal pp:(Point2d*)pp NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:pp:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param focal focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with the same focal length and principal point.
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 focal:(double)focal NS_SWIFT_NAME(findEssentialMat(points1:points2:focal:));
- /**
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- *
- * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
- * principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 NS_SWIFT_NAME(findEssentialMat(points1:points2:));
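- //
- // Sketch of the focal/pp convenience family above, under the same assumptions
- // as the earlier sketch (`Calib3d` wrapper class, `Point2d` initWithX:y:
- // initializer); the focal length and principal point values are hypothetical.
- // The two-argument form behaves as if focal = 1 and pp = (0, 0):
- //
- //     Point2d *pp = [[Point2d alloc] initWithX:320 y:240];
- //     Mat *E1 = [Calib3d findEssentialMat:points1 points2:points2 focal:800.0 pp:pp];
- //     Mat *E2 = [Calib3d findEssentialMat:points1 points2:points2]; // identity intrinsics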
- //
- // Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
- //
- /**
- * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param distCoeffs1 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param distCoeffs2 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
- * for the other points. The array is computed only in the RANSAC and LMedS methods.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method prob:(double)prob threshold:(double)threshold mask:(Mat*)mask NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:prob:threshold:mask:));
- /**
- * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param distCoeffs1 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param distCoeffs2 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:prob:threshold:));
- /**
- * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param distCoeffs1 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param distCoeffs2 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method prob:(double)prob NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:prob:));
- /**
- * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param distCoeffs1 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param distCoeffs2 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 method:(int)method NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:method:));
- /**
- * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
- *
- * @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
- * be floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param cameraMatrix2 Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera matrix. If this assumption does not hold for your use case, use
- * #undistortPoints with `P = cv::NoArray()` for both cameras to transform image points
- * to normalized image coordinates, which are valid for the identity camera matrix. When
- * passing these coordinates, pass the identity matrix for this parameter.
- * @param distCoeffs1 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param distCoeffs2 Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- *
- * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- *
- * `$$[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0$$`
- *
- * where `$$E$$` is an essential matrix, `$$p_1$$` and `$$p_2$$` are corresponding points in the first and the
- * second images, respectively. The result of this function may be passed further to
- * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:));
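- //
- // Sketch of the two-camera variant above, under the same assumptions; K1 and
- // K2 stand for per-camera intrinsic matrices from calibration, and the zero
- // distortion vectors stand in for real distortion coefficients:
- //
- //     Mat *dist1 = [Mat zeros:1 cols:5 type:CvType.CV_64F]; // no lens distortion
- //     Mat *dist2 = [Mat zeros:1 cols:5 type:CvType.CV_64F];
- //     Mat *E = [Calib3d findEssentialMat:points1 points2:points2
- //                          cameraMatrix1:K1 distCoeffs1:dist1
- //                          cameraMatrix2:K2 distCoeffs2:dist2];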
- //
- // Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params)
- //
- + (Mat*)findEssentialMat:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 cameraMatrix2:(Mat*)cameraMatrix2 dist_coeff1:(Mat*)dist_coeff1 dist_coeff2:(Mat*)dist_coeff2 mask:(Mat*)mask params:(UsacParams*)params NS_SWIFT_NAME(findEssentialMat(points1:points2:cameraMatrix1:cameraMatrix2:dist_coeff1:dist_coeff2:mask:params:));
- //
- // void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t)
- //
- /**
- * Decomposes an essential matrix into possible rotations and translation.
- *
- * @param E The input essential matrix.
- * @param R1 One possible rotation matrix.
- * @param R2 Another possible rotation matrix.
- * @param t One possible translation.
- *
- * This function decomposes the essential matrix E using SVD decomposition CITE: HartleyZ00. In
- * general, four possible poses exist for the decomposition of E. They are `$$[R_1, t]$$`,
- * `$$[R_1, -t]$$`, `$$[R_2, t]$$`, `$$[R_2, -t]$$`.
- *
- * If E gives the epipolar constraint `$$[p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0$$` between the image
- * points `$$p_1$$` in the first image and `$$p_2$$` in second image, then any of the tuples
- * `$$[R_1, t]$$`, `$$[R_1, -t]$$`, `$$[R_2, t]$$`, `$$[R_2, -t]$$` is a change of basis from the first
- * camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
- * can only get the direction of the translation. For this reason, the translation t is returned with
- * unit length.
- */
- + (void)decomposeEssentialMat:(Mat*)E R1:(Mat*)R1 R2:(Mat*)R2 t:(Mat*)t NS_SWIFT_NAME(decomposeEssentialMat(E:R1:R2:t:));
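- //
- // Sketch of enumerating the four pose hypotheses described above, under the
- // same `Calib3d` wrapper-class assumption:
- //
- //     Mat *R1 = [Mat new], *R2 = [Mat new], *t = [Mat new];
- //     [Calib3d decomposeEssentialMat:E R1:R1 R2:R2 t:t];
- //     // Candidate poses: [R1, t], [R1, -t], [R2, t], [R2, -t]; t has unit
- //     // length. recoverPose below selects the hypothesis whose triangulated
- //     // points have positive depth.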
- //
- // int cv::recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat& E, Mat& R, Mat& t, int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
- //
- /**
- * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param E The output essential matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- * recover pose. In the output mask only the inliers which pass the cheirality check are set.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for findEssentialMat:
- *
- * // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- *
- * // Output: Essential matrix, relative rotation and relative translation.
- * Mat E, R, t, mask;
- *
- * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method prob:(double)prob threshold:(double)threshold mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:prob:threshold:mask:));
- /**
- * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param E The output essential matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- * line in pixels, beyond which the point is considered an outlier and is not used for computing the
- * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- * point localization, image resolution, and the image noise.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for findEssentialMat:
- *
- * // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- *
- * // Output: Essential matrix, relative rotation and relative translation.
- * Mat E, R, t, mask;
- *
- * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method prob:(double)prob threshold:(double)threshold NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:prob:threshold:));
- /**
- * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param E The output essential matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- * confidence (probability) that the estimated matrix is correct.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for findEssentialMat:
- *
- * // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- *
- * // Output: Essential matrix, relative rotation and relative translation.
- * Mat E, R, t, mask;
- *
- * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method prob:(double)prob NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:prob:));
- /**
- * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param E The output essential matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param method Method for computing an essential matrix.
- * - REF: RANSAC for the RANSAC algorithm.
- * - REF: LMEDS for the LMedS algorithm.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for findEssentialMat:
- *
- * // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- *
- * // Output: Essential matrix, relative rotation and relative translation.
- * Mat E, R, t, mask;
- *
- * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t method:(int)method NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:method:));
- /**
- * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
- * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
- * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
- * REF: calibrateCamera.
- * @param E The output essential matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing the cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for findEssentialMat:
- *
- * // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- *
- * // Output: Essential matrix, relative rotation and relative translation.
- * Mat E, R, t, mask;
- *
- * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)points1 points2:(Mat*)points2 cameraMatrix1:(Mat*)cameraMatrix1 distCoeffs1:(Mat*)distCoeffs1 cameraMatrix2:(Mat*)cameraMatrix2 distCoeffs2:(Mat*)distCoeffs2 E:(Mat*)E R:(Mat*)R t:(Mat*)t NS_SWIFT_NAME(recoverPose(points1:points2:cameraMatrix1:distCoeffs1:cameraMatrix2:distCoeffs2:E:R:t:));
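- //
- // The embedded C++ example above maps onto these bindings roughly as follows,
- // under the same assumptions; K1/dist1/K2/dist2 come from a prior calibration:
- //
- //     Mat *E = [Mat new], *R = [Mat new], *t = [Mat new];
- //     int inliers = [Calib3d recoverPose:points1 points2:points2
- //                          cameraMatrix1:K1 distCoeffs1:dist1
- //                          cameraMatrix2:K2 distCoeffs2:dist2
- //                                      E:E R:R t:t];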
- //
- // int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
- //
- /**
- * Recovers the relative camera rotation and the translation from an estimated essential
- * matrix and the corresponding points in two images, using the cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- * recover pose. In the output mask only the inliers which pass the cheirality check are set.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for #findEssentialMat:
- *
- * // Example. Estimation of essential matrix using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // camera matrix with both focal lengths = 1, and principal point = (0, 0)
- * Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
- *
- * Mat E, R, t, mask;
- *
- * E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
- * recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:mask:));
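- //
- // A minimal usage sketch (editorial; assumes these class methods live on the
- // opencv2 Calib3d wrapper class, and that E, points1, points2 and cameraMatrix
- // are Mat instances prepared elsewhere, e.g. E from findEssentialMat):
- //
- //     Mat *R = [Mat new], *t = [Mat new], *mask = [Mat new];
- //     int nInliers = [Calib3d recoverPose:E points1:points1 points2:points2
- //                              cameraMatrix:cameraMatrix R:R t:t mask:mask];
- //     // R and t hold the relative pose; mask flags cheirality-check inliers.
- //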
- /**
- * Recovers the relative camera rotation and the translation from an estimated essential
- * matrix and the corresponding points in two images, using cheirality check. Returns the number of
- * inliers that pass the check.
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * described below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * Since no mask is passed, all points in points1 and points2 are used to recover pose.
- *
- * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- * possible pose hypotheses by doing cheirality check. The cheirality check means that the
- * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- *
- * This function can be used to process the output E and mask from REF: findEssentialMat. In this
- * scenario, points1 and points2 are the same input as for #findEssentialMat:
- *
- * // Example. Estimation of essential matrix using the RANSAC algorithm
- * int point_count = 100;
- * vector<Point2f> points1(point_count);
- * vector<Point2f> points2(point_count);
- *
- * // initialize the points here ...
- * for( int i = 0; i < point_count; i++ )
- * {
- * points1[i] = ...;
- * points2[i] = ...;
- * }
- *
- * // camera matrix with both focal lengths = 1, and principal point = (0, 0)
- * Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
- *
- * Mat E, R, t, mask;
- *
- * E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
- * recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
- *
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:));
- //
- // int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
- //
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with same focal length and principal point.
- * @param pp principal point of the camera.
- * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- * inliers in points1 and points2 for the given essential matrix E. Only these inliers
- * will be used to recover pose. In the output mask, only inliers which pass the
- * cheirality check are marked.
- *
- * This function differs from the one above in that it computes the camera intrinsic
- * matrix from the focal length and principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t focal:(double)focal pp:(Point2d*)pp mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:focal:pp:mask:));
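- //
- // Sketch (editorial): the same recovery with intrinsics given as a focal length
- // and principal point. Point2d's initWithX:y: initializer is assumed from the
- // opencv2 bindings, and 800 / (320, 240) are illustrative values:
- //
- //     Point2d *pp = [[Point2d alloc] initWithX:320 y:240];
- //     Mat *R = [Mat new], *t = [Mat new], *mask = [Mat new];
- //     int nInliers = [Calib3d recoverPose:E points1:points1 points2:points2
- //                                       R:R t:t focal:800 pp:pp mask:mask];
- //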
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with same focal length and principal point.
- * @param pp principal point of the camera.
- * Since no mask is passed, all points in points1 and points2 are used to recover pose.
- *
- * This function differs from the one above in that it computes the camera intrinsic
- * matrix from the focal length and principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t focal:(double)focal pp:(Point2d*)pp NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:focal:pp:));
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
- * are feature points from cameras with same focal length and principal point.
- * Since no mask is passed, all points in points1 and points2 are used to recover pose.
- *
- * This function differs from the one above in that it computes the camera intrinsic
- * matrix from the focal length and principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t focal:(double)focal NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:focal:));
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1 .
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * This overload assumes cameras with the default focal length (1.0) and principal
- * point at the origin. Since no mask is passed, all points in points1 and points2
- * are used to recover pose.
- *
- * This function differs from the one above in that it computes the camera intrinsic
- * matrix from the focal length and principal point:
- *
- * `$$A =
- * \begin{bmatrix}
- * f & 0 & x_{pp} \\
- * 0 & f & y_{pp} \\
- * 0 & 0 & 1
- * \end{bmatrix}$$`
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 R:(Mat*)R t:(Mat*)t NS_SWIFT_NAME(recoverPose(E:points1:points2:R:t:));
- //
- // int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
- //
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1.
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
- * points).
- * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- * inliers in points1 and points2 for the given essential matrix E. Only these inliers
- * will be used to recover pose. In the output mask, only inliers which pass the
- * cheirality check are marked.
- * @param triangulatedPoints 3D points which were reconstructed by triangulation.
- *
- * This function differs from the one above in that it also outputs the triangulated
- * 3D points that are used for the cheirality check.
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t distanceThresh:(double)distanceThresh mask:(Mat*)mask triangulatedPoints:(Mat*)triangulatedPoints NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:distanceThresh:mask:triangulatedPoints:));
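- //
- // Sketch (editorial): recovering pose while keeping the triangulated points
- // used for the cheirality check; the 50.0 threshold is an arbitrary example
- // value for filtering out far-away (near-infinite) points:
- //
- //     Mat *R = [Mat new], *t = [Mat new], *mask = [Mat new];
- //     Mat *pts3d = [Mat new];
- //     int nInliers = [Calib3d recoverPose:E points1:points1 points2:points2
- //                              cameraMatrix:cameraMatrix R:R t:t
- //                            distanceThresh:50.0 mask:mask
- //                        triangulatedPoints:pts3d];
- //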
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1.
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
- * points).
- * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- * inliers in points1 and points2 for the given essential matrix E. Only these inliers
- * will be used to recover pose. In the output mask, only inliers which pass the
- * cheirality check are marked.
- *
- * Like the variant above, this function uses the triangulated 3D points in the
- * cheirality check, but it does not return them.
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t distanceThresh:(double)distanceThresh mask:(Mat*)mask NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:distanceThresh:mask:));
- /**
- *
- * @param E The input essential matrix.
- * @param points1 Array of N 2D points from the first image. The point coordinates should be
- * floating-point (single or double precision).
- * @param points2 Array of the second image points of the same size and format as points1.
- * @param cameraMatrix Camera intrinsic matrix `$$\cameramatrix{A}$$` .
- * Note that this function assumes that points1 and points2 are feature points from cameras with the
- * same camera intrinsic matrix.
- * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- * that performs a change of basis from the first camera's coordinate system to the second camera's
- * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- * description below.
- * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- * length.
- * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
- * points).
- * Since no mask is passed, all points in points1 and points2 are used to recover pose.
- *
- * Like the variant above, this function uses the triangulated 3D points in the
- * cheirality check, but it does not return them.
- */
- + (int)recoverPose:(Mat*)E points1:(Mat*)points1 points2:(Mat*)points2 cameraMatrix:(Mat*)cameraMatrix R:(Mat*)R t:(Mat*)t distanceThresh:(double)distanceThresh NS_SWIFT_NAME(recoverPose(E:points1:points2:cameraMatrix:R:t:distanceThresh:));
- //
- // void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
- //
- /**
- * For points in an image of a stereo pair, computes the corresponding epilines in the other image.
- *
- * @param points Input points. `$$N \times 1$$` or `$$1 \times N$$` matrix of type CV_32FC2 or
- * vector\<Point2f\> .
- * @param whichImage Index of the image (1 or 2) that contains the points .
- * @param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify .
- * @param lines Output vector of the epipolar lines corresponding to the points in the other image.
- * Each line `$$ax + by + c=0$$` is encoded by 3 numbers `$$(a, b, c)$$` .
- *
- * For every point in one of the two images of a stereo pair, the function finds the equation of the
- * corresponding epipolar line in the other image.
- *
- * From the fundamental matrix definition (see #findFundamentalMat ), line `$$l^{(2)}_i$$` in the second
- * image for the point `$$p^{(1)}_i$$` in the first image (when whichImage=1 ) is computed as:
- *
- * `$$l^{(2)}_i = F p^{(1)}_i$$`
- *
- * And vice versa, when whichImage=2, `$$l^{(1)}_i$$` is computed from `$$p^{(2)}_i$$` as:
- *
- * `$$l^{(1)}_i = F^T p^{(2)}_i$$`
- *
- * Line coefficients are defined up to a scale. They are normalized so that `$$a_i^2+b_i^2=1$$` .
- */
- + (void)computeCorrespondEpilines:(Mat*)points whichImage:(int)whichImage F:(Mat*)F lines:(Mat*)lines NS_SWIFT_NAME(computeCorrespondEpilines(points:whichImage:F:lines:));
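- //
- // Sketch (editorial): computing the epipolar lines in the second image for
- // points detected in the first one; points is an Nx1 CV_32FC2 Mat and F a
- // fundamental matrix obtained elsewhere (e.g. from findFundamentalMat):
- //
- //     Mat *lines = [Mat new];
- //     [Calib3d computeCorrespondEpilines:points whichImage:1 F:F lines:lines];
- //     // each entry of lines holds (a, b, c) with a*x + b*y + c = 0
- //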
- //
- // void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
- //
- /**
- * This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
- * their observations with a stereo camera.
- *
- * @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
- * given in the world's coordinate system into the first image.
- * @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
- * given in the world's coordinate system into the second image.
- * @param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
- * it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.
- * @param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
- * version, it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.
- * @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
- * returned in the world's coordinate system.
- *
- * NOTE:
- * Keep in mind that all input data should be of float type in order for this function to work.
- *
- * NOTE:
- * If the projection matrices from REF: stereoRectify are used, then the returned points are
- * represented in the first camera's rectified coordinate system.
- *
- * @sa
- * reprojectImageTo3D
- */
- + (void)triangulatePoints:(Mat*)projMatr1 projMatr2:(Mat*)projMatr2 projPoints1:(Mat*)projPoints1 projPoints2:(Mat*)projPoints2 points4D:(Mat*)points4D NS_SWIFT_NAME(triangulatePoints(projMatr1:projMatr2:projPoints1:projPoints2:points4D:));
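- //
- // Sketch (editorial): triangulating matched points given the 3x4 projection
- // matrices P1 and P2 of the two cameras; projPoints1/projPoints2 are 2xN
- // float Mats:
- //
- //     Mat *points4D = [Mat new];
- //     [Calib3d triangulatePoints:P1 projMatr2:P2 projPoints1:projPoints1
- //                    projPoints2:projPoints2 points4D:points4D];
- //     // divide each column of points4D by its fourth row to get (X, Y, Z)
- //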
- //
- // void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
- //
- /**
- * Refines coordinates of corresponding points.
- *
- * @param F 3x3 fundamental matrix.
- * @param points1 1xN array containing the first set of points.
- * @param points2 1xN array containing the second set of points.
- * @param newPoints1 The optimized points1.
- * @param newPoints2 The optimized points2.
- *
- * The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
- * For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
- * computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
- * error `$$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2$$` (where `$$d(a,b)$$` is the
- * geometric distance between points `$$a$$` and `$$b$$` ) subject to the epipolar constraint
- * `$$newPoints2^T * F * newPoints1 = 0$$` .
- */
- + (void)correctMatches:(Mat*)F points1:(Mat*)points1 points2:(Mat*)points2 newPoints1:(Mat*)newPoints1 newPoints2:(Mat*)newPoints2 NS_SWIFT_NAME(correctMatches(F:points1:points2:newPoints1:newPoints2:));
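- //
- // Sketch (editorial): snapping noisy correspondences onto the epipolar
- // geometry before triangulation; points1 and points2 are 1xN two-channel
- // Mats and F a fundamental matrix:
- //
- //     Mat *newPoints1 = [Mat new], *newPoints2 = [Mat new];
- //     [Calib3d correctMatches:F points1:points1 points2:points2
- //                  newPoints1:newPoints1 newPoints2:newPoints2];
- //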
- //
- // void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
- //
- /**
- * Filters off small noise blobs (speckles) in the disparity map
- *
- * @param img The input 16-bit signed disparity image
- * @param newVal The disparity value used to paint-off the speckles
- * @param maxSpeckleSize The maximum blob size that is still considered a speckle. Larger blobs are not
- * affected by the algorithm
- * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
- * blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
- * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
- * account when specifying this parameter value.
- * @param buf The optional temporary buffer to avoid memory allocation within the function.
- */
- + (void)filterSpeckles:(Mat*)img newVal:(double)newVal maxSpeckleSize:(int)maxSpeckleSize maxDiff:(double)maxDiff buf:(Mat*)buf NS_SWIFT_NAME(filterSpeckles(img:newVal:maxSpeckleSize:maxDiff:buf:));
- /**
- * Filters off small noise blobs (speckles) in the disparity map
- *
- * @param img The input 16-bit signed disparity image
- * @param newVal The disparity value used to paint-off the speckles
- * @param maxSpeckleSize The maximum blob size that is still considered a speckle. Larger blobs are not
- * affected by the algorithm
- * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
- * blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
- * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
- * account when specifying this parameter value.
- */
- + (void)filterSpeckles:(Mat*)img newVal:(double)newVal maxSpeckleSize:(int)maxSpeckleSize maxDiff:(double)maxDiff NS_SWIFT_NAME(filterSpeckles(img:newVal:maxSpeckleSize:maxDiff:));
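- //
- // Sketch (editorial): cleaning a fixed-point StereoBM/StereoSGBM disparity
- // map in place. Since such maps are scaled by 16, a maximum neighbor
- // difference of one disparity level is passed as 16:
- //
- //     [Calib3d filterSpeckles:disparity newVal:0 maxSpeckleSize:50 maxDiff:16];
- //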
- //
- // Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
- //
- + (Rect2i*)getValidDisparityROI:(Rect2i*)roi1 roi2:(Rect2i*)roi2 minDisparity:(int)minDisparity numberOfDisparities:(int)numberOfDisparities blockSize:(int)blockSize NS_SWIFT_NAME(getValidDisparityROI(roi1:roi2:minDisparity:numberOfDisparities:blockSize:));
- //
- // void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
- //
- + (void)validateDisparity:(Mat*)disparity cost:(Mat*)cost minDisparity:(int)minDisparity numberOfDisparities:(int)numberOfDisparities disp12MaxDisp:(int)disp12MaxDisp NS_SWIFT_NAME(validateDisparity(disparity:cost:minDisparity:numberOfDisparities:disp12MaxDisp:));
- + (void)validateDisparity:(Mat*)disparity cost:(Mat*)cost minDisparity:(int)minDisparity numberOfDisparities:(int)numberOfDisparities NS_SWIFT_NAME(validateDisparity(disparity:cost:minDisparity:numberOfDisparities:));
- //
- // void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
- //
- /**
- * Reprojects a disparity image to 3D space.
- *
- * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
- * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
- * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
- * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
- * being used here.
- * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
- * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
- * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
- * camera's rectified coordinate system.
- * @param Q `$$4 \times 4$$` perspective transformation matrix that can be obtained with
- * REF: stereoRectify.
- * @param handleMissingValues Indicates, whether the function should handle missing values (i.e.
- * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
- * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
- * to 3D points with a very large Z value (currently set to 10000).
- * @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
- * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
- *
- * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
- * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
- * computes:
- *
- * `$$\begin{bmatrix}
- * X \\
- * Y \\
- * Z \\
- * W
- * \end{bmatrix} = Q \begin{bmatrix}
- * x \\
- * y \\
- * \texttt{disparity} (x,y) \\
- * 1
- * \end{bmatrix}.$$`
- *
- * @sa
- * To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
- */
- + (void)reprojectImageTo3D:(Mat*)disparity _3dImage:(Mat*)_3dImage Q:(Mat*)Q handleMissingValues:(BOOL)handleMissingValues ddepth:(int)ddepth NS_SWIFT_NAME(reprojectImageTo3D(disparity:_3dImage:Q:handleMissingValues:ddepth:));
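- //
- // Sketch (editorial): converting a 16-bit StereoSGBM disparity map to a 3D
- // image. The map is first divided by 16 and converted to float, as required
- // above; Mat's convertTo:rtype:alpha: method is assumed from the opencv2
- // bindings:
- //
- //     Mat *disp32f = [Mat new];
- //     [disparity convertTo:disp32f rtype:CV_32F alpha:1.0/16.0];
- //     Mat *xyz = [Mat new];
- //     [Calib3d reprojectImageTo3D:disp32f _3dImage:xyz Q:Q
- //                handleMissingValues:YES ddepth:-1];
- //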
- /**
- * Reprojects a disparity image to 3D space.
- *
- * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
- * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
- * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
- * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
- * being used here.
- * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
- * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
- * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
- * camera's rectified coordinate system.
- * @param Q `$$4 \times 4$$` perspective transformation matrix that can be obtained with
- * REF: stereoRectify.
- * @param handleMissingValues Indicates, whether the function should handle missing values (i.e.
- * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
- * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
- * to 3D points with a very large Z value (currently set to 10000).
- * Since ddepth is not specified, the output image will have CV_32F depth.
- *
- * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
- * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
- * computes:
- *
- * `$$\begin{bmatrix}
- * X \\
- * Y \\
- * Z \\
- * W
- * \end{bmatrix} = Q \begin{bmatrix}
- * x \\
- * y \\
- * \texttt{disparity} (x,y) \\
- * 1
- * \end{bmatrix}.$$`
- *
- * @sa
- * To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
- */
- + (void)reprojectImageTo3D:(Mat*)disparity _3dImage:(Mat*)_3dImage Q:(Mat*)Q handleMissingValues:(BOOL)handleMissingValues NS_SWIFT_NAME(reprojectImageTo3D(disparity:_3dImage:Q:handleMissingValues:));
- /**
- * Reprojects a disparity image to 3D space.
- *
- * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
- * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
- * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
- * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
- * being used here.
- * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
- * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
- * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
- * camera's rectified coordinate system.
- * @param Q `$$4 \times 4$$` perspective transformation matrix that can be obtained with
- * REF: stereoRectify.
- * Since handleMissingValues is not specified, missing values (points where the
- * disparity was not computed) are not specially handled, and the output image will
- * have CV_32F depth.
- *
- * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
- * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
- * computes:
- *
- * `$$\begin{bmatrix}
- * X \\
- * Y \\
- * Z \\
- * W
- * \end{bmatrix} = Q \begin{bmatrix}
- * x \\
- * y \\
- * \texttt{disparity} (x,y) \\
- * 1
- * \end{bmatrix}.$$`
- *
- * @sa
- * To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
- */
- + (void)reprojectImageTo3D:(Mat*)disparity _3dImage:(Mat*)_3dImage Q:(Mat*)Q NS_SWIFT_NAME(reprojectImageTo3D(disparity:_3dImage:Q:));
- //
- // double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
- //
- /**
- * Calculates the Sampson Distance between two points.
- *
- * The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as:
- * `$$
- * sd( \texttt{pt1} , \texttt{pt2} )=
- * \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
- * {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
- * ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
- * ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
- * ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
- * $$`
- * The fundamental matrix may be calculated using the #findFundamentalMat function. See CITE: HartleyZ00 11.4.3 for details.
- * @param pt1 first homogeneous 2d point
- * @param pt2 second homogeneous 2d point
- * @param F fundamental matrix
- * @return The computed Sampson distance.
- */
- + (double)sampsonDistance:(Mat*)pt1 pt2:(Mat*)pt2 F:(Mat*)F NS_SWIFT_NAME(sampsonDistance(pt1:pt2:F:));
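- //
- // Sketch (editorial): pt1 and pt2 are 3x1 Mats holding a correspondence in
- // homogeneous coordinates (x, y, 1), and F relates the two views:
- //
- //     double err = [Calib3d sampsonDistance:pt1 pt2:pt2 F:F];
- //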
- //
- // int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
- //
- /**
- * Computes an optimal affine transformation between two 3D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12} & a_{13}\\
- * a_{21} & a_{22} & a_{23}\\
- * a_{31} & a_{32} & a_{33}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * $$`
- *
- * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
- * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
- * @param out Output 3D affine transformation matrix `$$3 \times 4$$` of the form
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & a_{13} & b_1\\
- * a_{21} & a_{22} & a_{23} & b_2\\
- * a_{31} & a_{32} & a_{33} & b_3\\
- * \end{bmatrix}
- * $$`
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
- * an inlier.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- *
- * The function estimates an optimal 3D affine transformation between two 3D point sets using the
- * RANSAC algorithm.
- */
- + (int)estimateAffine3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold confidence:(double)confidence NS_SWIFT_NAME(estimateAffine3D(src:dst:out:inliers:ransacThreshold:confidence:));
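- //
- // Sketch (editorial): robustly fitting a 3D affine transform between two
- // point clouds src and dst (e.g. 1xN three-channel Mats of matched points):
- //
- //     Mat *T = [Mat new], *inliers = [Mat new];
- //     int ok = [Calib3d estimateAffine3D:src dst:dst out:T inliers:inliers
- //                        ransacThreshold:3 confidence:0.99];
- //     // on success T is the 3x4 [A | b] matrix described above
- //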
- /**
- * Computes an optimal affine transformation between two 3D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12} & a_{13}\\
- * a_{21} & a_{22} & a_{23}\\
- * a_{31} & a_{32} & a_{33}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * $$`
- *
- * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
- * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
- * @param out Output 3D affine transformation matrix `$$3 \times 4$$` of the form
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & a_{13} & b_1\\
- * a_{21} & a_{22} & a_{23} & b_2\\
- * a_{31} & a_{32} & a_{33} & b_3\\
- * \end{bmatrix}
- * $$`
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
- * an inlier.
- * The confidence level defaults to 0.99; values between 0.95 and 0.99 are usually good
- * enough. Values too close to 1 can slow down the estimation significantly, while
- * values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- *
- * The function estimates an optimal 3D affine transformation between two 3D point sets using the
- * RANSAC algorithm.
- */
- + (int)estimateAffine3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold NS_SWIFT_NAME(estimateAffine3D(src:dst:out:inliers:ransacThreshold:));
- /**
- * Computes an optimal affine transformation between two 3D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12} & a_{13}\\
- * a_{21} & a_{22} & a_{23}\\
- * a_{31} & a_{32} & a_{33}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * $$`
- *
- * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
- * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
- * @param out Output 3D affine transformation matrix `$$3 \times 4$$` of the form
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & a_{13} & b_1\\
- * a_{21} & a_{22} & a_{23} & b_2\\
- * a_{31} & a_{32} & a_{33} & b_3\\
- * \end{bmatrix}
- * $$`
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * The RANSAC reprojection threshold defaults to 3 and the confidence level to 0.99
- * (values between 0.95 and 0.99 are usually good enough).
- *
- * The function estimates an optimal 3D affine transformation between two 3D point sets using the
- * RANSAC algorithm.
- */
- + (int)estimateAffine3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers NS_SWIFT_NAME(estimateAffine3D(src:dst:out:inliers:));
- //
- // Mat cv::estimateAffine3D(Mat src, Mat dst, double* scale = nullptr, bool force_rotation = true)
- //
- /**
- * Computes an optimal affine transformation between two 3D point sets.
- *
- * It computes `$$R,s,t$$` minimizing `$$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2$$`,
- * where `$$R$$` is a 3x3 rotation matrix, `$$t$$` is a 3x1 translation vector and `$$s$$` is a
- * scalar scale value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
- * The estimated affine transform has a homogeneous (uniform) scale, making it a
- * subclass of affine transformations with 7 degrees of freedom. The paired point sets
- * need to comprise at least 3 points each.
- *
- * @param src First input 3D point set.
- * @param dst Second input 3D point set.
- * @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
- * Else the pointed-to variable will be set to the optimal scale.
- * @param force_rotation If true, the returned rotation will never be a reflection.
- * This might be unwanted, e.g. when optimizing a transform between a right- and a
- * left-handed coordinate system.
- * @return 3D affine transformation matrix `$$3 \times 4$$` of the form
- * `$$T =
- * \begin{bmatrix}
- * R & t\\
- * \end{bmatrix}
- * $$`
- */
- + (Mat*)estimateAffine3D:(Mat*)src dst:(Mat*)dst scale:(double*)scale force_rotation:(BOOL)force_rotation NS_SWIFT_NAME(estimateAffine3D(src:dst:scale:force_rotation:));
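- //
- // Sketch (editorial): the Umeyama variant returns the 3x4 transform directly
- // and can report the estimated uniform scale through a plain double pointer:
- //
- //     double s = 1.0;
- //     Mat *T = [Calib3d estimateAffine3D:src dst:dst scale:&s
- //                         force_rotation:YES];
- //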
- /**
- * Computes an optimal affine transformation between two 3D point sets.
- *
- * It computes `$$R,s,t$$` minimizing `$$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2$$`,
- * where `$$R$$` is a 3x3 rotation matrix, `$$t$$` is a 3x1 translation vector and `$$s$$` is a
- * scalar scale value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
- * The estimated affine transform has a homogeneous (uniform) scale, making it a
- * subclass of affine transformations with 7 degrees of freedom. The paired point sets
- * need to comprise at least 3 points each.
- *
- * @param src First input 3D point set.
- * @param dst Second input 3D point set.
- * @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
- * Else the pointed-to variable will be set to the optimal scale.
- * force_rotation defaults to true, so the returned rotation will never be a
- * reflection. This might be unwanted, e.g. when optimizing a transform between a
- * right- and a left-handed coordinate system.
- * @return 3D affine transformation matrix `$$3 \times 4$$` of the form
- * `$$T =
- * \begin{bmatrix}
- * R & t\\
- * \end{bmatrix}
- * $$`
- */
- + (Mat*)estimateAffine3D:(Mat*)src dst:(Mat*)dst scale:(double*)scale NS_SWIFT_NAME(estimateAffine3D(src:dst:scale:));
- /**
- * Computes an optimal affine transformation between two 3D point sets.
- *
- * It computes `$$R,s,t$$` minimizing `$$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2$$`,
- * where `$$R$$` is a 3x3 rotation matrix, `$$t$$` is a 3x1 translation vector and `$$s$$` is a
- * scalar scale value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
- * The estimated affine transform has a homogeneous (uniform) scale, making it a
- * subclass of affine transformations with 7 degrees of freedom. The paired point sets
- * need to comprise at least 3 points each.
- *
- * @param src First input 3D point set.
- * @param dst Second input 3D point set.
- * Since no scale pointer is passed, the scale parameter s is assumed to be 1.0.
- * force_rotation defaults to true, so the returned rotation will never be a
- * reflection.
- * @return 3D affine transformation matrix `$$3 \times 4$$` of the form
- * `$$T =
- * \begin{bmatrix}
- * R & t\\
- * \end{bmatrix}
- * $$`
- */
- + (Mat*)estimateAffine3D:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(estimateAffine3D(src:dst:));
- //
- // int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
- //
- /**
- * Computes an optimal translation between two 3D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * $$`
- *
- * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
- * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
- * @param out Output 3D translation vector `$$3 \times 1$$` of the form
- * `$$
- * \begin{bmatrix}
- * b_1 \\
- * b_2 \\
- * b_3 \\
- * \end{bmatrix}
- * $$`
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
- * an inlier.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- *
- * The function estimates an optimal 3D translation between two 3D point sets using the
- * RANSAC algorithm.
- *
- */
- + (int)estimateTranslation3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold confidence:(double)confidence NS_SWIFT_NAME(estimateTranslation3D(src:dst:out:inliers:ransacThreshold:confidence:));
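- //
- // Sketch (editorial): when the two point sets differ only by a shift, fitting
- // just the translation is cheaper and better conditioned than a full affine
- // estimate:
- //
- //     Mat *tvec = [Mat new], *inliers = [Mat new];
- //     int ok = [Calib3d estimateTranslation3D:src dst:dst out:tvec
- //                                     inliers:inliers ransacThreshold:3
- //                                  confidence:0.99];
- //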
- /**
- * Computes an optimal translation between two 3D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * $$`
- *
- * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
- * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
- * @param out Output 3D translation vector `$$3 \times 1$$` of the form
- * `$$
- * \begin{bmatrix}
- * b_1 \\
- * b_2 \\
- * b_3 \\
- * \end{bmatrix}
- * $$`
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
- * an inlier.
- * The confidence level defaults to 0.99; values between 0.95 and 0.99 are usually good
- * enough. Values too close to 1 can slow down the estimation significantly, while
- * values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- *
- * The function estimates an optimal 3D translation between two 3D point sets using the
- * RANSAC algorithm.
- *
- */
- + (int)estimateTranslation3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers ransacThreshold:(double)ransacThreshold NS_SWIFT_NAME(estimateTranslation3D(src:dst:out:inliers:ransacThreshold:));
- /**
- * Computes an optimal translation between two 3D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * $$`
- *
- * @param src First input 3D point set containing `$$(X,Y,Z)$$`.
- * @param dst Second input 3D point set containing `$$(x,y,z)$$`.
- * @param out Output 3D translation vector `$$3 \times 1$$` of the form
- * `$$
- * \begin{bmatrix}
- * b_1 \\
- * b_2 \\
- * b_3 \\
- * \end{bmatrix}
- * $$`
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * The RANSAC reprojection threshold defaults to 3 and the confidence level to 0.99
- * (values between 0.95 and 0.99 are usually good enough).
- *
- * The function estimates an optimal 3D translation between two 3D point sets using the
- * RANSAC algorithm.
- *
- */
- + (int)estimateTranslation3D:(Mat*)src dst:(Mat*)dst out:(Mat*)out inliers:(Mat*)inliers NS_SWIFT_NAME(estimateTranslation3D(src:dst:out:inliers:));
- //
- // Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
- //
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * @param maxIters The maximum number of robust method iterations.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- * @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
- * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when more than 50% of the points are inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence refineIters:(size_t)refineIters NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:));
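- //
- // Sketch (editorial): fitting a 2D affine transform with the defaults listed
- // in the signature comment above (RANSAC, threshold 3, 2000 iterations,
- // confidence 0.99, 10 refining iterations), via the convenience overload
- // declared below; Mat's empty method is assumed from the opencv2 bindings:
- //
- //     Mat *inliers = [Mat new];
- //     Mat *A = [Calib3d estimateAffine2D:from to:to inliers:inliers];
- //     if (![A empty]) {
- //         // A is the 2x3 [a | b] matrix described above
- //     }
- //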
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * @param maxIters The maximum number of robust method iterations.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- * The number of refining iterations defaults to 10; passing 0 would disable refining,
- * making the output matrix the direct output of the robust method.
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when more than 50% of the points are inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:));
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * @param maxIters The maximum number of robust method iterations.
- * The confidence level defaults to 0.99 (values between 0.95 and 0.99 are usually good
- * enough) and the number of refining iterations to 10.
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when more than 50% of the points are inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:maxIters:));
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * The maximum number of robust iterations defaults to 2000, the confidence level to
- * 0.99 (values between 0.95 and 0.99 are usually good enough), and the number of
- * refining iterations to 10.
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when more than 50% of the points are inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:ransacReprojThreshold:));
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * The remaining parameters keep their defaults: RANSAC reprojection threshold 3
- * (applied only to RANSAC), at most 2000 robust iterations, confidence level 0.99,
- * and 10 refining iterations.
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:method:));
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers NS_SWIFT_NAME(estimateAffine2D(from:to:inliers:));
- /**
- * Computes an optimal affine transformation between two 2D point sets.
- *
- * It computes
- * `$$
- * \begin{bmatrix}
- * x\\
- * y\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * a_{11} & a_{12}\\
- * a_{21} & a_{22}\\
- * \end{bmatrix}
- * \begin{bmatrix}
- * X\\
- * Y\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * \end{bmatrix}
- * $$`
- *
- * @param from First input 2D point set containing `$$(X,Y)$$`.
- * @param to Second input 2D point set containing `$$(x,y)$$`.
- *
- * @return Output 2D affine transformation matrix `$$2 \times 3$$` or empty matrix if transformation
- * could not be estimated. The returned matrix has the following form:
- * `$$
- * \begin{bmatrix}
- * a_{11} & a_{12} & b_1\\
- * a_{21} & a_{22} & b_2\\
- * \end{bmatrix}
- * $$`
- *
- * The function estimates an optimal 2D affine transformation between two 2D point sets using the
- * selected robust algorithm.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffinePartial2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffine2D:(Mat*)from to:(Mat*)to NS_SWIFT_NAME(estimateAffine2D(from:to:));
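- //
- // Usage sketch (Swift, illustrative only): estimating a full 2D affine transform from
- // matched point pairs. The `Calib3d` class name and Swift labels follow the
- // NS_SWIFT_NAME annotations above; the point values are made up for illustration.
- //
- //     import opencv2
- //
- //     let srcPts = MatOfPoint2f(array: [Point2f(x: 0, y: 0), Point2f(x: 100, y: 0),
- //                                       Point2f(x: 100, y: 100), Point2f(x: 0, y: 100)])
- //     let dstPts = MatOfPoint2f(array: [Point2f(x: 10, y: 20), Point2f(x: 110, y: 22),
- //                                       Point2f(x: 108, y: 122), Point2f(x: 8, y: 120)])
- //     let inliers = Mat()
- //     let M = Calib3d.estimateAffine2D(from: srcPts, to: dstPts, inliers: inliers)
- //     if !M.empty() {
- //         // M is the 2x3 matrix [a11 a12 b1; a21 a22 b2]; inliers marks surviving pairs.
- //     }
- //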
- //
- // Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params)
- //
- + (Mat*)estimateAffine2D:(Mat*)pts1 pts2:(Mat*)pts2 inliers:(Mat*)inliers params:(UsacParams*)params NS_SWIFT_NAME(estimateAffine2D(pts1:pts2:inliers:params:));
- //
- // Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
- //
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- * @param inliers Output vector indicating which points are inliers.
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * @param maxIters The maximum number of robust method iterations.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- * @param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
- * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence refineIters:(size_t)refineIters NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:));
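- //
- // Sketch (Swift, illustrative only): recovering the rotation angle and uniform scale
- // from the 4-DoF matrix using the form given above. `srcPts`/`dstPts` are hypothetical
- // matched point sets; 8 is the value of cv::RANSAC, and Mat's get(row:col:) accessor
- // (returning [NSNumber]) is assumed from the core bindings.
- //
- //     import Foundation
- //     import opencv2
- //
- //     let inliers = Mat()
- //     let M = Calib3d.estimateAffinePartial2D(from: srcPts, to: dstPts, inliers: inliers,
- //                                             method: 8, ransacReprojThreshold: 3)
- //     if !M.empty() {
- //         let a11 = M.get(row: 0, col: 0)[0].doubleValue  // cos(theta) * s
- //         let a21 = M.get(row: 1, col: 0)[0].doubleValue  // sin(theta) * s
- //         let theta = atan2(a21, a11)                     // rotation angle
- //         let s = (a11 * a11 + a21 * a21).squareRoot()    // uniform scale
- //     }
- //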
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- * @param inliers Output vector indicating which points are inliers.
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * @param maxIters The maximum number of robust method iterations.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters confidence:(double)confidence NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:maxIters:confidence:));
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- * @param inliers Output vector indicating which points are inliers.
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- * @param maxIters The maximum number of robust method iterations.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold maxIters:(size_t)maxIters NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:maxIters:));
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- * @param inliers Output vector indicating which points are inliers.
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
- * a point as an inlier. Applies only to RANSAC.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method ransacReprojThreshold:(double)ransacReprojThreshold NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:ransacReprojThreshold:));
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- * @param inliers Output vector indicating which points are inliers.
- * @param method Robust method used to compute transformation. The following methods are possible:
- * - REF: RANSAC - RANSAC-based robust method
- * - REF: LMEDS - Least-Median robust method
- * RANSAC is the default method.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers method:(int)method NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:method:));
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- * @param inliers Output vector indicating which points are inliers.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to inliers:(Mat*)inliers NS_SWIFT_NAME(estimateAffinePartial2D(from:to:inliers:));
- /**
- * Computes an optimal limited affine transformation with 4 degrees of freedom between
- * two 2D point sets.
- *
- * @param from First input 2D point set.
- * @param to Second input 2D point set.
- *
- * @return Output 2D affine transformation (4 degrees of freedom) matrix `$$2 \times 3$$` or
- * empty matrix if transformation could not be estimated.
- *
- * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- * estimation.
- *
- * The computed transformation is then refined further (using only inliers) with the
- * Levenberg-Marquardt method to reduce the re-projection error even more.
- *
- * Estimated transformation matrix is:
- * `$$ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- * \end{bmatrix} $$`
- * Where `$$ \theta $$` is the rotation angle, `$$ s $$` the scaling factor and `$$ t_x, t_y $$` are
- * translations in `$$ x, y $$` axes respectively.
- *
- * NOTE:
- * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- * correctly only when there are more than 50% of inliers.
- *
- * @see `+estimateAffine2D:to:inliers:method:ransacReprojThreshold:maxIters:confidence:refineIters:`, `getAffineTransform`
- */
- + (Mat*)estimateAffinePartial2D:(Mat*)from to:(Mat*)to NS_SWIFT_NAME(estimateAffinePartial2D(from:to:));
- //
- // int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
- //
- /**
- * Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
- *
- * @param H The input homography matrix between two images.
- * @param K The input camera intrinsic matrix.
- * @param rotations Array of rotation matrices.
- * @param translations Array of translation matrices.
- * @param normals Array of plane normal matrices.
- *
- * This function extracts relative camera motion between two views of a planar object and returns up to
- * four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
- * the homography matrix H is described in detail in CITE: Malis.
- *
- * If the homography H, induced by the plane, gives the constraint
- * `$$s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}$$` on the source image points
- * `$$p_i$$` and the destination image points `$$p'_i$$`, then the tuple of rotations[k] and
- * translations[k] is a change of basis from the source camera's coordinate system to the destination
- * camera's coordinate system. However, by decomposing H, one can only get the translation normalized
- * by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
- *
- * If point correspondences are available, at least two solutions may further be invalidated, by
- * applying positive depth constraint, i.e. all points must be in front of the camera.
- */
- + (int)decomposeHomographyMat:(Mat*)H K:(Mat*)K rotations:(NSMutableArray<Mat*>*)rotations translations:(NSMutableArray<Mat*>*)translations normals:(NSMutableArray<Mat*>*)normals NS_SWIFT_NAME(decomposeHomographyMat(H:K:rotations:translations:normals:));
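- //
- // Sketch (Swift, illustrative only): decomposing a homography. `H` and `K` are assumed
- // to be valid 3x3 CV_64F Mats (e.g. H from findHomography, K from camera calibration).
- //
- //     import opencv2
- //
- //     let rotations = NSMutableArray()
- //     let translations = NSMutableArray()
- //     let normals = NSMutableArray()
- //     let nSolutions = Calib3d.decomposeHomographyMat(H: H, K: K, rotations: rotations,
- //                                                     translations: translations,
- //                                                     normals: normals)
- //     // nSolutions candidates (up to 4); each t is known only up to the plane depth.
- //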
- //
- // void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
- //
- /**
- * Filters homography decompositions based on additional information.
- *
- * @param rotations Vector of rotation matrices.
- * @param normals Vector of plane normal matrices.
- * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
- * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
- * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
- * @param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function
- *
- * This function is intended to filter the output of the #decomposeHomographyMat based on additional
- * information as described in CITE: Malis. The summary of the method: the #decomposeHomographyMat function
- * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
- * sets of points visible in the camera frame before and after the homography transformation is applied,
- * we can determine which are the true potential solutions and which are the opposites by verifying which
- * homographies are consistent with all visible reference points being in front of the camera. The inputs
- * are left unchanged; the filtered solution set is returned as indices into the existing one.
- */
- + (void)filterHomographyDecompByVisibleRefpoints:(NSArray<Mat*>*)rotations normals:(NSArray<Mat*>*)normals beforePoints:(Mat*)beforePoints afterPoints:(Mat*)afterPoints possibleSolutions:(Mat*)possibleSolutions pointsMask:(Mat*)pointsMask NS_SWIFT_NAME(filterHomographyDecompByVisibleRefpoints(rotations:normals:beforePoints:afterPoints:possibleSolutions:pointsMask:));
- /**
- * Filters homography decompositions based on additional information.
- *
- * @param rotations Vector of rotation matrices.
- * @param normals Vector of plane normal matrices.
- * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
- * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
- * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
- *
- * This function is intended to filter the output of the #decomposeHomographyMat based on additional
- * information as described in CITE: Malis. The summary of the method: the #decomposeHomographyMat function
- * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
- * sets of points visible in the camera frame before and after the homography transformation is applied,
- * we can determine which are the true potential solutions and which are the opposites by verifying which
- * homographies are consistent with all visible reference points being in front of the camera. The inputs
- * are left unchanged; the filtered solution set is returned as indices into the existing one.
- */
- + (void)filterHomographyDecompByVisibleRefpoints:(NSArray<Mat*>*)rotations normals:(NSArray<Mat*>*)normals beforePoints:(Mat*)beforePoints afterPoints:(Mat*)afterPoints possibleSolutions:(Mat*)possibleSolutions NS_SWIFT_NAME(filterHomographyDecompByVisibleRefpoints(rotations:normals:beforePoints:afterPoints:possibleSolutions:));
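- //
- // Sketch (Swift, illustrative only): pruning the candidates from decomposeHomographyMat
- // with rectified point correspondences. `beforePts`/`afterPts` are hypothetical Mats of
- // the rectified reference points on each side of H; `rotations`/`normals` are the
- // NSMutableArrays filled by the decomposition above.
- //
- //     import opencv2
- //
- //     let rotMats = rotations.compactMap { $0 as? Mat }
- //     let normMats = normals.compactMap { $0 as? Mat }
- //     let solutions = Mat()
- //     Calib3d.filterHomographyDecompByVisibleRefpoints(rotations: rotMats,
- //         normals: normMats, beforePoints: beforePts, afterPoints: afterPts,
- //         possibleSolutions: solutions)
- //     // `solutions` holds the indices of the decompositions that keep all reference
- //     // points in front of the camera.
- //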
- //
- // void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
- //
- /**
- * Transforms an image to compensate for lens distortion.
- *
- * The function transforms an image to compensate radial and tangential lens distortion.
- *
- * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
- * (with bilinear interpolation). See the former function for details of the transformation being
- * performed.
- *
- * Those pixels in the destination image for which there are no corresponding pixels in the source
- * image are filled with zeros (black).
- *
- * A particular subset of the source image that will be visible in the corrected image can be regulated
- * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
- * newCameraMatrix depending on your requirements.
- *
- * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
- * the resolution of images is different from the resolution used at the calibration stage, `$$f_x,
- * f_y, c_x$$` and `$$c_y$$` need to be scaled accordingly, while the distortion coefficients remain
- * the same.
- *
- * @param src Input (distorted) image.
- * @param dst Output (corrected) image that has the same size and type as src .
- * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
- * cameraMatrix but you may additionally scale and shift the result by using a different matrix.
- */
- + (void)undistort:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs newCameraMatrix:(Mat*)newCameraMatrix NS_SWIFT_NAME(undistort(src:dst:cameraMatrix:distCoeffs:newCameraMatrix:));
- /**
- * Transforms an image to compensate for lens distortion.
- *
- * The function transforms an image to compensate radial and tangential lens distortion.
- *
- * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
- * (with bilinear interpolation). See the former function for details of the transformation being
- * performed.
- *
- * Those pixels in the destination image for which there are no corresponding pixels in the source
- * image are filled with zeros (black).
- *
- * A particular subset of the source image that will be visible in the corrected image can be regulated
- * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
- * newCameraMatrix depending on your requirements.
- *
- * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
- * the resolution of images is different from the resolution used at the calibration stage, `$$f_x,
- * f_y, c_x$$` and `$$c_y$$` need to be scaled accordingly, while the distortion coefficients remain
- * the same.
- *
- * @param src Input (distorted) image.
- * @param dst Output (corrected) image that has the same size and type as src .
- * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- */
- + (void)undistort:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(undistort(src:dst:cameraMatrix:distCoeffs:));
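- //
- // Sketch (Swift, illustrative only): undistorting a full image. `K` (3x3) and `dist`
- // are assumed to come from a prior calibrateCamera run; `img` is the distorted input.
- //
- //     import opencv2
- //
- //     let corrected = Mat()
- //     Calib3d.undistort(src: img, dst: corrected, cameraMatrix: K, distCoeffs: dist)
- //     // Unmapped border pixels come out black; pass a newCameraMatrix (e.g. from
- //     // getOptimalNewCameraMatrix) to control how much of the source stays visible.
- //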
- //
- // void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
- //
- /**
- * Computes the undistortion and rectification transformation map.
- *
- * The function computes the joint undistortion and rectification transformation and represents the
- * result in the form of maps for #remap. The undistorted image looks like the original, as if it were
- * captured with a camera using the camera matrix = newCameraMatrix and zero distortion. In case of a
- * monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
- * #getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
- * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
- *
- * Also, this new camera is oriented differently in the coordinate space, according to R. That, for
- * example, helps to align two heads of a stereo camera so that the epipolar lines on both images
- * become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera).
- *
- * The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
- * is, for each pixel `$$(u, v)$$` in the destination (corrected and rectified) image, the function
- * computes the corresponding coordinates in the source image (that is, in the original image from
- * camera). The following process is applied:
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} }
- * \begin{array}{l}
- * x \leftarrow (u - {c'}_x)/{f'}_x \\
- * y \leftarrow (v - {c'}_y)/{f'}_y \\
- * {[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\
- * x' \leftarrow X/W \\
- * y' \leftarrow Y/W \\
- * r^2 \leftarrow x'^2 + y'^2 \\
- * x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
- * + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\
- * y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
- * + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
- * s\vecthree{x'''}{y'''}{1} =
- * \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
- * {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
- * {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
- * map_x(u,v) \leftarrow x''' f_x + c_x \\
- * map_y(u,v) \leftarrow y''' f_y + c_y
- * \end{array}
- * $$`
- * where `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * are the distortion coefficients.
- *
- * In case of a stereo camera, this function is called twice: once for each camera head, after
- * #stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
- * was not calibrated, it is still possible to compute the rectification transformations directly from
- * the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
- * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
- * space. R can be computed from H as
- * `$$\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}$$`
- * where cameraMatrix can be chosen arbitrarily.
- *
- * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
- * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
- * is assumed. In cvInitUndistortMap, R is assumed to be an identity matrix.
- * @param newCameraMatrix New camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}$$`.
- * @param size Undistorted image size.
- * @param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
- * @param map1 The first output map.
- * @param map2 The second output map.
- */
- + (void)initUndistortRectifyMap:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R newCameraMatrix:(Mat*)newCameraMatrix size:(Size2i*)size m1type:(int)m1type map1:(Mat*)map1 map2:(Mat*)map2 NS_SWIFT_NAME(initUndistortRectifyMap(cameraMatrix:distCoeffs:R:newCameraMatrix:size:m1type:map1:map2:));
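- //
- // Sketch (Swift, illustrative only): building the maps once and remapping every frame,
- // which is cheaper than calling undistort per frame. Imgproc.remap and its Swift labels
- // are assumed from the imgproc module; 1 is the value of INTER_LINEAR. `K`, `dist`, and
- // `frame` are hypothetical inputs.
- //
- //     import opencv2
- //
- //     let map1 = Mat()
- //     let map2 = Mat()
- //     Calib3d.initUndistortRectifyMap(cameraMatrix: K, distCoeffs: dist, R: Mat(),
- //         newCameraMatrix: K, size: Size2i(width: 1280, height: 720),
- //         m1type: CvType.CV_16SC2, map1: map1, map2: map2)
- //     let rectified = Mat()
- //     Imgproc.remap(src: frame, dst: rectified, map1: map1, map2: map2, interpolation: 1)
- //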
- //
- // void cv::initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
- //
- /**
- * Computes the projection and inverse-rectification transformation map. In essence, this is the inverse of
- * #initUndistortRectifyMap to accommodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
- *
- * The function computes the joint projection and inverse rectification transformation and represents the
- * result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
- * once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
- * is usually equal to cameraMatrix, or it can be computed by
- * #getOptimalNewCameraMatrix for a better control over scaling. In case of a projector-camera pair,
- * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
- *
- * The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
- * this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
- * allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
- *
- * The function builds the maps for the inverse mapping algorithm that is used by #remap. That
- * is, for each pixel `$$(u, v)$$` in the destination (projected and inverse-rectified) image, the function
- * computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
- *
- * `$$
- * \begin{array}{l}
- * \text{newCameraMatrix}\\
- * x \leftarrow (u - {c'}_x)/{f'}_x \\
- * y \leftarrow (v - {c'}_y)/{f'}_y \\
- *
- * \\\text{Undistortion}
- * \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
- * r^2 \leftarrow x^2 + y^2 \\
- * \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
- * x' \leftarrow \frac{x}{\theta} \\
- * y' \leftarrow \frac{y}{\theta} \\
- *
- * \\\text{Rectification}\\
- * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- * x'' \leftarrow X/W \\
- * y'' \leftarrow Y/W \\
- *
- * \\\text{cameraMatrix}\\
- * map_x(u,v) \leftarrow x'' f_x + c_x \\
- * map_y(u,v) \leftarrow y'' f_y + c_y
- * \end{array}
- * $$`
- * where `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * are the distortion coefficients vector distCoeffs.
- *
- * In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
- * This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
- * is not calibrated, it is still possible to compute the rectification transformations directly from
- * the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
- * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
- * space. R can be computed from H as
- * `$$\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}$$`
- * where cameraMatrix can be chosen arbitrarily.
- *
- * @param cameraMatrix Input camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
- * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
- * is assumed.
- * @param newCameraMatrix New camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}$$`.
- * @param size Distorted image size.
- * @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
- * @param map1 The first output map for #remap.
- * @param map2 The second output map for #remap.
- */
- + (void)initInverseRectificationMap:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R newCameraMatrix:(Mat*)newCameraMatrix size:(Size2i*)size m1type:(int)m1type map1:(Mat*)map1 map2:(Mat*)map2 NS_SWIFT_NAME(initInverseRectificationMap(cameraMatrix:distCoeffs:R:newCameraMatrix:size:m1type:map1:map2:));
- //
- // Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
- //
- /**
- * Returns the default new camera matrix.
- *
- * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
- * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
- *
- * In the latter case, the new camera matrix will be:
- *
- * `$$\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,$$`
- *
- * where `$$f_x$$` and `$$f_y$$` are `$$(0,0)$$` and `$$(1,1)$$` elements of cameraMatrix, respectively.
- *
- * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
- * move the principal point. However, when you work with stereo, it is important to move the principal
- * points in both views to the same y-coordinate (which is required by most stereo correspondence
- * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
- * each view where the principal points are located at the center.
- *
- * @param cameraMatrix Input camera matrix.
- * @param imgsize Camera view image size in pixels.
- * @param centerPrincipalPoint Location of the principal point in the new camera matrix. The
- * parameter indicates whether this location should be at the image center or not.
- */
- + (Mat*)getDefaultNewCameraMatrix:(Mat*)cameraMatrix imgsize:(Size2i*)imgsize centerPrincipalPoint:(BOOL)centerPrincipalPoint NS_SWIFT_NAME(getDefaultNewCameraMatrix(cameraMatrix:imgsize:centerPrincipalPoint:));
- /**
- * Returns the default new camera matrix.
- *
- * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
- * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
- *
- * In the latter case, the new camera matrix will be:
- *
- * `$$\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,$$`
- *
- * where `$$f_x$$` and `$$f_y$$` are `$$(0,0)$$` and `$$(1,1)$$` elements of cameraMatrix, respectively.
- *
- * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
- * move the principal point. However, when you work with stereo, it is important to move the principal
- * points in both views to the same y-coordinate (which is required by most stereo correspondence
- * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
- * each view where the principal points are located at the center.
- *
- * @param cameraMatrix Input camera matrix.
- * @param imgsize Camera view image size in pixels.
- */
- + (Mat*)getDefaultNewCameraMatrix:(Mat*)cameraMatrix imgsize:(Size2i*)imgsize NS_SWIFT_NAME(getDefaultNewCameraMatrix(cameraMatrix:imgsize:));
- /**
- * Returns the default new camera matrix.
- *
- * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
- * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
- *
- * In the latter case, the new camera matrix will be:
- *
- * `$$\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,$$`
- *
- * where `$$f_x$$` and `$$f_y$$` are `$$(0,0)$$` and `$$(1,1)$$` elements of cameraMatrix, respectively.
- *
- * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
- * move the principal point. However, when you work with stereo, it is important to move the principal
- * points in both views to the same y-coordinate (which is required by most stereo correspondence
- * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
- * each view where the principal points are located at the center.
- *
- * @param cameraMatrix Input camera matrix.
- */
- + (Mat*)getDefaultNewCameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(getDefaultNewCameraMatrix(cameraMatrix:));
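- //
- // Sketch (Swift, illustrative only): centering the principal point for an assumed
- // 640x480 view. With centerPrincipalPoint=true, cx/cy become (width-1)/2 and
- // (height-1)/2 while f_x and f_y are kept; `K` is a hypothetical calibrated matrix.
- //
- //     import opencv2
- //
- //     let newK = Calib3d.getDefaultNewCameraMatrix(cameraMatrix: K,
- //         imgsize: Size2i(width: 640, height: 480), centerPrincipalPoint: true)
- //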
- //
- // void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
- //
- /**
- * Computes the ideal point coordinates from the observed point coordinates.
- *
- * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
- * sparse set of points instead of a raster image. Also the function performs a reverse transformation
- * to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
- * planar object, it does, up to a translation vector, if the proper R is specified.
- *
- * For each observed point coordinate `$$(u, v)$$` the function computes:
- * `$$
- * \begin{array}{l}
- * x^{"} \leftarrow (u - c_x)/f_x \\
- * y^{"} \leftarrow (v - c_y)/f_y \\
- * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
- * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- * x \leftarrow X/W \\
- * y \leftarrow Y/W \\
- * \text{only performed if P is specified:} \\
- * u' \leftarrow x {f'}_x + {c'}_x \\
- * v' \leftarrow y {f'}_y + {c'}_y
- * \end{array}
- * $$`
- *
- * where *undistort* is an approximate iterative algorithm that estimates the normalized original
- * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
- * coordinates do not depend on the camera matrix).
- *
- * The function can be used both for a stereo camera head and for a monocular camera (when R is empty).
- * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
- * vector\<Point2f\> ).
- * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
- * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
- * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
- * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
- * @param P New camera matrix (3x3) or new projection matrix (3x4) `$$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}$$`. P1 or P2 computed by
- * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
- */
- + (void)undistortPoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R P:(Mat*)P NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:R:P:));
- /**
- * Computes the ideal point coordinates from the observed point coordinates.
- *
- * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
- * sparse set of points instead of a raster image. Also the function performs a reverse transformation
- * to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
- * planar object, it does, up to a translation vector, if the proper R is specified.
- *
- * For each observed point coordinate `$$(u, v)$$` the function computes:
- * `$$
- * \begin{array}{l}
- * x^{"} \leftarrow (u - c_x)/f_x \\
- * y^{"} \leftarrow (v - c_y)/f_y \\
- * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
- * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- * x \leftarrow X/W \\
- * y \leftarrow Y/W \\
- * \text{only performed if P is specified:} \\
- * u' \leftarrow x {f'}_x + {c'}_x \\
- * v' \leftarrow y {f'}_y + {c'}_y
- * \end{array}
- * $$`
- *
- * where *undistort* is an approximate iterative algorithm that estimates the normalized original
- * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
- * coordinates do not depend on the camera matrix).
- *
- * The function can be used both for a stereo camera head and for a monocular camera (when R is empty).
- * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
- * vector\<Point2f\> ).
- * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
- * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
- * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
- * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
- */
- + (void)undistortPoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:R:));
- /**
- * Computes the ideal point coordinates from the observed point coordinates.
- *
- * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
- * sparse set of points instead of a raster image. Also the function performs a reverse transformation
- * to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
- * planar object, it does, up to a translation vector, if the proper R is specified.
- *
- * For each observed point coordinate `$$(u, v)$$` the function computes:
- * `$$
- * \begin{array}{l}
- * x^{"} \leftarrow (u - c_x)/f_x \\
- * y^{"} \leftarrow (v - c_y)/f_y \\
- * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
- * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- * x \leftarrow X/W \\
- * y \leftarrow Y/W \\
- * \text{only performed if P is specified:} \\
- * u' \leftarrow x {f'}_x + {c'}_x \\
- * v' \leftarrow y {f'}_y + {c'}_y
- * \end{array}
- * $$`
- *
- * where *undistort* is an approximate iterative algorithm that estimates the normalized original
- * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
- * coordinates do not depend on the camera matrix).
- *
- * The function can be used both for a stereo camera head and for a monocular camera (when R is empty).
- * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
- * vector\<Point2f\> ).
- * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
- * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
- * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Input vector of distortion coefficients
- * `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])$$`
- * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- */
- + (void)undistortPoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:));
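- //
- // Sketch (Swift, illustrative only): normalizing sparse pixel observations. With R and
- // P omitted, the result is in normalized image coordinates (camera matrix removed).
- // `pixels` is a hypothetical MatOfPoint2f of observed points; `K`/`dist` come from a
- // prior calibration.
- //
- //     import opencv2
- //
- //     let ideal = Mat()
- //     Calib3d.undistortPoints(src: pixels, dst: ideal, cameraMatrix: K, distCoeffs: dist)
- //     // Pass P = K (or a stereoRectify P1/P2) to map the ideal points back to pixels.
- //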
- //
- // void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
- //
- /**
- *
- * NOTE: Default version of #undistortPoints does 5 iterations to compute undistorted points.
- */
- + (void)undistortPointsIter:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs R:(Mat*)R P:(Mat*)P criteria:(TermCriteria*)criteria NS_SWIFT_NAME(undistortPoints(src:dst:cameraMatrix:distCoeffs:R:P:criteria:));
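- //
- // Sketch (Swift, illustrative only): asking for more refinement than the default 5
- // iterations. TermCriteria(type:maxCount:epsilon:) is assumed from the core bindings;
- // 3 is the value of MAX_ITER + EPS. `pixels`, `ideal`, `K`, and `dist` are as in the
- // sketch above.
- //
- //     import opencv2
- //
- //     let crit = TermCriteria(type: 3, maxCount: 20, epsilon: 1e-8)
- //     Calib3d.undistortPoints(src: pixels, dst: ideal, cameraMatrix: K,
- //                             distCoeffs: dist, R: Mat(), P: Mat(), criteria: crit)
- //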
- //
- // void cv::undistortImagePoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1 = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 0.01))
- //
- /**
- * Compute undistorted image points position
- *
- * @param src Observed points position, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
- * CV_64FC2) (or vector\<Point2f\> ).
- * @param dst Output undistorted points position (1xN/Nx1 2-channel or vector\<Point2f\> ).
- * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Distortion coefficients
- */
- + (void)undistortImagePoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs arg1:(TermCriteria*)arg1 NS_SWIFT_NAME(undistortImagePoints(src:dst:cameraMatrix:distCoeffs:arg1:));
- /**
- * Compute undistorted image points position
- *
- * @param src Observed points position, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
- * CV_64FC2) (or vector\<Point2f\> ).
- * @param dst Output undistorted points position (1xN/Nx1 2-channel or vector\<Point2f\> ).
- * @param cameraMatrix Camera matrix `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` .
- * @param distCoeffs Distortion coefficients
- */
- + (void)undistortImagePoints:(Mat*)src dst:(Mat*)dst cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(undistortImagePoints(src:dst:cameraMatrix:distCoeffs:));
- //
- // void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
- //
- + (void)projectPoints:(Mat*)objectPoints imagePoints:(Mat*)imagePoints rvec:(Mat*)rvec tvec:(Mat*)tvec K:(Mat*)K D:(Mat*)D alpha:(double)alpha jacobian:(Mat*)jacobian NS_SWIFT_NAME(projectPoints(objectPoints:imagePoints:rvec:tvec:K:D:alpha:jacobian:));
- + (void)projectPoints:(Mat*)objectPoints imagePoints:(Mat*)imagePoints rvec:(Mat*)rvec tvec:(Mat*)tvec K:(Mat*)K D:(Mat*)D alpha:(double)alpha NS_SWIFT_NAME(projectPoints(objectPoints:imagePoints:rvec:tvec:K:D:alpha:));
- + (void)projectPoints:(Mat*)objectPoints imagePoints:(Mat*)imagePoints rvec:(Mat*)rvec tvec:(Mat*)tvec K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(projectPoints(objectPoints:imagePoints:rvec:tvec:K:D:));
- //
- // void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
- //
- /**
- * Distorts 2D points using fisheye model.
- *
- * @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
- * the number of points in the view.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param alpha The skew coefficient.
- * @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- *
- * Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
- * This means if you want to distort image points you have to multiply them with `$$K^{-1}$$`.
- */
- + (void)distortPoints:(Mat*)undistorted distorted:(Mat*)distorted K:(Mat*)K D:(Mat*)D alpha:(double)alpha NS_SWIFT_NAME(distortPoints(undistorted:distorted:K:D:alpha:));
- /**
- * Distorts 2D points using fisheye model.
- *
- * @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
- * the number of points in the view.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- *
- * Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
- * This means if you want to distort image points you have to multiply them with `$$K^{-1}$$`.
- */
- + (void)distortPoints:(Mat*)undistorted distorted:(Mat*)distorted K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(distortPoints(undistorted:distorted:K:D:));
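- //
- // Example (sketch; `Calib3d` and the input Mats are assumed names). As noted above,
- // the input points must be in normalized (identity-intrinsics) coordinates:
- //
- //   Mat *distorted = [Mat new];
- //   [Calib3d distortPoints:normalizedPoints distorted:distorted K:K D:D];
- //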
- //
- // void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat(), TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8))
- //
- /**
- * Undistorts 2D points using fisheye model
- *
- * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
- * number of points in the view.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * @param criteria Termination criteria
- * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- */
- + (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D R:(Mat*)R P:(Mat*)P criteria:(TermCriteria*)criteria NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:R:P:criteria:));
- /**
- * Undistorts 2D points using fisheye model
- *
- * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
- * number of points in the view.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- */
- + (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D R:(Mat*)R P:(Mat*)P NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:R:P:));
- /**
- * Undistorts 2D points using fisheye model
- *
- * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
- * number of points in the view.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- */
- + (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D R:(Mat*)R NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:R:));
- /**
- * Undistorts 2D points using fisheye model
- *
- * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
- * number of points in the view.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- */
- + (void)undistortPoints:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(undistortPoints(distorted:undistorted:K:D:));
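- //
- // Example (sketch; `Calib3d` and the input Mats are assumed names). Passing P = K
- // keeps the result in pixel coordinates rather than normalized coordinates:
- //
- //   Mat *undistorted = [Mat new];
- //   [Calib3d undistortPoints:distorted undistorted:undistorted K:K D:D R:[Mat new] P:K];
- //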
- //
- // void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
- //
- /**
- * Computes undistortion and rectification maps for the image transform performed by #remap. If D is
- * empty, zero distortion is used; if R or P is empty, identity matrices are used.
- *
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * @param size Undistorted image size.
- * @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
- * for details.
- * @param map1 The first output map.
- * @param map2 The second output map.
- */
- + (void)initUndistortRectifyMap:(Mat*)K D:(Mat*)D R:(Mat*)R P:(Mat*)P size:(Size2i*)size m1type:(int)m1type map1:(Mat*)map1 map2:(Mat*)map2 NS_SWIFT_NAME(initUndistortRectifyMap(K:D:R:P:size:m1type:map1:map2:));
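- //
- // Example (sketch; `Calib3d`, the `Size2i` initializer, the `Imgproc` remap binding,
- // and the CV_16SC2 / INTER_LINEAR constants are assumptions, not defined in this
- // header):
- //
- //   Mat *map1 = [Mat new], *map2 = [Mat new];
- //   [Calib3d initUndistortRectifyMap:K D:D R:[Mat new] P:K
- //                               size:[[Size2i alloc] initWithWidth:1280 height:720]
- //                             m1type:CV_16SC2 map1:map1 map2:map2];
- //   // the maps can then be reused for every frame
- //   [Imgproc remap:distorted dst:undistorted map1:map1 map2:map2 interpolation:INTER_LINEAR];
- //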
- //
- // void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
- //
- /**
- * Transforms an image to compensate for fisheye lens distortion.
- *
- * @param distorted image with fisheye lens distortion.
- * @param undistorted Output image with compensated fisheye lens distortion.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
- * may additionally scale and shift the result by using a different matrix.
- * @param new_size the new size
- *
- * The function transforms an image to compensate for radial and tangential lens distortion.
- *
- * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- * (with bilinear interpolation). See the former function for details of the transformation being
- * performed.
- *
- * See below the results of undistortImage.
- * - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- * k_4, k_5, k_6) of distortion were optimized under calibration)
- * - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- * k_3, k_4) of fisheye distortion were optimized under calibration)
- * - c\) original image was captured with fisheye lens
- *
- * Pictures a) and b) are almost the same. But if we consider points of the image located far from
- * the center of the image, we can notice that on image a) these points are distorted.
- *
- * ![image](pics/fisheye_undistorted.jpg)
- */
- + (void)undistortImage:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D Knew:(Mat*)Knew new_size:(Size2i*)new_size NS_SWIFT_NAME(undistortImage(distorted:undistorted:K:D:Knew:new_size:));
- /**
- * Transforms an image to compensate for fisheye lens distortion.
- *
- * @param distorted image with fisheye lens distortion.
- * @param undistorted Output image with compensated fisheye lens distortion.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
- * may additionally scale and shift the result by using a different matrix.
- *
- * The function transforms an image to compensate for radial and tangential lens distortion.
- *
- * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- * (with bilinear interpolation). See the former function for details of the transformation being
- * performed.
- *
- * See below the results of undistortImage.
- * - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- * k_4, k_5, k_6) of distortion were optimized under calibration)
- * - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- * k_3, k_4) of fisheye distortion were optimized under calibration)
- * - c\) original image was captured with fisheye lens
- *
- * Pictures a) and b) are almost the same. But if we consider points of the image located far from
- * the center of the image, we can notice that on image a) these points are distorted.
- *
- * ![image](pics/fisheye_undistorted.jpg)
- */
- + (void)undistortImage:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D Knew:(Mat*)Knew NS_SWIFT_NAME(undistortImage(distorted:undistorted:K:D:Knew:));
- /**
- * Transforms an image to compensate for fisheye lens distortion.
- *
- * @param distorted image with fisheye lens distortion.
- * @param undistorted Output image with compensated fisheye lens distortion.
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- *
- * In this overload the new camera intrinsic matrix Knew is omitted, so the identity matrix is used;
- * to additionally scale and shift the result, call the overload that takes a Knew matrix.
- *
- * The function transforms an image to compensate for radial and tangential lens distortion.
- *
- * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- * (with bilinear interpolation). See the former function for details of the transformation being
- * performed.
- *
- * See below the results of undistortImage.
- * - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- * k_4, k_5, k_6) of distortion were optimized under calibration)
- * - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- * k_3, k_4) of fisheye distortion were optimized under calibration)
- * - c\) original image was captured with fisheye lens
- *
- * Pictures a) and b) are almost the same. But if we consider points of the image located far from
- * the center of the image, we can notice that on image a) these points are distorted.
- *
- * ![image](pics/fisheye_undistorted.jpg)
- */
- + (void)undistortImage:(Mat*)distorted undistorted:(Mat*)undistorted K:(Mat*)K D:(Mat*)D NS_SWIFT_NAME(undistortImage(distorted:undistorted:K:D:));
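- //
- // Example (sketch; `Calib3d` and the input Mats are assumed names). Passing Knew = K
- // keeps the original focal length; omitting Knew selects the identity matrix:
- //
- //   Mat *undistorted = [Mat new];
- //   [Calib3d undistortImage:distorted undistorted:undistorted K:K D:D Knew:K];
- //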
- //
- // void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
- //
- /**
- * Estimates new camera intrinsic matrix for undistortion or rectification.
- *
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param image_size Size of the image
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * @param balance Sets the new focal length in the range between the minimum and the maximum focal
- * length. Balance must be in the range [0, 1].
- * @param new_size the new size
- * @param fov_scale Divisor for new focal length.
- */
- + (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P balance:(double)balance new_size:(Size2i*)new_size fov_scale:(double)fov_scale NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:balance:new_size:fov_scale:));
- /**
- * Estimates new camera intrinsic matrix for undistortion or rectification.
- *
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param image_size Size of the image
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * @param balance Sets the new focal length in the range between the minimum and the maximum focal
- * length. Balance must be in the range [0, 1].
- * @param new_size the new size
- */
- + (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P balance:(double)balance new_size:(Size2i*)new_size NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:balance:new_size:));
- /**
- * Estimates new camera intrinsic matrix for undistortion or rectification.
- *
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param image_size Size of the image
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * @param balance Sets the new focal length in the range between the minimum and the maximum focal
- * length. Balance must be in the range [0, 1].
- */
- + (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P balance:(double)balance NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:balance:));
- /**
- * Estimates new camera intrinsic matrix for undistortion or rectification.
- *
- * @param K Camera intrinsic matrix `$$\cameramatrix{K}$$`.
- * @param image_size Size of the image
- * @param D Input vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- * 1-channel or 1x1 3-channel
- * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- * In this overload the balance defaults to 0.0, the new image size to the input image size, and
- * fov_scale to 1.0.
- */
- + (void)estimateNewCameraMatrixForUndistortRectify:(Mat*)K D:(Mat*)D image_size:(Size2i*)image_size R:(Mat*)R P:(Mat*)P NS_SWIFT_NAME(estimateNewCameraMatrixForUndistortRectify(K:D:image_size:R:P:));
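- //
- // Example (sketch; `Calib3d` and the `Size2i` initializer are assumptions). The
- // balance value in [0, 1] interpolates the new focal length between its minimum and
- // maximum, as described above:
- //
- //   Mat *P = [Mat new];
- //   Size2i *imageSize = [[Size2i alloc] initWithWidth:1280 height:720];
- //   [Calib3d estimateNewCameraMatrixForUndistortRectify:K D:D image_size:imageSize
- //                                                     R:[Mat new] P:P balance:0.5];
- //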
- //
- // double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
- //
- /**
- * Performs camera calibration
- *
- * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
- * coordinate space.
- * @param imagePoints vector of vectors of the projections of calibration pattern points.
- * imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be
- * equal to objectPoints[i].size() for each i.
- * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
- * @param K Output 3x3 floating-point camera intrinsic matrix
- * `$$\cameramatrix{A}$$` . If
- * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- * initialized before calling the function.
- * @param D Output vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- * the next output parameter description) brings the calibration pattern from the model coordinate
- * space (in which object points are specified) to the world coordinate space, that is, a real
- * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
- * @param tvecs Output vector of translation vectors estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- * center (imageSize is used), and focal distances are computed in a least-squares fashion.
- * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic parameters will be recomputed after each
- * iteration of intrinsic optimization.
- * - REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
- * are set to zero and stay zero.
- * - REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- * - REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
- * optimization. It is the `$$max(width,height)/\pi$$` or the provided `$$f_x$$`, `$$f_y$$` when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- * @param criteria Termination criteria for the iterative optimization algorithm.
- */
- + (double)calibrate:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints image_size:(Size2i*)image_size K:(Mat*)K D:(Mat*)D rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrate(objectPoints:imagePoints:image_size:K:D:rvecs:tvecs:flags:criteria:));
- /**
- * Performs camera calibration
- *
- * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
- * coordinate space.
- * @param imagePoints vector of vectors of the projections of calibration pattern points.
- * imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be
- * equal to objectPoints[i].size() for each i.
- * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
- * @param K Output 3x3 floating-point camera intrinsic matrix
- * `$$\cameramatrix{A}$$` . If
- * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- * initialized before calling the function.
- * @param D Output vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- * the next output parameter description) brings the calibration pattern from the model coordinate
- * space (in which object points are specified) to the world coordinate space, that is, a real
- * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
- * @param tvecs Output vector of translation vectors estimated for each pattern view.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- * center (imageSize is used), and focal distances are computed in a least-squares fashion.
- * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic parameters will be recomputed after each
- * iteration of intrinsic optimization.
- * - REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
- * are set to zero and stay zero.
- * - REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- * - REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
- * optimization. It is the `$$max(width,height)/\pi$$` or the provided `$$f_x$$`, `$$f_y$$` when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- */
- + (double)calibrate:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints image_size:(Size2i*)image_size K:(Mat*)K D:(Mat*)D rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(calibrate(objectPoints:imagePoints:image_size:K:D:rvecs:tvecs:flags:));
- /**
- * Performs camera calibration
- *
- * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
- * coordinate space.
- * @param imagePoints vector of vectors of the projections of calibration pattern points.
- * imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be
- * equal to objectPoints[i].size() for each i.
- * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
- * @param K Output 3x3 floating-point camera intrinsic matrix
- * `$$\cameramatrix{A}$$` . If
- * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- * initialized before calling the function.
- * @param D Output vector of distortion coefficients `$$\distcoeffsfisheye$$`.
- * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- * the next output parameter description) brings the calibration pattern from the model coordinate
- * space (in which object points are specified) to the world coordinate space, that is, a real
- * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
- * @param tvecs Output vector of translation vectors estimated for each pattern view.
- *
- * In this overload the calibration flags default to 0 and the termination criteria to
- * TermCriteria(COUNT + EPS, 100, DBL_EPSILON); see the full overload above for the list of
- * supported flags.
- */
- + (double)calibrate:(NSArray<Mat*>*)objectPoints imagePoints:(NSArray<Mat*>*)imagePoints image_size:(Size2i*)image_size K:(Mat*)K D:(Mat*)D rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(calibrate(objectPoints:imagePoints:image_size:K:D:rvecs:tvecs:));
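- //
- // Example (sketch; `Calib3d` and the point arrays are assumed names). objectPoints
- // and imagePoints hold one Mat per calibration view; the return value is the
- // reprojection error:
- //
- //   Mat *K = [Mat new], *D = [Mat new];
- //   NSMutableArray<Mat*> *rvecs = [NSMutableArray array];
- //   NSMutableArray<Mat*> *tvecs = [NSMutableArray array];
- //   double rms = [Calib3d calibrate:objectPoints imagePoints:imagePoints
- //                        image_size:imageSize K:K D:D rvecs:rvecs tvecs:tvecs];
- //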
- //
- // void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
- //
- /**
- * Stereo rectification for fisheye camera model
- *
- * @param K1 First camera intrinsic matrix.
- * @param D1 First camera distortion parameters.
- * @param K2 Second camera intrinsic matrix.
- * @param D2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix between the coordinate systems of the first and the second
- * cameras.
- * @param tvec Translation vector between coordinate systems of the cameras.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
- * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. And if the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param newImageSize New image resolution after rectification. The same size should be passed to
- * #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When
- * (0,0) is passed (default), it is set to the original imageSize. Setting it to a larger value can
- * help you preserve details in the original image, especially when there is large radial distortion.
- * @param balance Sets the new focal length in the range between the minimum and the maximum focal
- * length. Balance must be in the range [0, 1].
- * @param fov_scale Divisor for new focal length.
- */
- + (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags newImageSize:(Size2i*)newImageSize balance:(double)balance fov_scale:(double)fov_scale NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:newImageSize:balance:fov_scale:));
- /**
- * Stereo rectification for fisheye camera model
- *
- * @param K1 First camera intrinsic matrix.
- * @param D1 First camera distortion parameters.
- * @param K2 Second camera intrinsic matrix.
- * @param D2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix between the coordinate systems of the first and the second
- * cameras.
- * @param tvec Translation vector between coordinate systems of the cameras.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
- * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. And if the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param newImageSize New image resolution after rectification. The same size should be passed to
- * #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When
- * (0,0) is passed (default), it is set to the original imageSize. Setting it to a larger value can
- * help you preserve details in the original image, especially when there is large radial distortion.
- * @param balance Sets the new focal length in the range between the minimum and the maximum focal
- * length. Balance must be in the range [0, 1].
- */
- + (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags newImageSize:(Size2i*)newImageSize balance:(double)balance NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:newImageSize:balance:));
- /**
- * Stereo rectification for fisheye camera model
- *
- * @param K1 First camera intrinsic matrix.
- * @param D1 First camera distortion parameters.
- * @param K2 Second camera intrinsic matrix.
- * @param D2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix between the coordinate systems of the first and the second
- * cameras.
- * @param tvec Translation vector between coordinate systems of the cameras.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
- * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. And if the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * @param newImageSize New image resolution after rectification. The same size should be passed to
- * #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When
- * (0,0) is passed (default), it is set to the original imageSize. Setting it to a larger value can
- * help you preserve details in the original image, especially when there is large radial distortion.
- * In this overload the balance defaults to 0.0 and fov_scale to 1.0.
- */
- + (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags newImageSize:(Size2i*)newImageSize NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:newImageSize:));
- /**
- * Stereo rectification for fisheye camera model
- *
- * @param K1 First camera intrinsic matrix.
- * @param D1 First camera distortion parameters.
- * @param K2 Second camera intrinsic matrix.
- * @param D2 Second camera distortion parameters.
- * @param imageSize Size of the image used for stereo calibration.
- * @param R Rotation matrix between the coordinate systems of the first and the second
- * cameras.
- * @param tvec Translation vector between coordinate systems of the cameras.
- * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
- * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
- * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- * camera.
- * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- * camera.
- * @param Q Output `$$4 \times 4$$` disparity-to-depth mapping matrix (see reprojectImageTo3D ).
- * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- * the function makes the principal points of each camera have the same pixel coordinates in the
- * rectified views. And if the flag is not set, the function may still shift the images in the
- * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- * useful image area.
- * In this overload the new image size defaults to the original imageSize, the balance to 0.0, and
- * fov_scale to 1.0.
- */
- + (void)stereoRectify:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R tvec:(Mat*)tvec R1:(Mat*)R1 R2:(Mat*)R2 P1:(Mat*)P1 P2:(Mat*)P2 Q:(Mat*)Q flags:(int)flags NS_SWIFT_NAME(stereoRectify(K1:D1:K2:D2:imageSize:R:tvec:R1:R2:P1:P2:Q:flags:));
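- //
- // Example (sketch; `Calib3d` and the calibration inputs are assumed names). R and
- // tvec would come from a prior #fisheye::stereoCalibrate run; flags:0 leaves
- // REF: fisheye::CALIB_ZERO_DISPARITY unset:
- //
- //   Mat *R1 = [Mat new], *R2 = [Mat new], *P1 = [Mat new], *P2 = [Mat new], *Q = [Mat new];
- //   [Calib3d stereoRectify:K1 D1:D1 K2:K2 D2:D2 imageSize:imageSize R:R tvec:tvec
- //                      R1:R1 R2:R2 P1:P1 P2:P2 Q:Q flags:0];
- //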
- //
- // double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
- //
- /**
- * Performs stereo calibration
- *
- * @param objectPoints Vector of vectors of the calibration pattern points.
- * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- * observed by the first camera.
- * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- * observed by the second camera.
- * @param K1 Input/output first camera intrinsic matrix:
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}$$` , `$$j = 0,\, 1$$` . If
- * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
- * some or all of the matrix components must be initialized.
- * @param D1 Input/output vector of distortion coefficients `$$\distcoeffsfisheye$$` of 4 elements.
- * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
- * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
- * similar to D1 .
- * @param imageSize Size of the image used only to initialize camera intrinsic matrix.
- * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
- * @param T Output translation vector between the coordinate systems of the cameras.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only the R and T matrices
- * are estimated.
- * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
- * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- * center (imageSize is used), and focal distances are computed in a least-squares fashion.
- * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic parameters will be recomputed after each
- * iteration of intrinsic optimization.
- * - REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero
- * and stay zero.
- * @param criteria Termination criteria for the iterative optimization algorithm.
- */
- + (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 K1:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:K1:D1:K2:D2:imageSize:R:T:flags:criteria:));
- /**
- * Performs stereo calibration
- *
- * @param objectPoints Vector of vectors of the calibration pattern points.
- * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- * observed by the first camera.
- * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- * observed by the second camera.
- * @param K1 Input/output first camera intrinsic matrix:
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}$$` , `$$j = 0,\, 1$$` . If
- * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
- * some or all of the matrix components must be initialized.
- * @param D1 Input/output vector of distortion coefficients `$$\distcoeffsfisheye$$` of 4 elements.
- * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
- * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
- * similar to D1 .
- * @param imageSize Size of the image used only to initialize camera intrinsic matrix.
- * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
- * @param T Output translation vector between the coordinate systems of the cameras.
- * @param flags Different flags that may be zero or a combination of the following values:
- * - REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only the R and T matrices
- * are estimated.
- * - REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
- * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- * center (imageSize is used), and focal distances are computed in a least-squares fashion.
- * - REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic parameters will be recomputed after each
- * iteration of intrinsic optimization.
- * - REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- * - REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- * - REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero
- * and stay zero.
- */
- + (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 K1:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T flags:(int)flags NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:K1:D1:K2:D2:imageSize:R:T:flags:));
- /**
- * Performs stereo calibration
- *
- * @param objectPoints Vector of vectors of the calibration pattern points.
- * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- * observed by the first camera.
- * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- * observed by the second camera.
- * @param K1 Input/output first camera intrinsic matrix:
- * `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } \vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}$$` , `$$j = 0,\, 1$$` . If
- * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
- * some or all of the matrix components must be initialized.
- * @param D1 Input/output vector of distortion coefficients `$$\distcoeffsfisheye$$` of 4 elements.
- * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
- * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
- * similar to D1 .
- * @param imageSize Size of the image used only to initialize camera intrinsic matrix.
- * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
- * @param T Output translation vector between the coordinate systems of the cameras.
- *
- * In this overload the flags default to REF: fisheye::CALIB_FIX_INTRINSIC and the termination
- * criteria to TermCriteria(COUNT + EPS, 100, DBL_EPSILON); see the full overload above for the
- * list of supported flags.
- */
- + (double)stereoCalibrate:(NSArray<Mat*>*)objectPoints imagePoints1:(NSArray<Mat*>*)imagePoints1 imagePoints2:(NSArray<Mat*>*)imagePoints2 K1:(Mat*)K1 D1:(Mat*)D1 K2:(Mat*)K2 D2:(Mat*)D2 imageSize:(Size2i*)imageSize R:(Mat*)R T:(Mat*)T NS_SWIFT_NAME(stereoCalibrate(objectPoints:imagePoints1:imagePoints2:K1:D1:K2:D2:imageSize:R:T:));
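- //
- // Example (sketch; `Calib3d` and the point arrays are assumed names). With the
- // default flags (REF: fisheye::CALIB_FIX_INTRINSIC) the intrinsics K1/D1/K2/D2 must
- // already be calibrated and are kept fixed:
- //
- //   Mat *R = [Mat new], *T = [Mat new];
- //   double rms = [Calib3d stereoCalibrate:objectPoints imagePoints1:imagePoints1
- //                            imagePoints2:imagePoints2 K1:K1 D1:D1 K2:K2 D2:D2
- //                               imageSize:imageSize R:R T:T];
- //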
- @end
- NS_ASSUME_NONNULL_END