/*
 * ARMv8 NEON optimizations for libjpeg-turbo
 *
 * Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
 *                          All Rights Reserved.
 * Author:  Siarhei Siamashka <siarhei.siamashka@nokia.com>
 * Copyright (C) 2013-2014, Linaro Limited.  All Rights Reserved.
 * Author:  Ragesh Radhakrishnan <ragesh.r@linaro.org>
 * Copyright (C) 2014-2016, D. R. Commander.  All Rights Reserved.
 * Copyright (C) 2015-2016, 2018, Matthieu Darbois.  All Rights Reserved.
 * Copyright (C) 2016, Siarhei Siamashka.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits  /* mark stack as non-executable */
#endif

#if defined(__APPLE__)
.section __DATA, __const
#elif defined(_WIN32)
.section .rdata
#else
.section .rodata, "a", %progbits
#endif
/* Constants for jsimd_idct_islow_neon() */

#define F_0_298  2446   /* FIX(0.298631336) */
#define F_0_390  3196   /* FIX(0.390180644) */
#define F_0_541  4433   /* FIX(0.541196100) */
#define F_0_765  6270   /* FIX(0.765366865) */
#define F_0_899  7373   /* FIX(0.899976223) */
#define F_1_175  9633   /* FIX(1.175875602) */
#define F_1_501  12299  /* FIX(1.501321110) */
#define F_1_847  15137  /* FIX(1.847759065) */
#define F_1_961  16069  /* FIX(1.961570560) */
#define F_2_053  16819  /* FIX(2.053119869) */
#define F_2_562  20995  /* FIX(2.562915447) */
#define F_3_072  25172  /* FIX(3.072711026) */

.balign 16
Ljsimd_idct_islow_neon_consts:
  .short F_0_298
  .short -F_0_390
  .short F_0_541
  .short F_0_765
  .short -F_0_899
  .short F_1_175
  .short F_1_501
  .short -F_1_847
  .short -F_1_961
  .short F_2_053
  .short -F_2_562
  .short F_3_072
  .short 0  /* padding */
  .short 0
  .short 0
  .short 0

#undef F_0_298
#undef F_0_390
#undef F_0_541
#undef F_0_765
#undef F_0_899
#undef F_1_175
#undef F_1_501
#undef F_1_847
#undef F_1_961
#undef F_2_053
#undef F_2_562
#undef F_3_072
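
/* The FIX(x) values above follow the usual libjpeg CONST_BITS = 13
 * fixed-point convention, i.e. FIX(x) = round(x * 2^13).  Worked example:
 *   FIX(0.298631336) = round(0.298631336 * 8192) = round(2446.35) = 2446
 * Signs are folded into the table entries (see the XFIX_N_* aliases further
 * below), so the multiplies at run time are plain smull/smlal accumulations.
 */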
/* Constants for jsimd_idct_ifast_neon() */

.balign 16
Ljsimd_idct_ifast_neon_consts:
  .short (277 * 128 - 256 * 128)  /* XFIX_1_082392200 */
  .short (362 * 128 - 256 * 128)  /* XFIX_1_414213562 */
  .short (473 * 128 - 256 * 128)  /* XFIX_1_847759065 */
  .short (669 * 128 - 512 * 128)  /* XFIX_2_613125930 */
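
/* The ifast constants use a coarser 8-bit approximation: each factor x is
 * first rounded to n/256 (e.g. 1.414213562 ~= 362/256), and the expressions
 * above appear to keep only the fractional part in Q15 form, (n - 256) * 128
 * (or (n - 512) * 128 for the one factor above 2.0); the integer part is
 * presumably re-added with plain adds/shifts in jsimd_idct_ifast_neon(),
 * since a Q15 high-half multiply can only supply the fractional term.
 * Worked example: (362 * 128 - 256 * 128) = 13568 ~= 0.414213562 * 2^15.
 */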
/* Constants for jsimd_idct_4x4_neon() and jsimd_idct_2x2_neon() */

#define CONST_BITS  13

#define FIX_0_211164243  (1730)   /* FIX(0.211164243) */
#define FIX_0_509795579  (4176)   /* FIX(0.509795579) */
#define FIX_0_601344887  (4926)   /* FIX(0.601344887) */
#define FIX_0_720959822  (5906)   /* FIX(0.720959822) */
#define FIX_0_765366865  (6270)   /* FIX(0.765366865) */
#define FIX_0_850430095  (6967)   /* FIX(0.850430095) */
#define FIX_0_899976223  (7373)   /* FIX(0.899976223) */
#define FIX_1_061594337  (8697)   /* FIX(1.061594337) */
#define FIX_1_272758580  (10426)  /* FIX(1.272758580) */
#define FIX_1_451774981  (11893)  /* FIX(1.451774981) */
#define FIX_1_847759065  (15137)  /* FIX(1.847759065) */
#define FIX_2_172734803  (17799)  /* FIX(2.172734803) */
#define FIX_2_562915447  (20995)  /* FIX(2.562915447) */
#define FIX_3_624509785  (29692)  /* FIX(3.624509785) */

.balign 16
Ljsimd_idct_4x4_neon_consts:
  .short FIX_1_847759065        /* v0.h[0] */
  .short -FIX_0_765366865       /* v0.h[1] */
  .short -FIX_0_211164243       /* v0.h[2] */
  .short FIX_1_451774981        /* v0.h[3] */
  .short -FIX_2_172734803       /* d1[0] */
  .short FIX_1_061594337        /* d1[1] */
  .short -FIX_0_509795579       /* d1[2] */
  .short -FIX_0_601344887       /* d1[3] */
  .short FIX_0_899976223        /* v2.h[0] */
  .short FIX_2_562915447        /* v2.h[1] */
  .short 1 << (CONST_BITS + 1)  /* v2.h[2] */
  .short 0                      /* v2.h[3] */

.balign 8
Ljsimd_idct_2x2_neon_consts:
  .short -FIX_0_720959822  /* v14[0] */
  .short FIX_0_850430095   /* v14[1] */
  .short -FIX_1_272758580  /* v14[2] */
  .short FIX_3_624509785   /* v14[3] */
/* Constants for jsimd_ycc_*_neon() */

.balign 16
Ljsimd_ycc_rgb_neon_consts:
  .short 0,     0,      0,      0
  .short 22971, -11277, -23401, 29033
  .short -128,  -128,   -128,   -128
  .short -128,  -128,   -128,   -128
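
/* These appear to be the usual JFIF YCbCr -> RGB coefficients in fixed
 * point: 22971 ~= 1.40200 * 2^14 (Cr -> R), 29033 ~= 1.77200 * 2^14
 * (Cb -> B), 11277 ~= 0.34414 * 2^15 and 23401 ~= 0.71414 * 2^15 (the two
 * Cb/Cr -> G terms, negated above), matching
 *   R = Y + 1.40200 * (Cr - 128)
 *   G = Y - 0.34414 * (Cb - 128) - 0.71414 * (Cr - 128)
 *   B = Y + 1.77200 * (Cb - 128)
 * with the -128 rows supplying the Cb/Cr centering offsets.
 */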
/* Constants for jsimd_*_ycc_neon() */

.balign 16
Ljsimd_rgb_ycc_neon_consts:
  .short 19595, 38470, 7471,  11059
  .short 21709, 32768, 27439, 5329
  .short 32767, 128,   32767, 128
  .short 32767, 128,   32767, 128
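
/* These match the standard JPEG RGB -> YCbCr equations with Q16 scaling
 * (round(x * 65536)):
 *   Y  =  0.29900 R + 0.58700 G + 0.11400 B         (19595, 38470, 7471)
 *   Cb = -0.16874 R - 0.33126 G + 0.50000 B + 128   (11059, 21709, 32768)
 *   Cr =  0.50000 R - 0.41869 G - 0.08131 B + 128   (32768, 27439, 5329)
 * The trailing 32767/128 pairs are presumably the rounding bias and the
 * CENTERJSAMPLE offset applied before the final descale.
 */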
/* Constants for jsimd_fdct_islow_neon() */

#define F_0_298  2446   /* FIX(0.298631336) */
#define F_0_390  3196   /* FIX(0.390180644) */
#define F_0_541  4433   /* FIX(0.541196100) */
#define F_0_765  6270   /* FIX(0.765366865) */
#define F_0_899  7373   /* FIX(0.899976223) */
#define F_1_175  9633   /* FIX(1.175875602) */
#define F_1_501  12299  /* FIX(1.501321110) */
#define F_1_847  15137  /* FIX(1.847759065) */
#define F_1_961  16069  /* FIX(1.961570560) */
#define F_2_053  16819  /* FIX(2.053119869) */
#define F_2_562  20995  /* FIX(2.562915447) */
#define F_3_072  25172  /* FIX(3.072711026) */

.balign 16
Ljsimd_fdct_islow_neon_consts:
  .short F_0_298
  .short -F_0_390
  .short F_0_541
  .short F_0_765
  .short -F_0_899
  .short F_1_175
  .short F_1_501
  .short -F_1_847
  .short -F_1_961
  .short F_2_053
  .short -F_2_562
  .short F_3_072
  .short 0  /* padding */
  .short 0
  .short 0
  .short 0

#undef F_0_298
#undef F_0_390
#undef F_0_541
#undef F_0_765
#undef F_0_899
#undef F_1_175
#undef F_1_501
#undef F_1_847
#undef F_1_961
#undef F_2_053
#undef F_2_562
#undef F_3_072
/* Constants for jsimd_fdct_ifast_neon() */

.balign 16
Ljsimd_fdct_ifast_neon_consts:
  .short (98 * 128)               /* XFIX_0_382683433 */
  .short (139 * 128)              /* XFIX_0_541196100 */
  .short (181 * 128)              /* XFIX_0_707106781 */
  .short (334 * 128 - 256 * 128)  /* XFIX_1_306562965 */
/* Constants for jsimd_h2*_downsample_neon() */

.balign 16
Ljsimd_h2_downsample_neon_consts:
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F  /* diff 0 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0E  /* diff 1 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0D, 0x0D  /* diff 2 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0C, 0x0C, 0x0C  /* diff 3 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B  /* diff 4 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A  /* diff 5 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09  /* diff 6 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08  /* diff 7 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
        0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07  /* diff 8 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, \
        0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06  /* diff 9 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x05, 0x05, \
        0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05  /* diff 10 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x04, 0x04, 0x04, \
        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04  /* diff 11 */
  .byte 0x00, 0x01, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, \
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03  /* diff 12 */
  .byte 0x00, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, \
        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02  /* diff 13 */
  .byte 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, \
        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01  /* diff 14 */
  .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  /* diff 15 */
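
/* A plausible reading of the table above: each 16-byte row is a shuffle
 * pattern for a tbl-style lookup.  Row "diff N" keeps the first 16 - N
 * source bytes in place and replicates byte (15 - N) into the last N lanes;
 * e.g. for diff 3, lanes 13..15 all read byte 0x0C.  This lets the
 * downsampling code pad a partially filled 16-byte group by duplicating the
 * last valid sample instead of reading past the end of the row.
 */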
/* Constants for jsimd_huff_encode_one_block_neon() */

.balign 16
Ljsimd_huff_encode_one_block_neon_consts:
  .byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, \
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  .byte 0,   1,   2,   3,   16,  17,  32,  33, \
        18,  19,  4,   5,   6,   7,   20,  21   /* L0 => L3 : 4 lines OK */
  .byte 34,  35,  48,  49,  255, 255, 50,  51, \
        36,  37,  22,  23,  8,   9,   10,  11   /* L0 => L3 : 4 lines OK */
  .byte 8,   9,   22,  23,  36,  37,  50,  51, \
        255, 255, 255, 255, 255, 255, 52,  53   /* L1 => L4 : 4 lines OK */
  .byte 54,  55,  40,  41,  26,  27,  12,  13, \
        14,  15,  28,  29,  42,  43,  56,  57   /* L0 => L3 : 4 lines OK */
  .byte 6,   7,   20,  21,  34,  35,  48,  49, \
        50,  51,  36,  37,  22,  23,  8,   9    /* L4 => L7 : 4 lines OK */
  .byte 42,  43,  28,  29,  14,  15,  30,  31, \
        44,  45,  58,  59,  255, 255, 255, 255  /* L1 => L4 : 4 lines OK */
  .byte 255, 255, 255, 255, 56,  57,  42,  43, \
        28,  29,  14,  15,  30,  31,  44,  45   /* L3 => L6 : 4 lines OK */
  .byte 26,  27,  40,  41,  42,  43,  28,  29, \
        14,  15,  30,  31,  44,  45,  46,  47   /* L5 => L7 : 3 lines OK */
  .byte 255, 255, 255, 255, 0,   1,   255, 255, \
        255, 255, 255, 255, 255, 255, 255, 255  /* L4 : 1 line OK */
  .byte 255, 255, 255, 255, 255, 255, 255, 255, \
        0,   1,   16,  17,  2,   3,   255, 255  /* L5 => L6 : 2 lines OK */
  .byte 255, 255, 255, 255, 255, 255, 255, 255, \
        255, 255, 255, 255, 8,   9,   22,  23   /* L5 => L6 : 2 lines OK */
  .byte 4,   5,   6,   7,   255, 255, 255, 255, \
        255, 255, 255, 255, 255, 255, 255, 255  /* L7 : 1 line OK */
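
/* Hedged interpretation of the tables above: the first row (0x01, 0x02,
 * 0x04, ... 0x80, repeated) provides per-lane bit masks used when building
 * the bitmap of zero coefficients, and the remaining rows are byte-pair
 * permutation indices (for tbl-style shuffles) that gather 16-bit
 * coefficients into the JPEG zigzag order; index 255 is out of range for
 * tbl and therefore yields zero, marking lanes that are filled from another
 * lookup (hence the "N lines OK" comments).
 */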
.text


#define RESPECT_STRICT_ALIGNMENT  1


/*****************************************************************************/

/* Supplementary macro for setting function attributes */
.macro asm_function fname
#ifdef __APPLE__
    .private_extern _\fname
    .globl _\fname
_\fname:
#else
    .global \fname
#ifdef __ELF__
    .hidden \fname
    .type \fname, %function
#endif
\fname:
#endif
.endm
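
/* Example expansion: "asm_function jsimd_idct_islow_neon" emits a
 * ".globl _jsimd_idct_islow_neon" / "_jsimd_idct_islow_neon:" pair on
 * Mach-O (Apple) targets, where C symbols carry a leading underscore, and a
 * ".global jsimd_idct_islow_neon" definition (marked .hidden and typed as
 * %function on ELF) elsewhere, so each routine below is callable from C
 * under its unprefixed name.
 */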
/* Get symbol location */
.macro get_symbol_loc reg, symbol
#ifdef __APPLE__
    adrp \reg, \symbol@PAGE
    add  \reg, \reg, \symbol@PAGEOFF
#else
    adrp \reg, \symbol
    add  \reg, \reg, :lo12:\symbol
#endif
.endm
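
/* adrp materializes the 4 KB page address of the symbol and the add supplies
 * the low 12 bits (:lo12: on ELF, @PAGEOFF on Mach-O), the standard
 * two-instruction PC-relative sequence for addressing nearby data.
 * Example: "get_symbol_loc x15, Ljsimd_idct_islow_neon_consts" leaves the
 * address of that constant table in x15.
 */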
/* Transpose elements of single 128 bit registers */
.macro transpose_single x0, x1, xi, xilen, literal
    ins  \xi\xilen[0], \x0\xilen[0]
    ins  \x1\xilen[0], \x0\xilen[1]
    trn1 \x0\literal, \x0\literal, \x1\literal
    trn2 \x1\literal, \xi\literal, \x1\literal
.endm

/* Transpose elements of 2 different registers */
.macro transpose x0, x1, xi, xilen, literal
    mov  \xi\xilen, \x0\xilen
    trn1 \x0\literal, \x0\literal, \x1\literal
    trn2 \x1\literal, \xi\literal, \x1\literal
.endm

/* Transpose a block of 4x4 coefficients in four 64-bit registers */
.macro transpose_4x4_32 x0, x0len, x1, x1len, x2, x2len, x3, x3len, xi, xilen
    mov  \xi\xilen, \x0\xilen
    trn1 \x0\x0len, \x0\x0len, \x2\x2len
    trn2 \x2\x2len, \xi\x0len, \x2\x2len
    mov  \xi\xilen, \x1\xilen
    trn1 \x1\x1len, \x1\x1len, \x3\x3len
    trn2 \x3\x3len, \xi\x1len, \x3\x3len
.endm

.macro transpose_4x4_16 x0, x0len, x1, x1len, x2, x2len, x3, x3len, xi, xilen
    mov  \xi\xilen, \x0\xilen
    trn1 \x0\x0len, \x0\x0len, \x1\x1len
    trn2 \x1\x2len, \xi\x0len, \x1\x2len
    mov  \xi\xilen, \x2\xilen
    trn1 \x2\x2len, \x2\x2len, \x3\x3len
    trn2 \x3\x2len, \xi\x1len, \x3\x3len
.endm

.macro transpose_4x4 x0, x1, x2, x3, x5
    transpose_4x4_16 \x0, .4h, \x1, .4h, \x2, .4h, \x3, .4h, \x5, .16b
    transpose_4x4_32 \x0, .2s, \x1, .2s, \x2, .2s, \x3, .2s, \x5, .16b
.endm

.macro transpose_8x8 l0, l1, l2, l3, l4, l5, l6, l7, t0, t1, t2, t3
    trn1 \t0\().8h, \l0\().8h, \l1\().8h
    trn1 \t1\().8h, \l2\().8h, \l3\().8h
    trn1 \t2\().8h, \l4\().8h, \l5\().8h
    trn1 \t3\().8h, \l6\().8h, \l7\().8h
    trn2 \l1\().8h, \l0\().8h, \l1\().8h
    trn2 \l3\().8h, \l2\().8h, \l3\().8h
    trn2 \l5\().8h, \l4\().8h, \l5\().8h
    trn2 \l7\().8h, \l6\().8h, \l7\().8h

    trn1 \l4\().4s, \t2\().4s, \t3\().4s
    trn2 \t3\().4s, \t2\().4s, \t3\().4s
    trn1 \t2\().4s, \t0\().4s, \t1\().4s
    trn2 \l2\().4s, \t0\().4s, \t1\().4s
    trn1 \t0\().4s, \l1\().4s, \l3\().4s
    trn2 \l3\().4s, \l1\().4s, \l3\().4s
    trn2 \t1\().4s, \l5\().4s, \l7\().4s
    trn1 \l5\().4s, \l5\().4s, \l7\().4s

    trn2 \l6\().2d, \l2\().2d, \t3\().2d
    trn1 \l0\().2d, \t2\().2d, \l4\().2d
    trn1 \l1\().2d, \t0\().2d, \l5\().2d
    trn2 \l7\().2d, \l3\().2d, \t1\().2d
    trn1 \l2\().2d, \l2\().2d, \t3\().2d
    trn2 \l4\().2d, \t2\().2d, \l4\().2d
    trn1 \l3\().2d, \l3\().2d, \t1\().2d
    trn2 \l5\().2d, \t0\().2d, \l5\().2d
.endm
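
/* transpose_8x8 builds an 8x8 transpose of 16-bit elements out of three
 * rounds of trn1/trn2: first interleaving 16-bit lanes of adjacent row
 * pairs, then 32-bit lanes of the results, then 64-bit halves.  As a 2x2
 * illustration of the primitive, with a0 a1 and b0 b1 in two registers,
 * trn1 yields a0 b0 and trn2 yields a1 b1, i.e. one trn1/trn2 pair
 * transposes a 2x2 tile; applying the pair at .8h, .4s, and .2d granularity
 * composes the full transpose.
 */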
#define CENTERJSAMPLE  128


/*****************************************************************************/

/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * GLOBAL(void)
 * jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
 *                       JSAMPARRAY output_buf, JDIMENSION output_col)
 */
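
/* Rough scalar reference for what follows (a sketch only; idct_1d(),
 * range_limit(), and workspace[] are illustrative placeholders, not names
 * from the actual C source).  Each sample is produced in two 1-D passes over
 * the dequantized block, using CONST_BITS = 13 fixed-point arithmetic:
 *
 *   // pass 1: results kept at PASS1_BITS extra precision
 *   workspace[i] = DESCALE(idct_1d(DEQUANTIZE(coef[i], quant[i])),
 *                          CONST_BITS - PASS1_BITS);
 *   // pass 2: descale, then recenter/clamp around CENTERJSAMPLE
 *   out[i] = range_limit(DESCALE(idct_1d(workspace[i]),
 *                                CONST_BITS + PASS1_BITS + 3) + CENTERJSAMPLE);
 *
 * where DEQUANTIZE(c, q) = c * q and DESCALE(x, n) is a rounding right shift
 * by n.  The NEON code below fuses both passes; the smull/smlal comments
 * mirror the corresponding MULTIPLY() terms.
 */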
#define CONST_BITS  13
#define PASS1_BITS  2

#define XFIX_P_0_298  v0.h[0]
#define XFIX_N_0_390  v0.h[1]
#define XFIX_P_0_541  v0.h[2]
#define XFIX_P_0_765  v0.h[3]
#define XFIX_N_0_899  v0.h[4]
#define XFIX_P_1_175  v0.h[5]
#define XFIX_P_1_501  v0.h[6]
#define XFIX_N_1_847  v0.h[7]
#define XFIX_N_1_961  v1.h[0]
#define XFIX_P_2_053  v1.h[1]
#define XFIX_N_2_562  v1.h[2]
#define XFIX_P_3_072  v1.h[3]
asm_function jsimd_idct_islow_neon
    DCT_TABLE   .req x0
    COEF_BLOCK  .req x1
    OUTPUT_BUF  .req x2
    OUTPUT_COL  .req x3
    TMP1        .req x0
    TMP2        .req x1
    TMP3        .req x9
    TMP4        .req x10
    TMP5        .req x11
    TMP6        .req x12
    TMP7        .req x13
    TMP8        .req x14

    /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
       guarantee that the upper (unused) 32 bits of x3 are valid.  This
       instruction ensures that those bits are set to zero. */
    uxtw x3, w3
    sub sp, sp, #64
    get_symbol_loc x15, Ljsimd_idct_islow_neon_consts
    mov x10, sp
    st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], #32
    st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], #32
    ld1 {v0.8h, v1.8h}, [x15]
    ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [COEF_BLOCK], #64
    ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [DCT_TABLE], #64
    ld1 {v6.8h, v7.8h, v8.8h, v9.8h}, [COEF_BLOCK], #64
    ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [DCT_TABLE], #64
    cmeq v16.8h, v3.8h, #0
    cmeq v26.8h, v4.8h, #0
    cmeq v27.8h, v5.8h, #0
    cmeq v28.8h, v6.8h, #0
    cmeq v29.8h, v7.8h, #0
    cmeq v30.8h, v8.8h, #0
    cmeq v31.8h, v9.8h, #0
    and v10.16b, v16.16b, v26.16b
    and v11.16b, v27.16b, v28.16b
    and v12.16b, v29.16b, v30.16b
    and v13.16b, v31.16b, v10.16b
    and v14.16b, v11.16b, v12.16b
    mul v2.8h, v2.8h, v18.8h
    and v15.16b, v13.16b, v14.16b
    shl v10.8h, v2.8h, #(PASS1_BITS)
    sqxtn v16.8b, v15.8h
    mov TMP1, v16.d[0]
    mvn TMP2, TMP1
    cbnz TMP2, 2f
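    /* The cmeq/and/sqxtn sequence above collapses rows 1-7 of the block into
     * a 64-bit mask in TMP1: each byte is 0xFF only if the corresponding
     * column of those rows is entirely zero, so TMP2 (its complement) is zero
     * exactly when every coefficient outside row 0 is zero.  The cbnz then
     * jumps to the general path at 2: when any such coefficient is nonzero;
     * falling through handles the common case cheaply, since a column whose
     * only nonzero entry is in row 0 has a constant 1-D IDCT, so pass 1
     * reduces to broadcasting the scaled row-0 values. */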
    /* case all AC coeffs are zeros */
    dup v2.2d, v10.d[0]
    dup v6.2d, v10.d[1]
    mov v3.16b, v2.16b
    mov v7.16b, v6.16b
    mov v4.16b, v2.16b
    mov v8.16b, v6.16b
    mov v5.16b, v2.16b
    mov v9.16b, v6.16b
1:
    /* for this transpose, we should organise data like this:
     * 00, 01, 02, 03, 40, 41, 42, 43
     * 10, 11, 12, 13, 50, 51, 52, 53
     * 20, 21, 22, 23, 60, 61, 62, 63
     * 30, 31, 32, 33, 70, 71, 72, 73
     * 04, 05, 06, 07, 44, 45, 46, 47
     * 14, 15, 16, 17, 54, 55, 56, 57
     * 24, 25, 26, 27, 64, 65, 66, 67
     * 34, 35, 36, 37, 74, 75, 76, 77
     */
    trn1 v28.8h, v2.8h, v3.8h
    trn1 v29.8h, v4.8h, v5.8h
    trn1 v30.8h, v6.8h, v7.8h
    trn1 v31.8h, v8.8h, v9.8h
    trn2 v16.8h, v2.8h, v3.8h
    trn2 v17.8h, v4.8h, v5.8h
    trn2 v18.8h, v6.8h, v7.8h
    trn2 v19.8h, v8.8h, v9.8h
    trn1 v2.4s, v28.4s, v29.4s
    trn1 v6.4s, v30.4s, v31.4s
    trn1 v3.4s, v16.4s, v17.4s
    trn1 v7.4s, v18.4s, v19.4s
    trn2 v4.4s, v28.4s, v29.4s
    trn2 v8.4s, v30.4s, v31.4s
    trn2 v5.4s, v16.4s, v17.4s
    trn2 v9.4s, v18.4s, v19.4s
    /* Even part: reverse the even part of the forward DCT. */
    add v18.8h, v4.8h, v8.8h  /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
    add v22.8h, v2.8h, v6.8h  /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
    smull2 v19.4s, v18.8h, XFIX_P_0_541  /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
    sub v26.8h, v2.8h, v6.8h  /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
    smull v18.4s, v18.4h, XFIX_P_0_541  /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
    sshll2 v23.4s, v22.8h, #(CONST_BITS)  /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
    mov v21.16b, v19.16b  /* tmp3 = z1 */
    mov v20.16b, v18.16b  /* tmp3 = z1 */
    smlal2 v19.4s, v8.8h, XFIX_N_1_847  /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
    smlal v18.4s, v8.4h, XFIX_N_1_847  /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
    sshll2 v27.4s, v26.8h, #(CONST_BITS)  /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
    smlal2 v21.4s, v4.8h, XFIX_P_0_765  /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
    smlal v20.4s, v4.4h, XFIX_P_0_765  /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
    sshll v22.4s, v22.4h, #(CONST_BITS)  /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
    sshll v26.4s, v26.4h, #(CONST_BITS)  /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
    add v2.4s, v22.4s, v20.4s  /* tmp10l tmp10 = tmp0 + tmp3; */
    sub v6.4s, v22.4s, v20.4s  /* tmp13l tmp13 = tmp0 - tmp3; */
    add v8.4s, v26.4s, v18.4s  /* tmp11l tmp11 = tmp1 + tmp2; */
    sub v4.4s, v26.4s, v18.4s  /* tmp12l tmp12 = tmp1 - tmp2; */
    add v28.4s, v23.4s, v21.4s  /* tmp10h tmp10 = tmp0 + tmp3; */
    sub v31.4s, v23.4s, v21.4s  /* tmp13h tmp13 = tmp0 - tmp3; */
    add v29.4s, v27.4s, v19.4s  /* tmp11h tmp11 = tmp1 + tmp2; */
    sub v30.4s, v27.4s, v19.4s  /* tmp12h tmp12 = tmp1 - tmp2; */
    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    add v22.8h, v9.8h, v5.8h  /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
    add v24.8h, v7.8h, v3.8h  /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
    add v18.8h, v9.8h, v3.8h  /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
    add v20.8h, v7.8h, v5.8h  /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
    add v26.8h, v22.8h, v24.8h  /* z5 = z3 + z4 */
    smull2 v11.4s, v9.8h, XFIX_P_0_298  /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
    smull2 v13.4s, v7.8h, XFIX_P_2_053  /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
    smull2 v15.4s, v5.8h, XFIX_P_3_072  /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
    smull2 v17.4s, v3.8h, XFIX_P_1_501  /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
    smull2 v27.4s, v26.8h, XFIX_P_1_175  /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
    smull2 v23.4s, v22.8h, XFIX_N_1_961  /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
    smull2 v25.4s, v24.8h, XFIX_N_0_390  /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
    smull2 v19.4s, v18.8h, XFIX_N_0_899  /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
    smull2 v21.4s, v20.8h, XFIX_N_2_562  /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
    smull v10.4s, v9.4h, XFIX_P_0_298  /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
    smull v12.4s, v7.4h, XFIX_P_2_053  /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
    smull v14.4s, v5.4h, XFIX_P_3_072  /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
    smull v16.4s, v3.4h, XFIX_P_1_501  /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
    smull v26.4s, v26.4h, XFIX_P_1_175  /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
    smull v22.4s, v22.4h, XFIX_N_1_961  /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
    smull v24.4s, v24.4h, XFIX_N_0_390  /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
    smull v18.4s, v18.4h, XFIX_N_0_899  /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
    smull v20.4s, v20.4h, XFIX_N_2_562  /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
    add v23.4s, v23.4s, v27.4s  /* z3 += z5 */
    add v22.4s, v22.4s, v26.4s  /* z3 += z5 */
    add v25.4s, v25.4s, v27.4s  /* z4 += z5 */
    add v24.4s, v24.4s, v26.4s  /* z4 += z5 */
    add v11.4s, v11.4s, v19.4s  /* tmp0 += z1 */
    add v10.4s, v10.4s, v18.4s  /* tmp0 += z1 */
    add v13.4s, v13.4s, v21.4s  /* tmp1 += z2 */
    add v12.4s, v12.4s, v20.4s  /* tmp1 += z2 */
    add v15.4s, v15.4s, v21.4s  /* tmp2 += z2 */
    add v14.4s, v14.4s, v20.4s  /* tmp2 += z2 */
    add v17.4s, v17.4s, v19.4s  /* tmp3 += z1 */
    add v16.4s, v16.4s, v18.4s  /* tmp3 += z1 */
    add v11.4s, v11.4s, v23.4s  /* tmp0 += z3 */
    add v10.4s, v10.4s, v22.4s  /* tmp0 += z3 */
    add v13.4s, v13.4s, v25.4s  /* tmp1 += z4 */
    add v12.4s, v12.4s, v24.4s  /* tmp1 += z4 */
    add v17.4s, v17.4s, v25.4s  /* tmp3 += z4 */
    add v16.4s, v16.4s, v24.4s  /* tmp3 += z4 */
    add v15.4s, v15.4s, v23.4s  /* tmp2 += z3 */
    add v14.4s, v14.4s, v22.4s  /* tmp2 += z3 */
    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    add v18.4s, v2.4s, v16.4s  /* tmp10 + tmp3 */
    add v19.4s, v28.4s, v17.4s  /* tmp10 + tmp3 */
    sub v20.4s, v2.4s, v16.4s  /* tmp10 - tmp3 */
    sub v21.4s, v28.4s, v17.4s  /* tmp10 - tmp3 */
    add v22.4s, v8.4s, v14.4s  /* tmp11 + tmp2 */
    add v23.4s, v29.4s, v15.4s  /* tmp11 + tmp2 */
    sub v24.4s, v8.4s, v14.4s  /* tmp11 - tmp2 */
    sub v25.4s, v29.4s, v15.4s  /* tmp11 - tmp2 */
    add v26.4s, v4.4s, v12.4s  /* tmp12 + tmp1 */
    add v27.4s, v30.4s, v13.4s  /* tmp12 + tmp1 */
    sub v28.4s, v4.4s, v12.4s  /* tmp12 - tmp1 */
    sub v29.4s, v30.4s, v13.4s  /* tmp12 - tmp1 */
    add v14.4s, v6.4s, v10.4s  /* tmp13 + tmp0 */
    add v15.4s, v31.4s, v11.4s  /* tmp13 + tmp0 */
    sub v16.4s, v6.4s, v10.4s  /* tmp13 - tmp0 */
    sub v17.4s, v31.4s, v11.4s  /* tmp13 - tmp0 */
    shrn v2.4h, v18.4s, #16  /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
    shrn v9.4h, v20.4s, #16  /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
    shrn v3.4h, v22.4s, #16  /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
    shrn v8.4h, v24.4s, #16  /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
    shrn v4.4h, v26.4s, #16  /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
    shrn v7.4h, v28.4s, #16  /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
    shrn v5.4h, v14.4s, #16  /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
    shrn v6.4h, v16.4s, #16  /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
    shrn2 v2.8h, v19.4s, #16  /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
    shrn2 v9.8h, v21.4s, #16  /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
    shrn2 v3.8h, v23.4s, #16  /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
    shrn2 v8.8h, v25.4s, #16  /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
    shrn2 v4.8h, v27.4s, #16  /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
    shrn2 v7.8h, v29.4s, #16  /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
    shrn2 v5.8h, v15.4s, #16  /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
    shrn2 v6.8h, v17.4s, #16  /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
    movi v0.16b, #(CENTERJSAMPLE)
    /* Prepare pointers (dual-issue with NEON instructions) */
    ldp TMP1, TMP2, [OUTPUT_BUF], 16
    sqrshrn v28.8b, v2.8h, #(CONST_BITS+PASS1_BITS+3-16)
    ldp TMP3, TMP4, [OUTPUT_BUF], 16
    sqrshrn v29.8b, v3.8h, #(CONST_BITS+PASS1_BITS+3-16)
    add TMP1, TMP1, OUTPUT_COL
    sqrshrn v30.8b, v4.8h, #(CONST_BITS+PASS1_BITS+3-16)
    add TMP2, TMP2, OUTPUT_COL
    sqrshrn v31.8b, v5.8h, #(CONST_BITS+PASS1_BITS+3-16)
    add TMP3, TMP3, OUTPUT_COL
    sqrshrn2 v28.16b, v6.8h, #(CONST_BITS+PASS1_BITS+3-16)
    add TMP4, TMP4, OUTPUT_COL
    sqrshrn2 v29.16b, v7.8h, #(CONST_BITS+PASS1_BITS+3-16)
    ldp TMP5, TMP6, [OUTPUT_BUF], 16
    sqrshrn2 v30.16b, v8.8h, #(CONST_BITS+PASS1_BITS+3-16)
    ldp TMP7, TMP8, [OUTPUT_BUF], 16
    sqrshrn2 v31.16b, v9.8h, #(CONST_BITS+PASS1_BITS+3-16)
    add TMP5, TMP5, OUTPUT_COL
    add v16.16b, v28.16b, v0.16b
    add TMP6, TMP6, OUTPUT_COL
    add v18.16b, v29.16b, v0.16b
    add TMP7, TMP7, OUTPUT_COL
    add v20.16b, v30.16b, v0.16b
    add TMP8, TMP8, OUTPUT_COL
    add v22.16b, v31.16b, v0.16b
    /* Transpose the final 8-bit samples */
    trn1 v28.16b, v16.16b, v18.16b
    trn1 v30.16b, v20.16b, v22.16b
    trn2 v29.16b, v16.16b, v18.16b
    trn2 v31.16b, v20.16b, v22.16b
    trn1 v16.8h, v28.8h, v30.8h
    trn2 v18.8h, v28.8h, v30.8h
    trn1 v20.8h, v29.8h, v31.8h
    trn2 v22.8h, v29.8h, v31.8h
    uzp1 v28.4s, v16.4s, v18.4s
    uzp2 v30.4s, v16.4s, v18.4s
    uzp1 v29.4s, v20.4s, v22.4s
    uzp2 v31.4s, v20.4s, v22.4s
    /* Store results to the output buffer */
    st1 {v28.d}[0], [TMP1]
    st1 {v29.d}[0], [TMP2]
    st1 {v28.d}[1], [TMP3]
    st1 {v29.d}[1], [TMP4]
    st1 {v30.d}[0], [TMP5]
    st1 {v31.d}[0], [TMP6]
    st1 {v30.d}[1], [TMP7]
    st1 {v31.d}[1], [TMP8]
    ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], #32
    ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], #32
    blr x30
.balign 16
2:
    mul v3.8h, v3.8h, v19.8h
    mul v4.8h, v4.8h, v20.8h
    mul v5.8h, v5.8h, v21.8h
    add TMP4, xzr, TMP2, LSL #32
    mul v6.8h, v6.8h, v22.8h
    mul v7.8h, v7.8h, v23.8h
    adds TMP3, xzr, TMP2, LSR #32
    mul v8.8h, v8.8h, v24.8h
    mul v9.8h, v9.8h, v25.8h
    b.ne 3f
    /* Right AC coef is zero */
    dup v15.2d, v10.d[1]
    /* Even part: reverse the even part of the forward DCT. */
    add v18.4h, v4.4h, v8.4h  /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
    add v22.4h, v2.4h, v6.4h  /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
    sub v26.4h, v2.4h, v6.4h  /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
    smull v18.4s, v18.4h, XFIX_P_0_541  /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
    sshll v22.4s, v22.4h, #(CONST_BITS)  /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
    mov v20.16b, v18.16b  /* tmp3 = z1 */
    sshll v26.4s, v26.4h, #(CONST_BITS)  /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
    smlal v18.4s, v8.4h, XFIX_N_1_847  /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
    smlal v20.4s, v4.4h, XFIX_P_0_765  /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
    add v2.4s, v22.4s, v20.4s  /* tmp10l tmp10 = tmp0 + tmp3; */
    sub v6.4s, v22.4s, v20.4s  /* tmp13l tmp13 = tmp0 - tmp3; */
    add v8.4s, v26.4s, v18.4s  /* tmp11l tmp11 = tmp1 + tmp2; */
    sub v4.4s, v26.4s, v18.4s  /* tmp12l tmp12 = tmp1 - tmp2; */
    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    add v22.4h, v9.4h, v5.4h  /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
    add v24.4h, v7.4h, v3.4h  /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
    add v18.4h, v9.4h, v3.4h  /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
    add v20.4h, v7.4h, v5.4h  /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
    add v26.4h, v22.4h, v24.4h  /* z5 = z3 + z4 */
    smull v10.4s, v9.4h, XFIX_P_0_298  /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
    smull v12.4s, v7.4h, XFIX_P_2_053  /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
    smull v14.4s, v5.4h, XFIX_P_3_072  /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
    smull v16.4s, v3.4h, XFIX_P_1_501  /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
    smull v26.4s, v26.4h, XFIX_P_1_175  /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
    smull v22.4s, v22.4h, XFIX_N_1_961  /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
    smull v24.4s, v24.4h, XFIX_N_0_390  /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
    smull v18.4s, v18.4h, XFIX_N_0_899  /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
    smull v20.4s, v20.4h, XFIX_N_2_562  /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
    add v22.4s, v22.4s, v26.4s  /* z3 += z5 */
    add v24.4s, v24.4s, v26.4s  /* z4 += z5 */
    add v10.4s, v10.4s, v18.4s  /* tmp0 += z1 */
    add v12.4s, v12.4s, v20.4s  /* tmp1 += z2 */
    add v14.4s, v14.4s, v20.4s  /* tmp2 += z2 */
    add v16.4s, v16.4s, v18.4s  /* tmp3 += z1 */
    add v10.4s, v10.4s, v22.4s  /* tmp0 += z3 */
    add v12.4s, v12.4s, v24.4s  /* tmp1 += z4 */
    add v16.4s, v16.4s, v24.4s  /* tmp3 += z4 */
    add v14.4s, v14.4s, v22.4s  /* tmp2 += z3 */
    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    add v18.4s, v2.4s, v16.4s  /* tmp10 + tmp3 */
    sub v20.4s, v2.4s, v16.4s  /* tmp10 - tmp3 */
    add v22.4s, v8.4s, v14.4s  /* tmp11 + tmp2 */
    sub v24.4s, v8.4s, v14.4s  /* tmp11 - tmp2 */
    add v26.4s, v4.4s, v12.4s  /* tmp12 + tmp1 */
    sub v28.4s, v4.4s, v12.4s  /* tmp12 - tmp1 */
    add v14.4s, v6.4s, v10.4s  /* tmp13 + tmp0 */
    sub v16.4s, v6.4s, v10.4s  /* tmp13 - tmp0 */
    rshrn v2.4h, v18.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
    rshrn v3.4h, v22.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
    rshrn v4.4h, v26.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
    rshrn v5.4h, v14.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
    rshrn2 v2.8h, v16.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
    rshrn2 v3.8h, v28.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
    rshrn2 v4.8h, v24.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
    rshrn2 v5.8h, v20.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
    mov v6.16b, v15.16b
    mov v7.16b, v15.16b
    mov v8.16b, v15.16b
    mov v9.16b, v15.16b
    b 1b
.balign 16
3:
    cbnz TMP4, 4f
    /* Left AC coef is zero */
    dup v14.2d, v10.d[0]
    /* Even part: reverse the even part of the forward DCT. */
    add v18.8h, v4.8h, v8.8h  /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
    add v22.8h, v2.8h, v6.8h  /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
    smull2 v19.4s, v18.8h, XFIX_P_0_541  /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
    sub v26.8h, v2.8h, v6.8h  /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
    sshll2 v23.4s, v22.8h, #(CONST_BITS)  /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
    mov v21.16b, v19.16b  /* tmp3 = z1 */
    smlal2 v19.4s, v8.8h, XFIX_N_1_847  /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
    sshll2 v27.4s, v26.8h, #(CONST_BITS)  /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
    smlal2 v21.4s, v4.8h, XFIX_P_0_765  /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
    add v28.4s, v23.4s, v21.4s  /* tmp10h tmp10 = tmp0 + tmp3; */
    sub v31.4s, v23.4s, v21.4s  /* tmp13h tmp13 = tmp0 - tmp3; */
    add v29.4s, v27.4s, v19.4s  /* tmp11h tmp11 = tmp1 + tmp2; */
    sub v30.4s, v27.4s, v19.4s  /* tmp12h tmp12 = tmp1 - tmp2; */
    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    add v22.8h, v9.8h, v5.8h  /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
    add v24.8h, v7.8h, v3.8h  /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
    add v18.8h, v9.8h, v3.8h  /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
    add v20.8h, v7.8h, v5.8h  /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
    add v26.8h, v22.8h, v24.8h  /* z5 = z3 + z4 */
    smull2 v11.4s, v9.8h, XFIX_P_0_298  /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
    smull2 v13.4s, v7.8h, XFIX_P_2_053  /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
    smull2 v15.4s, v5.8h, XFIX_P_3_072  /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
    smull2 v17.4s, v3.8h, XFIX_P_1_501  /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
    smull2 v27.4s, v26.8h, XFIX_P_1_175  /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
    smull2 v23.4s, v22.8h, XFIX_N_1_961  /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
    smull2 v25.4s, v24.8h, XFIX_N_0_390  /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
    smull2 v19.4s, v18.8h, XFIX_N_0_899  /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
    smull2 v21.4s, v20.8h, XFIX_N_2_562  /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
    add v23.4s, v23.4s, v27.4s  /* z3 += z5 */
    add v22.4s, v22.4s, v26.4s  /* z3 += z5 */
    add v25.4s, v25.4s, v27.4s  /* z4 += z5 */
    add v24.4s, v24.4s, v26.4s  /* z4 += z5 */
    add v11.4s, v11.4s, v19.4s  /* tmp0 += z1 */
    add v13.4s, v13.4s, v21.4s  /* tmp1 += z2 */
    add v15.4s, v15.4s, v21.4s  /* tmp2 += z2 */
    add v17.4s, v17.4s, v19.4s  /* tmp3 += z1 */
    add v11.4s, v11.4s, v23.4s  /* tmp0 += z3 */
    add v13.4s, v13.4s, v25.4s  /* tmp1 += z4 */
    add v17.4s, v17.4s, v25.4s  /* tmp3 += z4 */
    add v15.4s, v15.4s, v23.4s  /* tmp2 += z3 */
    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    add v19.4s, v28.4s, v17.4s  /* tmp10 + tmp3 */
    sub v21.4s, v28.4s, v17.4s  /* tmp10 - tmp3 */
    add v23.4s, v29.4s, v15.4s  /* tmp11 + tmp2 */
    sub v25.4s, v29.4s, v15.4s  /* tmp11 - tmp2 */
    add v27.4s, v30.4s, v13.4s  /* tmp12 + tmp1 */
    sub v29.4s, v30.4s, v13.4s  /* tmp12 - tmp1 */
    add v15.4s, v31.4s, v11.4s  /* tmp13 + tmp0 */
    sub v17.4s, v31.4s, v11.4s  /* tmp13 - tmp0 */
    mov v2.16b, v14.16b
    mov v3.16b, v14.16b
    mov v4.16b, v14.16b
    mov v5.16b, v14.16b
    rshrn v6.4h, v19.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
    rshrn v7.4h, v23.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
    rshrn v8.4h, v27.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
    rshrn v9.4h, v15.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
    rshrn2 v6.8h, v17.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
    rshrn2 v7.8h, v29.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
    rshrn2 v8.8h, v25.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
    rshrn2 v9.8h, v21.4s, #(CONST_BITS-PASS1_BITS)  /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
    b 1b
  747. .balign 16
  748. 4:
  749. /* "No" AC coef is zero */
  750. /* Even part: reverse the even part of the forward DCT. */
  751. add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
  752. add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  753. smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  754. sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  755. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  756. sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  757. mov v21.16b, v19.16b /* tmp3 = z1 */
  758. mov v20.16b, v18.16b /* tmp3 = z1 */
  759. smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  760. smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  761. sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  762. smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  763. smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  764. sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  765. sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  766. add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
  767. sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
  768. add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
  769. sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
  770. add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
  771. sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
  772. add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
  773. sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
  774. /* Odd part per figure 8; the matrix is unitary and hence its
  775. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  776. */
  777. add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  778. add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  779. add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  780. add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  781. add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
  782. smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  783. smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  784. smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  785. smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  786. smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  787. smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  788. smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  789. smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  790. smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  791. smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  792. smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  793. smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  794. smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  795. smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  796. smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  797. smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  798. smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  799. smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  800. add v23.4s, v23.4s, v27.4s /* z3 += z5 */
  801. add v22.4s, v22.4s, v26.4s /* z3 += z5 */
  802. add v25.4s, v25.4s, v27.4s /* z4 += z5 */
  803. add v24.4s, v24.4s, v26.4s /* z4 += z5 */
  804. add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
  805. add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
  806. add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
  807. add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
  808. add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
  809. add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
  810. add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
  811. add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
  812. add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
  813. add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
  814. add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
  815. add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
  816. add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
  817. add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
  818. add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
  819. add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
  820. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
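/* A minimal C sketch of what the adds/subs and rshrn/rshrn2 below implement
 * (DESCALE is the usual rounding right shift from jidctint.c):
 *
 *   #define DESCALE(x, n)  (((x) + (1 << ((n) - 1))) >> (n))
 *   wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS - PASS1_BITS);
 *   wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS - PASS1_BITS);
 *   ...and likewise for the tmp11/tmp2, tmp12/tmp1, and tmp13/tmp0 pairs.
 *
 * Each rshrn/rshrn2 is a rounding narrowing shift, so it performs the
 * rounding addition and the right shift in a single instruction.
 */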
  821. add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
  822. add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
  823. sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
  824. sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
  825. add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
  826. add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
  827. sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
  828. sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
  829. add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
  830. add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
  831. sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
  832. sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
  833. add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
  834. add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
  835. sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
  836. sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
  837. rshrn v2.4h, v18.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
  838. rshrn v3.4h, v22.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
  839. rshrn v4.4h, v26.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
  840. rshrn v5.4h, v14.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
  841. rshrn v6.4h, v19.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
  842. rshrn v7.4h, v23.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
  843. rshrn v8.4h, v27.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
  844. rshrn v9.4h, v15.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
  845. rshrn2 v2.8h, v16.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
  846. rshrn2 v3.8h, v28.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
  847. rshrn2 v4.8h, v24.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
  848. rshrn2 v5.8h, v20.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
  849. rshrn2 v6.8h, v17.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
  850. rshrn2 v7.8h, v29.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
  851. rshrn2 v8.8h, v25.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
  852. rshrn2 v9.8h, v21.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
  853. b 1b
  854. .unreq DCT_TABLE
  855. .unreq COEF_BLOCK
  856. .unreq OUTPUT_BUF
  857. .unreq OUTPUT_COL
  858. .unreq TMP1
  859. .unreq TMP2
  860. .unreq TMP3
  861. .unreq TMP4
  862. .unreq TMP5
  863. .unreq TMP6
  864. .unreq TMP7
  865. .unreq TMP8
  866. #undef CENTERJSAMPLE
  867. #undef CONST_BITS
  868. #undef PASS1_BITS
  869. #undef XFIX_P_0_298
  870. #undef XFIX_N_0_390
  871. #undef XFIX_P_0_541
  872. #undef XFIX_P_0_765
  873. #undef XFIX_N_0_899
  874. #undef XFIX_P_1_175
  875. #undef XFIX_P_1_501
  876. #undef XFIX_N_1_847
  877. #undef XFIX_N_1_961
  878. #undef XFIX_P_2_053
  879. #undef XFIX_N_2_562
  880. #undef XFIX_P_3_072
  881. /*****************************************************************************/
/*
 * jsimd_idct_ifast_neon
 *
 * This function contains a fast, but less accurate, integer implementation
 * of the inverse DCT (Discrete Cosine Transform). It uses the same
 * calculations and produces exactly the same output as IJG's original
 * 'jpeg_idct_ifast' function from jidctfst.c.
 *
 * Normally a 1-D AAN IDCT needs 5 multiplications and 29 additions.
 * In the ARM NEON case, however, some extra additions are required because
 * the VQDMULH instruction (SQDMULH in A64 syntax) cannot handle constants
 * larger than 1. Expressions like "x * 1.082392200" therefore have to be
 * converted to "x * 0.082392200 + x", which introduces one extra addition
 * each. Overall, there are 6 extra additions per 1-D IDCT pass, for a total
 * of 5 VQDMULH and 35 VADD/VSUB instructions.
 */
  897. #define XFIX_1_082392200 v0.h[0]
  898. #define XFIX_1_414213562 v0.h[1]
  899. #define XFIX_1_847759065 v0.h[2]
  900. #define XFIX_2_613125930 v0.h[3]
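/* Illustrative sketch of the constant-splitting trick described above (an
 * assumption here is that each XFIX_* slot holds the fractional part of the
 * corresponding constant scaled by 2^15, which is what SQDMULH expects):
 *
 *   // models SQDMULH, ignoring saturation
 *   int16_t sqdmulh(int16_t a, int16_t b) {
 *     return (int16_t)(((int32_t)a * b * 2) >> 16);
 *   }
 *   // x * 1.414213562  ~=  x + sqdmulh(x, (int16_t)(0.414213562 * 32768))
 *   // x * 2.613125930  ~=  2*x + sqdmulh(x, (int16_t)(0.613125930 * 32768))
 *
 * This is why each SQDMULH in the passes below is paired with an extra ADD
 * (or a doubling ADD) of the original value.
 */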
  901. asm_function jsimd_idct_ifast_neon
  902. DCT_TABLE .req x0
  903. COEF_BLOCK .req x1
  904. OUTPUT_BUF .req x2
  905. OUTPUT_COL .req x3
  906. TMP1 .req x0
  907. TMP2 .req x1
  908. TMP3 .req x9
  909. TMP4 .req x10
  910. TMP5 .req x11
  911. TMP6 .req x12
  912. TMP7 .req x13
  913. TMP8 .req x14
  914. /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  915. guarantee that the upper (unused) 32 bits of x3 are valid. This
  916. instruction ensures that those bits are set to zero. */
  917. uxtw x3, w3
  918. /* Load and dequantize coefficients into NEON registers
  919. * with the following allocation:
  920. * 0 1 2 3 | 4 5 6 7
  921. * ---------+--------
  922. * 0 | d16 | d17 ( v16.8h )
  923. * 1 | d18 | d19 ( v17.8h )
  924. * 2 | d20 | d21 ( v18.8h )
  925. * 3 | d22 | d23 ( v19.8h )
  926. * 4 | d24 | d25 ( v20.8h )
  927. * 5 | d26 | d27 ( v21.8h )
  928. * 6 | d28 | d29 ( v22.8h )
  929. * 7 | d30 | d31 ( v23.8h )
  930. */
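/* The interleaved loads and multiplies below implement the usual
 * dequantization step; in C terms (sketch), for each of the 64 coefficients:
 *
 *   workspace[i] = (DCTELEM)coef_block[i] * (DCTELEM)quant_table[i];
 *
 * The loads from COEF_BLOCK and DCT_TABLE are interleaved with the MUL
 * instructions to help hide memory latency.
 */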
  931. /* Save NEON registers used in fast IDCT */
  932. get_symbol_loc TMP5, Ljsimd_idct_ifast_neon_consts
  933. ld1 {v16.8h, v17.8h}, [COEF_BLOCK], 32
  934. ld1 {v0.8h, v1.8h}, [DCT_TABLE], 32
  935. ld1 {v18.8h, v19.8h}, [COEF_BLOCK], 32
  936. mul v16.8h, v16.8h, v0.8h
  937. ld1 {v2.8h, v3.8h}, [DCT_TABLE], 32
  938. mul v17.8h, v17.8h, v1.8h
  939. ld1 {v20.8h, v21.8h}, [COEF_BLOCK], 32
  940. mul v18.8h, v18.8h, v2.8h
  941. ld1 {v0.8h, v1.8h}, [DCT_TABLE], 32
  942. mul v19.8h, v19.8h, v3.8h
  943. ld1 {v22.8h, v23.8h}, [COEF_BLOCK], 32
  944. mul v20.8h, v20.8h, v0.8h
  945. ld1 {v2.8h, v3.8h}, [DCT_TABLE], 32
  946. mul v22.8h, v22.8h, v2.8h
  947. mul v21.8h, v21.8h, v1.8h
  948. ld1 {v0.4h}, [TMP5] /* load constants */
  949. mul v23.8h, v23.8h, v3.8h
  950. /* 1-D IDCT, pass 1 */
  951. sub v2.8h, v18.8h, v22.8h
  952. add v22.8h, v18.8h, v22.8h
  953. sub v1.8h, v19.8h, v21.8h
  954. add v21.8h, v19.8h, v21.8h
  955. sub v5.8h, v17.8h, v23.8h
  956. add v23.8h, v17.8h, v23.8h
  957. sqdmulh v4.8h, v2.8h, XFIX_1_414213562
  958. sqdmulh v6.8h, v1.8h, XFIX_2_613125930
  959. add v3.8h, v1.8h, v1.8h
  960. sub v1.8h, v5.8h, v1.8h
  961. add v18.8h, v2.8h, v4.8h
  962. sqdmulh v4.8h, v1.8h, XFIX_1_847759065
  963. sub v2.8h, v23.8h, v21.8h
  964. add v3.8h, v3.8h, v6.8h
  965. sqdmulh v6.8h, v2.8h, XFIX_1_414213562
  966. add v1.8h, v1.8h, v4.8h
  967. sqdmulh v4.8h, v5.8h, XFIX_1_082392200
  968. sub v18.8h, v18.8h, v22.8h
  969. add v2.8h, v2.8h, v6.8h
  970. sub v6.8h, v16.8h, v20.8h
  971. add v20.8h, v16.8h, v20.8h
  972. add v17.8h, v5.8h, v4.8h
  973. add v5.8h, v6.8h, v18.8h
  974. sub v18.8h, v6.8h, v18.8h
  975. add v6.8h, v23.8h, v21.8h
  976. add v16.8h, v20.8h, v22.8h
  977. sub v3.8h, v6.8h, v3.8h
  978. sub v20.8h, v20.8h, v22.8h
  979. sub v3.8h, v3.8h, v1.8h
  980. sub v1.8h, v17.8h, v1.8h
  981. add v2.8h, v3.8h, v2.8h
  982. sub v23.8h, v16.8h, v6.8h
  983. add v1.8h, v1.8h, v2.8h
  984. add v16.8h, v16.8h, v6.8h
  985. add v22.8h, v5.8h, v3.8h
  986. sub v17.8h, v5.8h, v3.8h
  987. sub v21.8h, v18.8h, v2.8h
  988. add v18.8h, v18.8h, v2.8h
  989. sub v19.8h, v20.8h, v1.8h
  990. add v20.8h, v20.8h, v1.8h
  991. transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v28, v29, v30, v31
  992. /* 1-D IDCT, pass 2 */
  993. sub v2.8h, v18.8h, v22.8h
  994. add v22.8h, v18.8h, v22.8h
  995. sub v1.8h, v19.8h, v21.8h
  996. add v21.8h, v19.8h, v21.8h
  997. sub v5.8h, v17.8h, v23.8h
  998. add v23.8h, v17.8h, v23.8h
  999. sqdmulh v4.8h, v2.8h, XFIX_1_414213562
  1000. sqdmulh v6.8h, v1.8h, XFIX_2_613125930
  1001. add v3.8h, v1.8h, v1.8h
  1002. sub v1.8h, v5.8h, v1.8h
  1003. add v18.8h, v2.8h, v4.8h
  1004. sqdmulh v4.8h, v1.8h, XFIX_1_847759065
  1005. sub v2.8h, v23.8h, v21.8h
  1006. add v3.8h, v3.8h, v6.8h
  1007. sqdmulh v6.8h, v2.8h, XFIX_1_414213562
  1008. add v1.8h, v1.8h, v4.8h
  1009. sqdmulh v4.8h, v5.8h, XFIX_1_082392200
  1010. sub v18.8h, v18.8h, v22.8h
  1011. add v2.8h, v2.8h, v6.8h
  1012. sub v6.8h, v16.8h, v20.8h
  1013. add v20.8h, v16.8h, v20.8h
  1014. add v17.8h, v5.8h, v4.8h
  1015. add v5.8h, v6.8h, v18.8h
  1016. sub v18.8h, v6.8h, v18.8h
  1017. add v6.8h, v23.8h, v21.8h
  1018. add v16.8h, v20.8h, v22.8h
  1019. sub v3.8h, v6.8h, v3.8h
  1020. sub v20.8h, v20.8h, v22.8h
  1021. sub v3.8h, v3.8h, v1.8h
  1022. sub v1.8h, v17.8h, v1.8h
  1023. add v2.8h, v3.8h, v2.8h
  1024. sub v23.8h, v16.8h, v6.8h
  1025. add v1.8h, v1.8h, v2.8h
  1026. add v16.8h, v16.8h, v6.8h
  1027. add v22.8h, v5.8h, v3.8h
  1028. sub v17.8h, v5.8h, v3.8h
  1029. sub v21.8h, v18.8h, v2.8h
  1030. add v18.8h, v18.8h, v2.8h
  1031. sub v19.8h, v20.8h, v1.8h
  1032. add v20.8h, v20.8h, v1.8h
  1033. /* Descale to 8-bit and range limit */
  1034. movi v0.16b, #0x80
  1035. /* Prepare pointers (dual-issue with NEON instructions) */
  1036. ldp TMP1, TMP2, [OUTPUT_BUF], 16
  1037. sqshrn v28.8b, v16.8h, #5
  1038. ldp TMP3, TMP4, [OUTPUT_BUF], 16
  1039. sqshrn v29.8b, v17.8h, #5
  1040. add TMP1, TMP1, OUTPUT_COL
  1041. sqshrn v30.8b, v18.8h, #5
  1042. add TMP2, TMP2, OUTPUT_COL
  1043. sqshrn v31.8b, v19.8h, #5
  1044. add TMP3, TMP3, OUTPUT_COL
  1045. sqshrn2 v28.16b, v20.8h, #5
  1046. add TMP4, TMP4, OUTPUT_COL
  1047. sqshrn2 v29.16b, v21.8h, #5
  1048. ldp TMP5, TMP6, [OUTPUT_BUF], 16
  1049. sqshrn2 v30.16b, v22.8h, #5
  1050. ldp TMP7, TMP8, [OUTPUT_BUF], 16
  1051. sqshrn2 v31.16b, v23.8h, #5
  1052. add TMP5, TMP5, OUTPUT_COL
  1053. add v16.16b, v28.16b, v0.16b
  1054. add TMP6, TMP6, OUTPUT_COL
  1055. add v18.16b, v29.16b, v0.16b
  1056. add TMP7, TMP7, OUTPUT_COL
  1057. add v20.16b, v30.16b, v0.16b
  1058. add TMP8, TMP8, OUTPUT_COL
  1059. add v22.16b, v31.16b, v0.16b
  1060. /* Transpose the final 8-bit samples */
  1061. trn1 v28.16b, v16.16b, v18.16b
  1062. trn1 v30.16b, v20.16b, v22.16b
  1063. trn2 v29.16b, v16.16b, v18.16b
  1064. trn2 v31.16b, v20.16b, v22.16b
  1065. trn1 v16.8h, v28.8h, v30.8h
  1066. trn2 v18.8h, v28.8h, v30.8h
  1067. trn1 v20.8h, v29.8h, v31.8h
  1068. trn2 v22.8h, v29.8h, v31.8h
  1069. uzp1 v28.4s, v16.4s, v18.4s
  1070. uzp2 v30.4s, v16.4s, v18.4s
  1071. uzp1 v29.4s, v20.4s, v22.4s
  1072. uzp2 v31.4s, v20.4s, v22.4s
  1073. /* Store results to the output buffer */
  1074. st1 {v28.d}[0], [TMP1]
  1075. st1 {v29.d}[0], [TMP2]
  1076. st1 {v28.d}[1], [TMP3]
  1077. st1 {v29.d}[1], [TMP4]
  1078. st1 {v30.d}[0], [TMP5]
  1079. st1 {v31.d}[0], [TMP6]
  1080. st1 {v30.d}[1], [TMP7]
  1081. st1 {v31.d}[1], [TMP8]
  1082. blr x30
  1083. .unreq DCT_TABLE
  1084. .unreq COEF_BLOCK
  1085. .unreq OUTPUT_BUF
  1086. .unreq OUTPUT_COL
  1087. .unreq TMP1
  1088. .unreq TMP2
  1089. .unreq TMP3
  1090. .unreq TMP4
  1091. .unreq TMP5
  1092. .unreq TMP6
  1093. .unreq TMP7
  1094. .unreq TMP8
  1095. /*****************************************************************************/
/*
 * jsimd_idct_4x4_neon
 *
 * This function contains inverse-DCT code for producing reduced-size
 * 4x4 pixel output from an 8x8 DCT block. It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_idct_4x4'
 * function from jpeg-6b (jidctred.c).
 *
 * NOTE: jpeg-8 has an improved implementation of the 4x4 inverse DCT, which
 * requires far fewer arithmetic operations and hence should be faster.
 * The primary purpose of this particular NEON-optimized function is
 * bit-exact compatibility with jpeg-6b.
 *
 * TODO: slightly better instruction scheduling could be achieved by
 * expanding the idct_helper/transpose_4x4 macros and reordering
 * instructions, but readability would suffer somewhat.
 */
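/* Structure of the idct_helper macro below, in rough C terms (a sketch; the
 * cN names stand for the halfword constants loaded from the
 * Ljsimd_idct_4x4_neon_consts table):
 *
 *   even = c0*in0 + c1*in2 + c2*in6;
 *   odd  = c3*in7 + c4*in5 + c5*in3 + c6*in1;
 *   out0 = DESCALE(even + odd, shift);
 *   out3 = DESCALE(even - odd, shift);
 *   // out1/out2 are formed the same way from a second pair of accumulators
 *
 * The ".if \shift > 16" branch exists because rshrn can shift by at most 16
 * bits, so the larger pass-2 descale is done with srshr followed by xtn.
 */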
  1113. .macro idct_helper x4, x6, x8, x10, x12, x14, x16, shift, y26, y27, y28, y29
  1114. smull v28.4s, \x4, v2.h[2]
  1115. smlal v28.4s, \x8, v0.h[0]
  1116. smlal v28.4s, \x14, v0.h[1]
  1117. smull v26.4s, \x16, v1.h[2]
  1118. smlal v26.4s, \x12, v1.h[3]
  1119. smlal v26.4s, \x10, v2.h[0]
  1120. smlal v26.4s, \x6, v2.h[1]
  1121. smull v30.4s, \x4, v2.h[2]
  1122. smlsl v30.4s, \x8, v0.h[0]
  1123. smlsl v30.4s, \x14, v0.h[1]
  1124. smull v24.4s, \x16, v0.h[2]
  1125. smlal v24.4s, \x12, v0.h[3]
  1126. smlal v24.4s, \x10, v1.h[0]
  1127. smlal v24.4s, \x6, v1.h[1]
  1128. add v20.4s, v28.4s, v26.4s
  1129. sub v28.4s, v28.4s, v26.4s
  1130. .if \shift > 16
  1131. srshr v20.4s, v20.4s, #\shift
  1132. srshr v28.4s, v28.4s, #\shift
  1133. xtn \y26, v20.4s
  1134. xtn \y29, v28.4s
  1135. .else
  1136. rshrn \y26, v20.4s, #\shift
  1137. rshrn \y29, v28.4s, #\shift
  1138. .endif
  1139. add v20.4s, v30.4s, v24.4s
  1140. sub v30.4s, v30.4s, v24.4s
  1141. .if \shift > 16
  1142. srshr v20.4s, v20.4s, #\shift
  1143. srshr v30.4s, v30.4s, #\shift
  1144. xtn \y27, v20.4s
  1145. xtn \y28, v30.4s
  1146. .else
  1147. rshrn \y27, v20.4s, #\shift
  1148. rshrn \y28, v30.4s, #\shift
  1149. .endif
  1150. .endm
  1151. asm_function jsimd_idct_4x4_neon
  1152. DCT_TABLE .req x0
  1153. COEF_BLOCK .req x1
  1154. OUTPUT_BUF .req x2
  1155. OUTPUT_COL .req x3
  1156. TMP1 .req x0
  1157. TMP2 .req x1
  1158. TMP3 .req x2
  1159. TMP4 .req x15
  1160. /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  1161. guarantee that the upper (unused) 32 bits of x3 are valid. This
  1162. instruction ensures that those bits are set to zero. */
  1163. uxtw x3, w3
  1164. /* Save all used NEON registers */
  1165. sub sp, sp, 64
  1166. mov x9, sp
  1167. /* Load constants (v3.4h is just used for padding) */
  1168. get_symbol_loc TMP4, Ljsimd_idct_4x4_neon_consts
  1169. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1170. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1171. ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [TMP4]
  1172. /* Load all COEF_BLOCK into NEON registers with the following allocation:
  1173. * 0 1 2 3 | 4 5 6 7
  1174. * ---------+--------
  1175. * 0 | v4.4h | v5.4h
  1176. * 1 | v6.4h | v7.4h
  1177. * 2 | v8.4h | v9.4h
  1178. * 3 | v10.4h | v11.4h
  1179. * 4 | - | -
  1180. * 5 | v12.4h | v13.4h
  1181. * 6 | v14.4h | v15.4h
  1182. * 7 | v16.4h | v17.4h
  1183. */
  1184. ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
  1185. ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [COEF_BLOCK], 32
  1186. add COEF_BLOCK, COEF_BLOCK, #16
  1187. ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [COEF_BLOCK], 32
  1188. ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
  1189. /* dequantize */
  1190. ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
  1191. mul v4.4h, v4.4h, v18.4h
  1192. mul v5.4h, v5.4h, v19.4h
  1193. ins v4.d[1], v5.d[0] /* 128 bit q4 */
  1194. ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [DCT_TABLE], 32
  1195. mul v6.4h, v6.4h, v20.4h
  1196. mul v7.4h, v7.4h, v21.4h
  1197. ins v6.d[1], v7.d[0] /* 128 bit q6 */
  1198. mul v8.4h, v8.4h, v22.4h
  1199. mul v9.4h, v9.4h, v23.4h
  1200. ins v8.d[1], v9.d[0] /* 128 bit q8 */
  1201. add DCT_TABLE, DCT_TABLE, #16
  1202. ld1 {v26.4h, v27.4h, v28.4h, v29.4h}, [DCT_TABLE], 32
  1203. mul v10.4h, v10.4h, v24.4h
  1204. mul v11.4h, v11.4h, v25.4h
  1205. ins v10.d[1], v11.d[0] /* 128 bit q10 */
  1206. mul v12.4h, v12.4h, v26.4h
  1207. mul v13.4h, v13.4h, v27.4h
  1208. ins v12.d[1], v13.d[0] /* 128 bit q12 */
  1209. ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
  1210. mul v14.4h, v14.4h, v28.4h
  1211. mul v15.4h, v15.4h, v29.4h
  1212. ins v14.d[1], v15.d[0] /* 128 bit q14 */
  1213. mul v16.4h, v16.4h, v30.4h
  1214. mul v17.4h, v17.4h, v31.4h
  1215. ins v16.d[1], v17.d[0] /* 128 bit q16 */
  1216. /* Pass 1 */
  1217. idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v12.4h, v14.4h, v16.4h, 12, \
  1218. v4.4h, v6.4h, v8.4h, v10.4h
  1219. transpose_4x4 v4, v6, v8, v10, v3
  1220. ins v10.d[1], v11.d[0]
  1221. idct_helper v5.4h, v7.4h, v9.4h, v11.4h, v13.4h, v15.4h, v17.4h, 12, \
  1222. v5.4h, v7.4h, v9.4h, v11.4h
  1223. transpose_4x4 v5, v7, v9, v11, v3
  1224. ins v10.d[1], v11.d[0]
  1225. /* Pass 2 */
  1226. idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v7.4h, v9.4h, v11.4h, 19, \
  1227. v26.4h, v27.4h, v28.4h, v29.4h
  1228. transpose_4x4 v26, v27, v28, v29, v3
  1229. /* Range limit */
  1230. movi v30.8h, #0x80
  1231. ins v26.d[1], v27.d[0]
  1232. ins v28.d[1], v29.d[0]
  1233. add v26.8h, v26.8h, v30.8h
  1234. add v28.8h, v28.8h, v30.8h
  1235. sqxtun v26.8b, v26.8h
  1236. sqxtun v27.8b, v28.8h
  1237. /* Store results to the output buffer */
  1238. ldp TMP1, TMP2, [OUTPUT_BUF], 16
  1239. ldp TMP3, TMP4, [OUTPUT_BUF]
  1240. add TMP1, TMP1, OUTPUT_COL
  1241. add TMP2, TMP2, OUTPUT_COL
  1242. add TMP3, TMP3, OUTPUT_COL
  1243. add TMP4, TMP4, OUTPUT_COL
  1244. #if defined(__ARMEL__) && !RESPECT_STRICT_ALIGNMENT
/* We can use far fewer instructions on little-endian systems if the
 * OS kernel is not configured to trap unaligned memory accesses.
 */
  1248. st1 {v26.s}[0], [TMP1], 4
  1249. st1 {v27.s}[0], [TMP3], 4
  1250. st1 {v26.s}[1], [TMP2], 4
  1251. st1 {v27.s}[1], [TMP4], 4
  1252. #else
  1253. st1 {v26.b}[0], [TMP1], 1
  1254. st1 {v27.b}[0], [TMP3], 1
  1255. st1 {v26.b}[1], [TMP1], 1
  1256. st1 {v27.b}[1], [TMP3], 1
  1257. st1 {v26.b}[2], [TMP1], 1
  1258. st1 {v27.b}[2], [TMP3], 1
  1259. st1 {v26.b}[3], [TMP1], 1
  1260. st1 {v27.b}[3], [TMP3], 1
  1261. st1 {v26.b}[4], [TMP2], 1
  1262. st1 {v27.b}[4], [TMP4], 1
  1263. st1 {v26.b}[5], [TMP2], 1
  1264. st1 {v27.b}[5], [TMP4], 1
  1265. st1 {v26.b}[6], [TMP2], 1
  1266. st1 {v27.b}[6], [TMP4], 1
  1267. st1 {v26.b}[7], [TMP2], 1
  1268. st1 {v27.b}[7], [TMP4], 1
  1269. #endif
/* vpop {v8.4h - v15.4h} ; not available */
  1271. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1272. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1273. blr x30
  1274. .unreq DCT_TABLE
  1275. .unreq COEF_BLOCK
  1276. .unreq OUTPUT_BUF
  1277. .unreq OUTPUT_COL
  1278. .unreq TMP1
  1279. .unreq TMP2
  1280. .unreq TMP3
  1281. .unreq TMP4
  1282. .purgem idct_helper
  1283. /*****************************************************************************/
/*
 * jsimd_idct_2x2_neon
 *
 * This function contains inverse-DCT code for producing reduced-size
 * 2x2 pixel output from an 8x8 DCT block. It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_idct_2x2'
 * function from jpeg-6b (jidctred.c).
 *
 * NOTE: jpeg-8 has an improved implementation of the 2x2 inverse DCT, which
 * requires far fewer arithmetic operations and hence should be faster.
 * The primary purpose of this particular NEON-optimized function is
 * bit-exact compatibility with jpeg-6b.
 */
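/* The idct_helper macro below computes one pair of output rows per column
 * group; in rough C terms (a sketch; c1..c7 stand for the constants loaded
 * from Ljsimd_idct_2x2_neon_consts):
 *
 *   tmp10 = in0 << 15;                            // even part: DC term only
 *   tmp0  = c1*in1 + c3*in3 + c5*in5 + c7*in7;    // odd part
 *   out0  = DESCALE(tmp10 + tmp0, shift);
 *   out1  = DESCALE(tmp10 - tmp0, shift);
 *
 * which mirrors jpeg_idct_2x2() in jidctred.c, where only coefficient rows
 * 0, 1, 3, 5, and 7 contribute to the output.
 */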
  1297. .macro idct_helper x4, x6, x10, x12, x16, shift, y26, y27
  1298. sshll v15.4s, \x4, #15
  1299. smull v26.4s, \x6, v14.h[3]
  1300. smlal v26.4s, \x10, v14.h[2]
  1301. smlal v26.4s, \x12, v14.h[1]
  1302. smlal v26.4s, \x16, v14.h[0]
  1303. add v20.4s, v15.4s, v26.4s
  1304. sub v15.4s, v15.4s, v26.4s
  1305. .if \shift > 16
  1306. srshr v20.4s, v20.4s, #\shift
  1307. srshr v15.4s, v15.4s, #\shift
  1308. xtn \y26, v20.4s
  1309. xtn \y27, v15.4s
  1310. .else
  1311. rshrn \y26, v20.4s, #\shift
  1312. rshrn \y27, v15.4s, #\shift
  1313. .endif
  1314. .endm
  1315. asm_function jsimd_idct_2x2_neon
  1316. DCT_TABLE .req x0
  1317. COEF_BLOCK .req x1
  1318. OUTPUT_BUF .req x2
  1319. OUTPUT_COL .req x3
  1320. TMP1 .req x0
  1321. TMP2 .req x15
  1322. /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  1323. guarantee that the upper (unused) 32 bits of x3 are valid. This
  1324. instruction ensures that those bits are set to zero. */
  1325. uxtw x3, w3
  1326. /* vpush {v8.4h - v15.4h} ; not available */
  1327. sub sp, sp, 64
  1328. mov x9, sp
  1329. /* Load constants */
  1330. get_symbol_loc TMP2, Ljsimd_idct_2x2_neon_consts
  1331. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1332. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1333. ld1 {v14.4h}, [TMP2]
  1334. /* Load all COEF_BLOCK into NEON registers with the following allocation:
  1335. * 0 1 2 3 | 4 5 6 7
  1336. * ---------+--------
  1337. * 0 | v4.4h | v5.4h
  1338. * 1 | v6.4h | v7.4h
  1339. * 2 | - | -
  1340. * 3 | v10.4h | v11.4h
  1341. * 4 | - | -
  1342. * 5 | v12.4h | v13.4h
  1343. * 6 | - | -
  1344. * 7 | v16.4h | v17.4h
  1345. */
  1346. ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
  1347. add COEF_BLOCK, COEF_BLOCK, #16
  1348. ld1 {v10.4h, v11.4h}, [COEF_BLOCK], 16
  1349. add COEF_BLOCK, COEF_BLOCK, #16
  1350. ld1 {v12.4h, v13.4h}, [COEF_BLOCK], 16
  1351. add COEF_BLOCK, COEF_BLOCK, #16
  1352. ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
  1353. /* Dequantize */
  1354. ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
  1355. mul v4.4h, v4.4h, v18.4h
  1356. mul v5.4h, v5.4h, v19.4h
  1357. ins v4.d[1], v5.d[0]
  1358. mul v6.4h, v6.4h, v20.4h
  1359. mul v7.4h, v7.4h, v21.4h
  1360. ins v6.d[1], v7.d[0]
  1361. add DCT_TABLE, DCT_TABLE, #16
  1362. ld1 {v24.4h, v25.4h}, [DCT_TABLE], 16
  1363. mul v10.4h, v10.4h, v24.4h
  1364. mul v11.4h, v11.4h, v25.4h
  1365. ins v10.d[1], v11.d[0]
  1366. add DCT_TABLE, DCT_TABLE, #16
  1367. ld1 {v26.4h, v27.4h}, [DCT_TABLE], 16
  1368. mul v12.4h, v12.4h, v26.4h
  1369. mul v13.4h, v13.4h, v27.4h
  1370. ins v12.d[1], v13.d[0]
  1371. add DCT_TABLE, DCT_TABLE, #16
  1372. ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
  1373. mul v16.4h, v16.4h, v30.4h
  1374. mul v17.4h, v17.4h, v31.4h
  1375. ins v16.d[1], v17.d[0]
  1376. /* Pass 1 */
  1377. #if 0
  1378. idct_helper v4.4h, v6.4h, v10.4h, v12.4h, v16.4h, 13, v4.4h, v6.4h
  1379. transpose_4x4 v4.4h, v6.4h, v8.4h, v10.4h
  1380. idct_helper v5.4h, v7.4h, v11.4h, v13.4h, v17.4h, 13, v5.4h, v7.4h
  1381. transpose_4x4 v5.4h, v7.4h, v9.4h, v11.4h
  1382. #else
  1383. smull v26.4s, v6.4h, v14.h[3]
  1384. smlal v26.4s, v10.4h, v14.h[2]
  1385. smlal v26.4s, v12.4h, v14.h[1]
  1386. smlal v26.4s, v16.4h, v14.h[0]
  1387. smull v24.4s, v7.4h, v14.h[3]
  1388. smlal v24.4s, v11.4h, v14.h[2]
  1389. smlal v24.4s, v13.4h, v14.h[1]
  1390. smlal v24.4s, v17.4h, v14.h[0]
  1391. sshll v15.4s, v4.4h, #15
  1392. sshll v30.4s, v5.4h, #15
  1393. add v20.4s, v15.4s, v26.4s
  1394. sub v15.4s, v15.4s, v26.4s
  1395. rshrn v4.4h, v20.4s, #13
  1396. rshrn v6.4h, v15.4s, #13
  1397. add v20.4s, v30.4s, v24.4s
  1398. sub v15.4s, v30.4s, v24.4s
  1399. rshrn v5.4h, v20.4s, #13
  1400. rshrn v7.4h, v15.4s, #13
  1401. ins v4.d[1], v5.d[0]
  1402. ins v6.d[1], v7.d[0]
  1403. transpose v4, v6, v3, .16b, .8h
  1404. transpose v6, v10, v3, .16b, .4s
  1405. ins v11.d[0], v10.d[1]
  1406. ins v7.d[0], v6.d[1]
  1407. #endif
  1408. /* Pass 2 */
  1409. idct_helper v4.4h, v6.4h, v10.4h, v7.4h, v11.4h, 20, v26.4h, v27.4h
  1410. /* Range limit */
  1411. movi v30.8h, #0x80
  1412. ins v26.d[1], v27.d[0]
  1413. add v26.8h, v26.8h, v30.8h
  1414. sqxtun v30.8b, v26.8h
  1415. ins v26.d[0], v30.d[0]
  1416. sqxtun v27.8b, v26.8h
  1417. /* Store results to the output buffer */
  1418. ldp TMP1, TMP2, [OUTPUT_BUF]
  1419. add TMP1, TMP1, OUTPUT_COL
  1420. add TMP2, TMP2, OUTPUT_COL
  1421. st1 {v26.b}[0], [TMP1], 1
  1422. st1 {v27.b}[4], [TMP1], 1
  1423. st1 {v26.b}[1], [TMP2], 1
  1424. st1 {v27.b}[5], [TMP2], 1
  1425. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1426. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1427. blr x30
  1428. .unreq DCT_TABLE
  1429. .unreq COEF_BLOCK
  1430. .unreq OUTPUT_BUF
  1431. .unreq OUTPUT_COL
  1432. .unreq TMP1
  1433. .unreq TMP2
  1434. .purgem idct_helper
  1435. /*****************************************************************************/
  1436. /*
  1437. * jsimd_ycc_extrgb_convert_neon
  1438. * jsimd_ycc_extbgr_convert_neon
  1439. * jsimd_ycc_extrgbx_convert_neon
  1440. * jsimd_ycc_extbgrx_convert_neon
  1441. * jsimd_ycc_extxbgr_convert_neon
  1442. * jsimd_ycc_extxrgb_convert_neon
  1443. *
  1444. * Colorspace conversion YCbCr -> RGB
  1445. */
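/* The conversion implemented below is the usual JFIF one:
 *
 *   R = Y                        + 1.40200 * (Cr - 128)
 *   G = Y - 0.34414 * (Cb - 128) - 0.71414 * (Cr - 128)
 *   B = Y + 1.77200 * (Cb - 128)
 *
 * Judging from the inline comments and the shift amounts used below, the
 * green multipliers (-11277, -23401) are scaled by 2^15 and the red/blue
 * multipliers (22971, 29033) by 2^14, with rshrn providing the rounding
 * right shift back to pixel range.
 */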
  1446. .macro do_load size
  1447. .if \size == 8
  1448. ld1 {v4.8b}, [U], 8
  1449. ld1 {v5.8b}, [V], 8
  1450. ld1 {v0.8b}, [Y], 8
  1451. prfm pldl1keep, [U, #64]
  1452. prfm pldl1keep, [V, #64]
  1453. prfm pldl1keep, [Y, #64]
  1454. .elseif \size == 4
  1455. ld1 {v4.b}[0], [U], 1
  1456. ld1 {v4.b}[1], [U], 1
  1457. ld1 {v4.b}[2], [U], 1
  1458. ld1 {v4.b}[3], [U], 1
  1459. ld1 {v5.b}[0], [V], 1
  1460. ld1 {v5.b}[1], [V], 1
  1461. ld1 {v5.b}[2], [V], 1
  1462. ld1 {v5.b}[3], [V], 1
  1463. ld1 {v0.b}[0], [Y], 1
  1464. ld1 {v0.b}[1], [Y], 1
  1465. ld1 {v0.b}[2], [Y], 1
  1466. ld1 {v0.b}[3], [Y], 1
  1467. .elseif \size == 2
  1468. ld1 {v4.b}[4], [U], 1
  1469. ld1 {v4.b}[5], [U], 1
  1470. ld1 {v5.b}[4], [V], 1
  1471. ld1 {v5.b}[5], [V], 1
  1472. ld1 {v0.b}[4], [Y], 1
  1473. ld1 {v0.b}[5], [Y], 1
  1474. .elseif \size == 1
  1475. ld1 {v4.b}[6], [U], 1
  1476. ld1 {v5.b}[6], [V], 1
  1477. ld1 {v0.b}[6], [Y], 1
  1478. .else
  1479. .error unsupported macroblock size
  1480. .endif
  1481. .endm
  1482. .macro do_store bpp, size, fast_st3
  1483. .if \bpp == 24
  1484. .if \size == 8
  1485. .if \fast_st3 == 1
  1486. st3 {v10.8b, v11.8b, v12.8b}, [RGB], 24
  1487. .else
  1488. st1 {v10.b}[0], [RGB], #1
  1489. st1 {v11.b}[0], [RGB], #1
  1490. st1 {v12.b}[0], [RGB], #1
  1491. st1 {v10.b}[1], [RGB], #1
  1492. st1 {v11.b}[1], [RGB], #1
  1493. st1 {v12.b}[1], [RGB], #1
  1494. st1 {v10.b}[2], [RGB], #1
  1495. st1 {v11.b}[2], [RGB], #1
  1496. st1 {v12.b}[2], [RGB], #1
  1497. st1 {v10.b}[3], [RGB], #1
  1498. st1 {v11.b}[3], [RGB], #1
  1499. st1 {v12.b}[3], [RGB], #1
  1500. st1 {v10.b}[4], [RGB], #1
  1501. st1 {v11.b}[4], [RGB], #1
  1502. st1 {v12.b}[4], [RGB], #1
  1503. st1 {v10.b}[5], [RGB], #1
  1504. st1 {v11.b}[5], [RGB], #1
  1505. st1 {v12.b}[5], [RGB], #1
  1506. st1 {v10.b}[6], [RGB], #1
  1507. st1 {v11.b}[6], [RGB], #1
  1508. st1 {v12.b}[6], [RGB], #1
  1509. st1 {v10.b}[7], [RGB], #1
  1510. st1 {v11.b}[7], [RGB], #1
  1511. st1 {v12.b}[7], [RGB], #1
  1512. .endif
  1513. .elseif \size == 4
  1514. st3 {v10.b, v11.b, v12.b}[0], [RGB], 3
  1515. st3 {v10.b, v11.b, v12.b}[1], [RGB], 3
  1516. st3 {v10.b, v11.b, v12.b}[2], [RGB], 3
  1517. st3 {v10.b, v11.b, v12.b}[3], [RGB], 3
  1518. .elseif \size == 2
  1519. st3 {v10.b, v11.b, v12.b}[4], [RGB], 3
  1520. st3 {v10.b, v11.b, v12.b}[5], [RGB], 3
  1521. .elseif \size == 1
  1522. st3 {v10.b, v11.b, v12.b}[6], [RGB], 3
  1523. .else
  1524. .error unsupported macroblock size
  1525. .endif
  1526. .elseif \bpp == 32
  1527. .if \size == 8
  1528. st4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], 32
  1529. .elseif \size == 4
  1530. st4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], 4
  1531. st4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], 4
  1532. st4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], 4
  1533. st4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], 4
  1534. .elseif \size == 2
  1535. st4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], 4
  1536. st4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], 4
  1537. .elseif \size == 1
  1538. st4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], 4
  1539. .else
  1540. .error unsupported macroblock size
  1541. .endif
  1542. .elseif \bpp == 16
  1543. .if \size == 8
  1544. st1 {v25.8h}, [RGB], 16
  1545. .elseif \size == 4
  1546. st1 {v25.4h}, [RGB], 8
  1547. .elseif \size == 2
  1548. st1 {v25.h}[4], [RGB], 2
  1549. st1 {v25.h}[5], [RGB], 2
  1550. .elseif \size == 1
  1551. st1 {v25.h}[6], [RGB], 2
  1552. .else
  1553. .error unsupported macroblock size
  1554. .endif
  1555. .else
  1556. .error unsupported bpp
  1557. .endif
  1558. .endm
  1559. .macro generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, rsize, \
  1560. g_offs, gsize, b_offs, bsize, \
  1561. defsize, fast_st3
  1562. /*
  1563. * 2-stage pipelined YCbCr->RGB conversion
  1564. */
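/* Pipelining scheme (descriptive note): stage1 performs the widening
 * multiplies for the current 8-pixel block, and stage2 narrows the results,
 * adds the Y samples, and packs the pixels. The fused
 * do_yuv_to_rgb_stage2_store_load_stage1 macro stores block N while loading
 * block N+1 and starting its multiplies, so memory accesses overlap with the
 * arithmetic in the inner loop.
 */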
  1565. .macro do_yuv_to_rgb_stage1
  1566. uaddw v6.8h, v2.8h, v4.8b /* q3 = u - 128 */
  1567. uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
  1568. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  1569. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  1570. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  1571. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  1572. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  1573. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  1574. smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
  1575. smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
  1576. .endm
  1577. .macro do_yuv_to_rgb_stage2
  1578. rshrn v20.4h, v20.4s, #15
  1579. rshrn2 v20.8h, v22.4s, #15
  1580. rshrn v24.4h, v24.4s, #14
  1581. rshrn2 v24.8h, v26.4s, #14
  1582. rshrn v28.4h, v28.4s, #14
  1583. rshrn2 v28.8h, v30.4s, #14
  1584. uaddw v20.8h, v20.8h, v0.8b
  1585. uaddw v24.8h, v24.8h, v0.8b
  1586. uaddw v28.8h, v28.8h, v0.8b
  1587. .if \bpp != 16
  1588. sqxtun v1\g_offs\defsize, v20.8h
  1589. sqxtun v1\r_offs\defsize, v24.8h
  1590. sqxtun v1\b_offs\defsize, v28.8h
  1591. .else
  1592. sqshlu v21.8h, v20.8h, #8
  1593. sqshlu v25.8h, v24.8h, #8
  1594. sqshlu v29.8h, v28.8h, #8
  1595. sri v25.8h, v21.8h, #5
  1596. sri v25.8h, v29.8h, #11
  1597. .endif
  1598. .endm
  1599. .macro do_yuv_to_rgb_stage2_store_load_stage1 fast_st3
  1600. rshrn v20.4h, v20.4s, #15
  1601. rshrn v24.4h, v24.4s, #14
  1602. rshrn v28.4h, v28.4s, #14
  1603. ld1 {v4.8b}, [U], 8
  1604. rshrn2 v20.8h, v22.4s, #15
  1605. rshrn2 v24.8h, v26.4s, #14
  1606. rshrn2 v28.8h, v30.4s, #14
  1607. ld1 {v5.8b}, [V], 8
  1608. uaddw v20.8h, v20.8h, v0.8b
  1609. uaddw v24.8h, v24.8h, v0.8b
  1610. uaddw v28.8h, v28.8h, v0.8b
  1611. .if \bpp != 16 /**************** rgb24/rgb32 ******************************/
  1612. sqxtun v1\g_offs\defsize, v20.8h
  1613. ld1 {v0.8b}, [Y], 8
  1614. sqxtun v1\r_offs\defsize, v24.8h
  1615. prfm pldl1keep, [U, #64]
  1616. prfm pldl1keep, [V, #64]
  1617. prfm pldl1keep, [Y, #64]
  1618. sqxtun v1\b_offs\defsize, v28.8h
  1619. uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
  1620. uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
  1621. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  1622. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  1623. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  1624. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  1625. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  1626. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  1627. .else /**************************** rgb565 ********************************/
  1628. sqshlu v21.8h, v20.8h, #8
  1629. sqshlu v25.8h, v24.8h, #8
  1630. sqshlu v29.8h, v28.8h, #8
  1631. uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
  1632. uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
  1633. ld1 {v0.8b}, [Y], 8
  1634. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  1635. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  1636. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  1637. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  1638. sri v25.8h, v21.8h, #5
  1639. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  1640. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  1641. prfm pldl1keep, [U, #64]
  1642. prfm pldl1keep, [V, #64]
  1643. prfm pldl1keep, [Y, #64]
  1644. sri v25.8h, v29.8h, #11
  1645. .endif
  1646. do_store \bpp, 8, \fast_st3
  1647. smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
  1648. smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
  1649. .endm
  1650. .macro do_yuv_to_rgb
  1651. do_yuv_to_rgb_stage1
  1652. do_yuv_to_rgb_stage2
  1653. .endm
  1654. .if \fast_st3 == 1
  1655. asm_function jsimd_ycc_\colorid\()_convert_neon
  1656. .else
  1657. asm_function jsimd_ycc_\colorid\()_convert_neon_slowst3
  1658. .endif
  1659. OUTPUT_WIDTH .req w0
  1660. INPUT_BUF .req x1
  1661. INPUT_ROW .req w2
  1662. OUTPUT_BUF .req x3
  1663. NUM_ROWS .req w4
  1664. INPUT_BUF0 .req x5
  1665. INPUT_BUF1 .req x6
  1666. INPUT_BUF2 .req x1
  1667. RGB .req x7
  1668. Y .req x9
  1669. U .req x10
  1670. V .req x11
  1671. N .req w15
  1672. sub sp, sp, 64
  1673. mov x9, sp
/* Load constants into v1.4h and v2.8h (v0.4h is only loaded for padding) */
  1675. get_symbol_loc x15, Ljsimd_ycc_rgb_neon_consts
  1676. /* Save NEON registers */
  1677. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1678. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1679. ld1 {v0.4h, v1.4h}, [x15], 16
  1680. ld1 {v2.8h}, [x15]
  1681. ldr INPUT_BUF0, [INPUT_BUF]
  1682. ldr INPUT_BUF1, [INPUT_BUF, #8]
  1683. ldr INPUT_BUF2, [INPUT_BUF, #16]
  1684. .unreq INPUT_BUF
/* Initially set v10.16b and v13.16b to 0xFF; they hold the X/alpha byte for the 32-bit pixel formats */
  1686. movi v10.16b, #255
  1687. movi v13.16b, #255
  1688. /* Outer loop over scanlines */
  1689. cmp NUM_ROWS, #1
  1690. b.lt 9f
  1691. 0:
  1692. ldr Y, [INPUT_BUF0, INPUT_ROW, uxtw #3]
  1693. ldr U, [INPUT_BUF1, INPUT_ROW, uxtw #3]
  1694. mov N, OUTPUT_WIDTH
  1695. ldr V, [INPUT_BUF2, INPUT_ROW, uxtw #3]
  1696. add INPUT_ROW, INPUT_ROW, #1
  1697. ldr RGB, [OUTPUT_BUF], #8
  1698. /* Inner loop over pixels */
  1699. subs N, N, #8
  1700. b.lt 3f
  1701. do_load 8
  1702. do_yuv_to_rgb_stage1
  1703. subs N, N, #8
  1704. b.lt 2f
  1705. 1:
  1706. do_yuv_to_rgb_stage2_store_load_stage1 \fast_st3
  1707. subs N, N, #8
  1708. b.ge 1b
  1709. 2:
  1710. do_yuv_to_rgb_stage2
  1711. do_store \bpp, 8, \fast_st3
  1712. tst N, #7
  1713. b.eq 8f
  1714. 3:
  1715. tst N, #4
  1716. b.eq 3f
  1717. do_load 4
  1718. 3:
  1719. tst N, #2
  1720. b.eq 4f
  1721. do_load 2
  1722. 4:
  1723. tst N, #1
  1724. b.eq 5f
  1725. do_load 1
  1726. 5:
  1727. do_yuv_to_rgb
  1728. tst N, #4
  1729. b.eq 6f
  1730. do_store \bpp, 4, \fast_st3
  1731. 6:
  1732. tst N, #2
  1733. b.eq 7f
  1734. do_store \bpp, 2, \fast_st3
  1735. 7:
  1736. tst N, #1
  1737. b.eq 8f
  1738. do_store \bpp, 1, \fast_st3
  1739. 8:
  1740. subs NUM_ROWS, NUM_ROWS, #1
  1741. b.gt 0b
  1742. 9:
  1743. /* Restore all registers and return */
  1744. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1745. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1746. br x30
  1747. .unreq OUTPUT_WIDTH
  1748. .unreq INPUT_ROW
  1749. .unreq OUTPUT_BUF
  1750. .unreq NUM_ROWS
  1751. .unreq INPUT_BUF0
  1752. .unreq INPUT_BUF1
  1753. .unreq INPUT_BUF2
  1754. .unreq RGB
  1755. .unreq Y
  1756. .unreq U
  1757. .unreq V
  1758. .unreq N
  1759. .purgem do_yuv_to_rgb
  1760. .purgem do_yuv_to_rgb_stage1
  1761. .purgem do_yuv_to_rgb_stage2
  1762. .purgem do_yuv_to_rgb_stage2_store_load_stage1
  1763. .endm
  1764. /*--------------------------------- id ----- bpp R rsize G gsize B bsize defsize fast_st3*/
  1765. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 1
  1766. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 1
  1767. generate_jsimd_ycc_rgb_convert_neon extrgbx, 32, 0, .4h, 1, .4h, 2, .4h, .8b, 1
  1768. generate_jsimd_ycc_rgb_convert_neon extbgrx, 32, 2, .4h, 1, .4h, 0, .4h, .8b, 1
  1769. generate_jsimd_ycc_rgb_convert_neon extxbgr, 32, 3, .4h, 2, .4h, 1, .4h, .8b, 1
  1770. generate_jsimd_ycc_rgb_convert_neon extxrgb, 32, 1, .4h, 2, .4h, 3, .4h, .8b, 1
  1771. generate_jsimd_ycc_rgb_convert_neon rgb565, 16, 0, .4h, 0, .4h, 0, .4h, .8b, 1
  1772. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 0
  1773. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 0
  1774. .purgem do_load
  1775. .purgem do_store
  1776. /*****************************************************************************/
  1777. /*
  1778. * jsimd_extrgb_ycc_convert_neon
  1779. * jsimd_extbgr_ycc_convert_neon
  1780. * jsimd_extrgbx_ycc_convert_neon
  1781. * jsimd_extbgrx_ycc_convert_neon
  1782. * jsimd_extxbgr_ycc_convert_neon
  1783. * jsimd_extxrgb_ycc_convert_neon
  1784. *
  1785. * Colorspace conversion RGB -> YCbCr
  1786. */
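/* The conversion implemented below is the usual JFIF one:
 *
 *   Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
 *   Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128
 *   Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + 128
 *
 * The NEON code evaluates these in 16.16 fixed point (the stage2 macro
 * shifts right by 16). The Cb/Cr accumulators are pre-loaded from v1 via
 * rev64 in stage1, presumably with the +128 bias plus a rounding constant,
 * which would explain why stage2 uses a plain shrn for them while Y uses
 * the rounding rshrn.
 */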
  1787. .macro do_store size
  1788. .if \size == 8
  1789. st1 {v20.8b}, [Y], #8
  1790. st1 {v21.8b}, [U], #8
  1791. st1 {v22.8b}, [V], #8
  1792. .elseif \size == 4
  1793. st1 {v20.b}[0], [Y], #1
  1794. st1 {v20.b}[1], [Y], #1
  1795. st1 {v20.b}[2], [Y], #1
  1796. st1 {v20.b}[3], [Y], #1
  1797. st1 {v21.b}[0], [U], #1
  1798. st1 {v21.b}[1], [U], #1
  1799. st1 {v21.b}[2], [U], #1
  1800. st1 {v21.b}[3], [U], #1
  1801. st1 {v22.b}[0], [V], #1
  1802. st1 {v22.b}[1], [V], #1
  1803. st1 {v22.b}[2], [V], #1
  1804. st1 {v22.b}[3], [V], #1
  1805. .elseif \size == 2
  1806. st1 {v20.b}[4], [Y], #1
  1807. st1 {v20.b}[5], [Y], #1
  1808. st1 {v21.b}[4], [U], #1
  1809. st1 {v21.b}[5], [U], #1
  1810. st1 {v22.b}[4], [V], #1
  1811. st1 {v22.b}[5], [V], #1
  1812. .elseif \size == 1
  1813. st1 {v20.b}[6], [Y], #1
  1814. st1 {v21.b}[6], [U], #1
  1815. st1 {v22.b}[6], [V], #1
  1816. .else
  1817. .error unsupported macroblock size
  1818. .endif
  1819. .endm
  1820. .macro do_load bpp, size, fast_ld3
  1821. .if \bpp == 24
  1822. .if \size == 8
  1823. .if \fast_ld3 == 1
  1824. ld3 {v10.8b, v11.8b, v12.8b}, [RGB], #24
  1825. .else
  1826. ld1 {v10.b}[0], [RGB], #1
  1827. ld1 {v11.b}[0], [RGB], #1
  1828. ld1 {v12.b}[0], [RGB], #1
  1829. ld1 {v10.b}[1], [RGB], #1
  1830. ld1 {v11.b}[1], [RGB], #1
  1831. ld1 {v12.b}[1], [RGB], #1
  1832. ld1 {v10.b}[2], [RGB], #1
  1833. ld1 {v11.b}[2], [RGB], #1
  1834. ld1 {v12.b}[2], [RGB], #1
  1835. ld1 {v10.b}[3], [RGB], #1
  1836. ld1 {v11.b}[3], [RGB], #1
  1837. ld1 {v12.b}[3], [RGB], #1
  1838. ld1 {v10.b}[4], [RGB], #1
  1839. ld1 {v11.b}[4], [RGB], #1
  1840. ld1 {v12.b}[4], [RGB], #1
  1841. ld1 {v10.b}[5], [RGB], #1
  1842. ld1 {v11.b}[5], [RGB], #1
  1843. ld1 {v12.b}[5], [RGB], #1
  1844. ld1 {v10.b}[6], [RGB], #1
  1845. ld1 {v11.b}[6], [RGB], #1
  1846. ld1 {v12.b}[6], [RGB], #1
  1847. ld1 {v10.b}[7], [RGB], #1
  1848. ld1 {v11.b}[7], [RGB], #1
  1849. ld1 {v12.b}[7], [RGB], #1
  1850. .endif
  1851. prfm pldl1keep, [RGB, #128]
  1852. .elseif \size == 4
  1853. ld3 {v10.b, v11.b, v12.b}[0], [RGB], #3
  1854. ld3 {v10.b, v11.b, v12.b}[1], [RGB], #3
  1855. ld3 {v10.b, v11.b, v12.b}[2], [RGB], #3
  1856. ld3 {v10.b, v11.b, v12.b}[3], [RGB], #3
  1857. .elseif \size == 2
  1858. ld3 {v10.b, v11.b, v12.b}[4], [RGB], #3
  1859. ld3 {v10.b, v11.b, v12.b}[5], [RGB], #3
  1860. .elseif \size == 1
  1861. ld3 {v10.b, v11.b, v12.b}[6], [RGB], #3
  1862. .else
  1863. .error unsupported macroblock size
  1864. .endif
  1865. .elseif \bpp == 32
  1866. .if \size == 8
  1867. ld4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], #32
  1868. prfm pldl1keep, [RGB, #128]
  1869. .elseif \size == 4
  1870. ld4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], #4
  1871. ld4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], #4
  1872. ld4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], #4
  1873. ld4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], #4
  1874. .elseif \size == 2
  1875. ld4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], #4
  1876. ld4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], #4
  1877. .elseif \size == 1
  1878. ld4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], #4
  1879. .else
  1880. .error unsupported macroblock size
  1881. .endif
  1882. .else
  1883. .error unsupported bpp
  1884. .endif
  1885. .endm
  1886. .macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, \
  1887. b_offs, fast_ld3
  1888. /*
  1889. * 2-stage pipelined RGB->YCbCr conversion
  1890. */
  1891. .macro do_rgb_to_yuv_stage1
  1892. ushll v4.8h, v1\r_offs\().8b, #0 /* r = v4 */
  1893. ushll v6.8h, v1\g_offs\().8b, #0 /* g = v6 */
  1894. ushll v8.8h, v1\b_offs\().8b, #0 /* b = v8 */
  1895. rev64 v18.4s, v1.4s
  1896. rev64 v26.4s, v1.4s
  1897. rev64 v28.4s, v1.4s
  1898. rev64 v30.4s, v1.4s
  1899. umull v14.4s, v4.4h, v0.h[0]
  1900. umull2 v16.4s, v4.8h, v0.h[0]
  1901. umlsl v18.4s, v4.4h, v0.h[3]
  1902. umlsl2 v26.4s, v4.8h, v0.h[3]
  1903. umlal v28.4s, v4.4h, v0.h[5]
  1904. umlal2 v30.4s, v4.8h, v0.h[5]
  1905. umlal v14.4s, v6.4h, v0.h[1]
  1906. umlal2 v16.4s, v6.8h, v0.h[1]
  1907. umlsl v18.4s, v6.4h, v0.h[4]
  1908. umlsl2 v26.4s, v6.8h, v0.h[4]
  1909. umlsl v28.4s, v6.4h, v0.h[6]
  1910. umlsl2 v30.4s, v6.8h, v0.h[6]
  1911. umlal v14.4s, v8.4h, v0.h[2]
  1912. umlal2 v16.4s, v8.8h, v0.h[2]
  1913. umlal v18.4s, v8.4h, v0.h[5]
  1914. umlal2 v26.4s, v8.8h, v0.h[5]
  1915. umlsl v28.4s, v8.4h, v0.h[7]
  1916. umlsl2 v30.4s, v8.8h, v0.h[7]
  1917. .endm
  1918. .macro do_rgb_to_yuv_stage2
  1919. rshrn v20.4h, v14.4s, #16
  1920. shrn v22.4h, v18.4s, #16
  1921. shrn v24.4h, v28.4s, #16
  1922. rshrn2 v20.8h, v16.4s, #16
  1923. shrn2 v22.8h, v26.4s, #16
  1924. shrn2 v24.8h, v30.4s, #16
  1925. xtn v20.8b, v20.8h /* v20 = y */
  1926. xtn v21.8b, v22.8h /* v21 = u */
  1927. xtn v22.8b, v24.8h /* v22 = v */
  1928. .endm
  1929. .macro do_rgb_to_yuv
  1930. do_rgb_to_yuv_stage1
  1931. do_rgb_to_yuv_stage2
  1932. .endm
/* TODO: expand the macros and interleave instructions if an in-order
 * ARM64 processor can actually dual-issue LOAD/STORE with ALU operations */
  1935. .macro do_rgb_to_yuv_stage2_store_load_stage1 fast_ld3
  1936. do_rgb_to_yuv_stage2
  1937. do_load \bpp, 8, \fast_ld3
  1938. st1 {v20.8b}, [Y], #8
  1939. st1 {v21.8b}, [U], #8
  1940. st1 {v22.8b}, [V], #8
  1941. do_rgb_to_yuv_stage1
  1942. .endm
  1943. .if \fast_ld3 == 1
  1944. asm_function jsimd_\colorid\()_ycc_convert_neon
  1945. .else
  1946. asm_function jsimd_\colorid\()_ycc_convert_neon_slowld3
  1947. .endif
  1948. OUTPUT_WIDTH .req w0
  1949. INPUT_BUF .req x1
  1950. OUTPUT_BUF .req x2
  1951. OUTPUT_ROW .req w3
  1952. NUM_ROWS .req w4
  1953. OUTPUT_BUF0 .req x5
  1954. OUTPUT_BUF1 .req x6
  1955. OUTPUT_BUF2 .req x2 /* OUTPUT_BUF */
  1956. RGB .req x7
  1957. Y .req x9
  1958. U .req x10
  1959. V .req x11
  1960. N .req w12
  1961. /* Load constants to d0, d1, d2, d3 */
  1962. get_symbol_loc x13, Ljsimd_rgb_ycc_neon_consts
  1963. ld1 {v0.8h, v1.8h}, [x13]
  1964. ldr OUTPUT_BUF0, [OUTPUT_BUF]
  1965. ldr OUTPUT_BUF1, [OUTPUT_BUF, #8]
  1966. ldr OUTPUT_BUF2, [OUTPUT_BUF, #16]
  1967. .unreq OUTPUT_BUF
  1968. /* Save NEON registers */
  1969. sub sp, sp, #64
  1970. mov x9, sp
  1971. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1972. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1973. /* Outer loop over scanlines */
  1974. cmp NUM_ROWS, #1
  1975. b.lt 9f
  1976. 0:
  1977. ldr Y, [OUTPUT_BUF0, OUTPUT_ROW, uxtw #3]
  1978. ldr U, [OUTPUT_BUF1, OUTPUT_ROW, uxtw #3]
  1979. mov N, OUTPUT_WIDTH
  1980. ldr V, [OUTPUT_BUF2, OUTPUT_ROW, uxtw #3]
  1981. add OUTPUT_ROW, OUTPUT_ROW, #1
  1982. ldr RGB, [INPUT_BUF], #8
  1983. /* Inner loop over pixels */
  1984. subs N, N, #8
  1985. b.lt 3f
  1986. do_load \bpp, 8, \fast_ld3
  1987. do_rgb_to_yuv_stage1
  1988. subs N, N, #8
  1989. b.lt 2f
  1990. 1:
  1991. do_rgb_to_yuv_stage2_store_load_stage1 \fast_ld3
  1992. subs N, N, #8
  1993. b.ge 1b
  1994. 2:
  1995. do_rgb_to_yuv_stage2
  1996. do_store 8
  1997. tst N, #7
  1998. b.eq 8f
  1999. 3:
  2000. tbz N, #2, 3f
  2001. do_load \bpp, 4, \fast_ld3
  2002. 3:
  2003. tbz N, #1, 4f
  2004. do_load \bpp, 2, \fast_ld3
  2005. 4:
  2006. tbz N, #0, 5f
  2007. do_load \bpp, 1, \fast_ld3
  2008. 5:
  2009. do_rgb_to_yuv
  2010. tbz N, #2, 6f
  2011. do_store 4
  2012. 6:
  2013. tbz N, #1, 7f
  2014. do_store 2
  2015. 7:
  2016. tbz N, #0, 8f
  2017. do_store 1
  2018. 8:
  2019. subs NUM_ROWS, NUM_ROWS, #1
  2020. b.gt 0b
  2021. 9:
  2022. /* Restore all registers and return */
  2023. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  2024. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  2025. br x30
  2026. .unreq OUTPUT_WIDTH
  2027. .unreq OUTPUT_ROW
  2028. .unreq INPUT_BUF
  2029. .unreq NUM_ROWS
  2030. .unreq OUTPUT_BUF0
  2031. .unreq OUTPUT_BUF1
  2032. .unreq OUTPUT_BUF2
  2033. .unreq RGB
  2034. .unreq Y
  2035. .unreq U
  2036. .unreq V
  2037. .unreq N
  2038. .purgem do_rgb_to_yuv
  2039. .purgem do_rgb_to_yuv_stage1
  2040. .purgem do_rgb_to_yuv_stage2
  2041. .purgem do_rgb_to_yuv_stage2_store_load_stage1
  2042. .endm
  2043. /*--------------------------------- id ----- bpp R G B Fast LD3 */
  2044. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 1
  2045. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 1
  2046. generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2, 1
  2047. generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0, 1
  2048. generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1, 1
  2049. generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3, 1
  2050. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 0
  2051. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 0
  2052. .purgem do_load
  2053. .purgem do_store
  2054. /*****************************************************************************/
/*
 * Load data into the workspace, applying unsigned->signed conversion.
 *
 * TODO: could be combined with 'jsimd_fdct_ifast_neon' to get rid of
 * the VST1.16 instructions.
 */
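/* In C terms the function below does, per 8x8 block (sketch):
 *
 *   workspace[row][col] =
 *     (DCTELEM)GETJSAMPLE(sample_data[row][start_col + col]) - CENTERJSAMPLE;
 *
 * i.e. the USUBL instructions subtract the 128 bias (duplicated into v0.8b
 * via TMPDUP) while widening the 8-bit samples to signed 16-bit values.
 */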
  2061. asm_function jsimd_convsamp_neon
  2062. SAMPLE_DATA .req x0
  2063. START_COL .req x1
  2064. WORKSPACE .req x2
  2065. TMP1 .req x9
  2066. TMP2 .req x10
  2067. TMP3 .req x11
  2068. TMP4 .req x12
  2069. TMP5 .req x13
  2070. TMP6 .req x14
  2071. TMP7 .req x15
  2072. TMP8 .req x4
  2073. TMPDUP .req w3
  2074. /* START_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  2075. guarantee that the upper (unused) 32 bits of x1 are valid. This
  2076. instruction ensures that those bits are set to zero. */
  2077. uxtw x1, w1
  2078. mov TMPDUP, #128
  2079. ldp TMP1, TMP2, [SAMPLE_DATA], 16
  2080. ldp TMP3, TMP4, [SAMPLE_DATA], 16
  2081. dup v0.8b, TMPDUP
  2082. add TMP1, TMP1, START_COL
  2083. add TMP2, TMP2, START_COL
  2084. ldp TMP5, TMP6, [SAMPLE_DATA], 16
  2085. add TMP3, TMP3, START_COL
  2086. add TMP4, TMP4, START_COL
  2087. ldp TMP7, TMP8, [SAMPLE_DATA], 16
  2088. add TMP5, TMP5, START_COL
  2089. add TMP6, TMP6, START_COL
  2090. ld1 {v16.8b}, [TMP1]
  2091. add TMP7, TMP7, START_COL
  2092. add TMP8, TMP8, START_COL
  2093. ld1 {v17.8b}, [TMP2]
  2094. usubl v16.8h, v16.8b, v0.8b
  2095. ld1 {v18.8b}, [TMP3]
  2096. usubl v17.8h, v17.8b, v0.8b
  2097. ld1 {v19.8b}, [TMP4]
  2098. usubl v18.8h, v18.8b, v0.8b
  2099. ld1 {v20.8b}, [TMP5]
  2100. usubl v19.8h, v19.8b, v0.8b
  2101. ld1 {v21.8b}, [TMP6]
  2102. st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [WORKSPACE], 64
  2103. usubl v20.8h, v20.8b, v0.8b
  2104. ld1 {v22.8b}, [TMP7]
  2105. usubl v21.8h, v21.8b, v0.8b
  2106. ld1 {v23.8b}, [TMP8]
  2107. usubl v22.8h, v22.8b, v0.8b
  2108. usubl v23.8h, v23.8b, v0.8b
  2109. st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [WORKSPACE], 64
  2110. br x30
  2111. .unreq SAMPLE_DATA
  2112. .unreq START_COL
  2113. .unreq WORKSPACE
  2114. .unreq TMP1
  2115. .unreq TMP2
  2116. .unreq TMP3
  2117. .unreq TMP4
  2118. .unreq TMP5
  2119. .unreq TMP6
  2120. .unreq TMP7
  2121. .unreq TMP8
  2122. .unreq TMPDUP
  2123. /*****************************************************************************/
/*
 * jsimd_fdct_islow_neon
 *
 * This function contains a slow-but-accurate integer implementation of the
 * forward DCT (Discrete Cosine Transform). The following code is based
 * directly on IJG's original jfdctint.c; see jfdctint.c for more details.
 *
 * TODO: could be combined with 'jsimd_convsamp_neon' to get rid of a bunch
 * of VLD1.16 instructions.
 */
  2135. #define CONST_BITS 13
  2136. #define PASS1_BITS 2
  2137. #define DESCALE_P1 (CONST_BITS - PASS1_BITS)
  2138. #define DESCALE_P2 (CONST_BITS + PASS1_BITS)
  2139. #define XFIX_P_0_298 v0.h[0]
  2140. #define XFIX_N_0_390 v0.h[1]
  2141. #define XFIX_P_0_541 v0.h[2]
  2142. #define XFIX_P_0_765 v0.h[3]
  2143. #define XFIX_N_0_899 v0.h[4]
  2144. #define XFIX_P_1_175 v0.h[5]
  2145. #define XFIX_P_1_501 v0.h[6]
  2146. #define XFIX_N_1_847 v0.h[7]
  2147. #define XFIX_N_1_961 v1.h[0]
  2148. #define XFIX_P_2_053 v1.h[1]
  2149. #define XFIX_N_2_562 v1.h[2]
  2150. #define XFIX_P_3_072 v1.h[3]
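/* Rounding/descaling used by the two passes below (a sketch, matching the
 * DESCALE macro from jfdctint.c):
 *
 *   #define DESCALE(x, n)  (((x) + (1 << ((n) - 1))) >> (n))
 *
 * Pass 1 descales intermediate products by DESCALE_P1 = CONST_BITS -
 * PASS1_BITS, leaving PASS1_BITS of extra precision in the data, and pass 2
 * descales by DESCALE_P2 = CONST_BITS + PASS1_BITS to remove it again. The
 * rshrn/rshrn2 instructions perform the rounding add and the shift in one
 * step.
 */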
  2151. asm_function jsimd_fdct_islow_neon
  2152. DATA .req x0
  2153. TMP .req x9
  2154. /* Load constants */
  2155. get_symbol_loc TMP, Ljsimd_fdct_islow_neon_consts
  2156. ld1 {v0.8h, v1.8h}, [TMP]
  2157. /* Save NEON registers */
  2158. sub sp, sp, #64
  2159. mov x10, sp
  2160. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], 32
  2161. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], 32
  2162. /* Load all DATA into NEON registers with the following allocation:
  2163. * 0 1 2 3 | 4 5 6 7
  2164. * ---------+--------
  2165. * 0 | d16 | d17 | v16.8h
  2166. * 1 | d18 | d19 | v17.8h
  2167. * 2 | d20 | d21 | v18.8h
  2168. * 3 | d22 | d23 | v19.8h
  2169. * 4 | d24 | d25 | v20.8h
  2170. * 5 | d26 | d27 | v21.8h
  2171. * 6 | d28 | d29 | v22.8h
  2172. * 7 | d30 | d31 | v23.8h
  2173. */
  2174. ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
  2175. ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
  2176. sub DATA, DATA, #64
  2177. /* Transpose */
  2178. transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
  2179. /* 1-D FDCT */
  2180. add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
  2181. sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
  2182. add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
  2183. sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
  2184. add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
  2185. sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
  2186. add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
  2187. sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
  2188. /* even part */
  2189. add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
  2190. sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
  2191. add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
  2192. sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
  2193. add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
  2194. sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
  2195. add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
  2196. shl v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM)LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS); */
  2197. shl v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM)LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS); */
  2198. smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  2199. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  2200. mov v22.16b, v18.16b
  2201. mov v25.16b, v24.16b
  2202. smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  2203. smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  2204. smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  2205. smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  2206. rshrn v18.4h, v18.4s, #DESCALE_P1
  2207. rshrn v22.4h, v22.4s, #DESCALE_P1
  2208. rshrn2 v18.8h, v24.4s, #DESCALE_P1 /* dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS-PASS1_BITS); */
  2209. rshrn2 v22.8h, v25.4s, #DESCALE_P1 /* dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS-PASS1_BITS); */
  2210. /* Odd part */
  2211. add v8.8h, v28.8h, v31.8h /* z1 = tmp4 + tmp7; */
  2212. add v9.8h, v29.8h, v30.8h /* z2 = tmp5 + tmp6; */
  2213. add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
  2214. add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
  2215. smull v4.4s, v10.4h, XFIX_P_1_175 /* z5 lo = z3 lo * XFIX_P_1_175 */
  2216. smull2 v5.4s, v10.8h, XFIX_P_1_175
  2217. smlal v4.4s, v11.4h, XFIX_P_1_175 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
  2218. smlal2 v5.4s, v11.8h, XFIX_P_1_175
  2219. smull2 v24.4s, v28.8h, XFIX_P_0_298
  2220. smull2 v25.4s, v29.8h, XFIX_P_2_053
  2221. smull2 v26.4s, v30.8h, XFIX_P_3_072
  2222. smull2 v27.4s, v31.8h, XFIX_P_1_501
  2223. smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
  2224. smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
  2225. smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
  2226. smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
  2227. smull2 v12.4s, v8.8h, XFIX_N_0_899
  2228. smull2 v13.4s, v9.8h, XFIX_N_2_562
  2229. smull2 v14.4s, v10.8h, XFIX_N_1_961
  2230. smull2 v15.4s, v11.8h, XFIX_N_0_390
  2231. smull v8.4s, v8.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223); */
  2232. smull v9.4s, v9.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447); */
  2233. smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560); */
  2234. smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644); */
  2235. add v10.4s, v10.4s, v4.4s /* z3 += z5 */
  2236. add v14.4s, v14.4s, v5.4s
  2237. add v11.4s, v11.4s, v4.4s /* z4 += z5 */
  2238. add v15.4s, v15.4s, v5.4s
  2239. add v28.4s, v28.4s, v8.4s /* tmp4 += z1 */
  2240. add v24.4s, v24.4s, v12.4s
  2241. add v29.4s, v29.4s, v9.4s /* tmp5 += z2 */
  2242. add v25.4s, v25.4s, v13.4s
  2243. add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
  2244. add v26.4s, v26.4s, v14.4s
  2245. add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
  2246. add v27.4s, v27.4s, v15.4s
  2247. add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
  2248. add v24.4s, v24.4s, v14.4s
  2249. add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
  2250. add v25.4s, v25.4s, v15.4s
  2251. add v30.4s, v30.4s, v9.4s /* tmp6 += z2 */
  2252. add v26.4s, v26.4s, v13.4s
  2253. add v31.4s, v31.4s, v8.4s /* tmp7 += z1 */
  2254. add v27.4s, v27.4s, v12.4s
  2255. rshrn v23.4h, v28.4s, #DESCALE_P1
  2256. rshrn v21.4h, v29.4s, #DESCALE_P1
  2257. rshrn v19.4h, v30.4s, #DESCALE_P1
  2258. rshrn v17.4h, v31.4s, #DESCALE_P1
  2259. rshrn2 v23.8h, v24.4s, #DESCALE_P1 /* dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); */
  2260. rshrn2 v21.8h, v25.4s, #DESCALE_P1 /* dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); */
  2261. rshrn2 v19.8h, v26.4s, #DESCALE_P1 /* dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); */
  2262. rshrn2 v17.8h, v27.4s, #DESCALE_P1 /* dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); */
    /* Transpose */
    transpose_8x8   v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
    /* 1-D FDCT */
    add             v24.8h, v16.8h, v23.8h  /* tmp0 = dataptr[0] + dataptr[7]; */
    sub             v31.8h, v16.8h, v23.8h  /* tmp7 = dataptr[0] - dataptr[7]; */
    add             v25.8h, v17.8h, v22.8h  /* tmp1 = dataptr[1] + dataptr[6]; */
    sub             v30.8h, v17.8h, v22.8h  /* tmp6 = dataptr[1] - dataptr[6]; */
    add             v26.8h, v18.8h, v21.8h  /* tmp2 = dataptr[2] + dataptr[5]; */
    sub             v29.8h, v18.8h, v21.8h  /* tmp5 = dataptr[2] - dataptr[5]; */
    add             v27.8h, v19.8h, v20.8h  /* tmp3 = dataptr[3] + dataptr[4]; */
    sub             v28.8h, v19.8h, v20.8h  /* tmp4 = dataptr[3] - dataptr[4]; */
    /* even part */
    add             v8.8h, v24.8h, v27.8h   /* tmp10 = tmp0 + tmp3; */
    sub             v9.8h, v24.8h, v27.8h   /* tmp13 = tmp0 - tmp3; */
    add             v10.8h, v25.8h, v26.8h  /* tmp11 = tmp1 + tmp2; */
    sub             v11.8h, v25.8h, v26.8h  /* tmp12 = tmp1 - tmp2; */
    add             v16.8h, v8.8h, v10.8h   /* tmp10 + tmp11 */
    sub             v20.8h, v8.8h, v10.8h   /* tmp10 - tmp11 */
    add             v18.8h, v11.8h, v9.8h   /* tmp12 + tmp13 */
    srshr           v16.8h, v16.8h, #PASS1_BITS  /* dataptr[0] = (DCTELEM)DESCALE(tmp10 + tmp11, PASS1_BITS); */
    srshr           v20.8h, v20.8h, #PASS1_BITS  /* dataptr[4] = (DCTELEM)DESCALE(tmp10 - tmp11, PASS1_BITS); */
    smull2          v24.4s, v18.8h, XFIX_P_0_541  /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
    smull           v18.4s, v18.4h, XFIX_P_0_541  /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
    mov             v22.16b, v18.16b
    mov             v25.16b, v24.16b
    smlal           v18.4s, v9.4h, XFIX_P_0_765   /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
    smlal2          v24.4s, v9.8h, XFIX_P_0_765   /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
    smlal           v22.4s, v11.4h, XFIX_N_1_847  /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
    smlal2          v25.4s, v11.8h, XFIX_N_1_847  /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
    rshrn           v18.4h, v18.4s, #DESCALE_P2
    rshrn           v22.4h, v22.4s, #DESCALE_P2
    rshrn2          v18.8h, v24.4s, #DESCALE_P2  /* dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS+PASS1_BITS); */
    rshrn2          v22.8h, v25.4s, #DESCALE_P2  /* dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS+PASS1_BITS); */
    /* Odd part */
    add             v8.8h, v28.8h, v31.8h   /* z1 = tmp4 + tmp7; */
    add             v9.8h, v29.8h, v30.8h   /* z2 = tmp5 + tmp6; */
    add             v10.8h, v28.8h, v30.8h  /* z3 = tmp4 + tmp6; */
    add             v11.8h, v29.8h, v31.8h  /* z4 = tmp5 + tmp7; */
    smull           v4.4s, v10.4h, XFIX_P_1_175  /* z5 lo = z3 lo * XFIX_P_1_175 */
    smull2          v5.4s, v10.8h, XFIX_P_1_175
    smlal           v4.4s, v11.4h, XFIX_P_1_175  /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
    smlal2          v5.4s, v11.8h, XFIX_P_1_175
    smull2          v24.4s, v28.8h, XFIX_P_0_298
    smull2          v25.4s, v29.8h, XFIX_P_2_053
    smull2          v26.4s, v30.8h, XFIX_P_3_072
    smull2          v27.4s, v31.8h, XFIX_P_1_501
    smull           v28.4s, v28.4h, XFIX_P_0_298  /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
    smull           v29.4s, v29.4h, XFIX_P_2_053  /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
    smull           v30.4s, v30.4h, XFIX_P_3_072  /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
    smull           v31.4s, v31.4h, XFIX_P_1_501  /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
    smull2          v12.4s, v8.8h, XFIX_N_0_899
    smull2          v13.4s, v9.8h, XFIX_N_2_562
    smull2          v14.4s, v10.8h, XFIX_N_1_961
    smull2          v15.4s, v11.8h, XFIX_N_0_390
    smull           v8.4s, v8.4h, XFIX_N_0_899    /* z1 = MULTIPLY(z1, -FIX_0_899976223); */
    smull           v9.4s, v9.4h, XFIX_N_2_562    /* z2 = MULTIPLY(z2, -FIX_2_562915447); */
    smull           v10.4s, v10.4h, XFIX_N_1_961  /* z3 = MULTIPLY(z3, -FIX_1_961570560); */
    smull           v11.4s, v11.4h, XFIX_N_0_390  /* z4 = MULTIPLY(z4, -FIX_0_390180644); */
    add             v10.4s, v10.4s, v4.4s
    add             v14.4s, v14.4s, v5.4s
    add             v11.4s, v11.4s, v4.4s
    add             v15.4s, v15.4s, v5.4s
    add             v28.4s, v28.4s, v8.4s   /* tmp4 += z1 */
    add             v24.4s, v24.4s, v12.4s
    add             v29.4s, v29.4s, v9.4s   /* tmp5 += z2 */
    add             v25.4s, v25.4s, v13.4s
    add             v30.4s, v30.4s, v10.4s  /* tmp6 += z3 */
    add             v26.4s, v26.4s, v14.4s
    add             v31.4s, v31.4s, v11.4s  /* tmp7 += z4 */
    add             v27.4s, v27.4s, v15.4s
    add             v28.4s, v28.4s, v10.4s  /* tmp4 += z3 */
    add             v24.4s, v24.4s, v14.4s
    add             v29.4s, v29.4s, v11.4s  /* tmp5 += z4 */
    add             v25.4s, v25.4s, v15.4s
    add             v30.4s, v30.4s, v9.4s   /* tmp6 += z2 */
    add             v26.4s, v26.4s, v13.4s
    add             v31.4s, v31.4s, v8.4s   /* tmp7 += z1 */
    add             v27.4s, v27.4s, v12.4s
    rshrn           v23.4h, v28.4s, #DESCALE_P2
    rshrn           v21.4h, v29.4s, #DESCALE_P2
    rshrn           v19.4h, v30.4s, #DESCALE_P2
    rshrn           v17.4h, v31.4s, #DESCALE_P2
    rshrn2          v23.8h, v24.4s, #DESCALE_P2  /* dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS+PASS1_BITS); */
    rshrn2          v21.8h, v25.4s, #DESCALE_P2  /* dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS+PASS1_BITS); */
    rshrn2          v19.8h, v26.4s, #DESCALE_P2  /* dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS+PASS1_BITS); */
    rshrn2          v17.8h, v27.4s, #DESCALE_P2  /* dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS+PASS1_BITS); */
    /* store results */
    st1             {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
    st1             {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
    /* Restore NEON registers */
    ld1             {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
    ld1             {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
    br              x30

    .unreq          DATA
    .unreq          TMP

#undef XFIX_P_0_298
#undef XFIX_N_0_390
#undef XFIX_P_0_541
#undef XFIX_P_0_765
#undef XFIX_N_0_899
#undef XFIX_P_1_175
#undef XFIX_P_1_501
#undef XFIX_N_1_847
#undef XFIX_N_1_961
#undef XFIX_P_2_053
#undef XFIX_N_2_562
#undef XFIX_P_3_072


/*****************************************************************************/

/*
 * jsimd_fdct_ifast_neon
 *
 * This function contains a fast, not so accurate integer implementation of
 * the forward DCT (Discrete Cosine Transform). It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_fdct_ifast'
 * function from jfdctfst.c
 *
 * TODO: can be combined with 'jsimd_convsamp_neon' to get
 * rid of a bunch of VLD1.16 instructions
 */
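
/*
 * For reference, one 1-D pass of this transform (applied below to all eight
 * columns, then to all eight rows, with each NEON vector holding one
 * row/column) corresponds roughly to the following scalar sketch after
 * jfdctfst.c, where MULTIPLY() is the fixed-point multiply realized here
 * with sqdmulh and the XFIX_* lane constants:
 *
 *   tmp0 = dataptr[0] + dataptr[7];  tmp7 = dataptr[0] - dataptr[7];
 *   tmp1 = dataptr[1] + dataptr[6];  tmp6 = dataptr[1] - dataptr[6];
 *   tmp2 = dataptr[2] + dataptr[5];  tmp5 = dataptr[2] - dataptr[5];
 *   tmp3 = dataptr[3] + dataptr[4];  tmp4 = dataptr[3] - dataptr[4];
 *   // Even part
 *   tmp10 = tmp0 + tmp3;  tmp13 = tmp0 - tmp3;
 *   tmp11 = tmp1 + tmp2;  tmp12 = tmp1 - tmp2;
 *   dataptr[0] = tmp10 + tmp11;
 *   dataptr[4] = tmp10 - tmp11;
 *   z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781);
 *   dataptr[2] = tmp13 + z1;
 *   dataptr[6] = tmp13 - z1;
 *   // Odd part
 *   tmp10 = tmp4 + tmp5;  tmp11 = tmp5 + tmp6;  tmp12 = tmp6 + tmp7;
 *   z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433);
 *   z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5;
 *   z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5;
 *   z3 = MULTIPLY(tmp11, FIX_0_707106781);
 *   z11 = tmp7 + z3;  z13 = tmp7 - z3;
 *   dataptr[5] = z13 + z2;  dataptr[3] = z13 - z2;
 *   dataptr[1] = z11 + z4;  dataptr[7] = z11 - z4;
 */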
#undef XFIX_0_541196100
#define XFIX_0_382683433  v0.h[0]
#define XFIX_0_541196100  v0.h[1]
#define XFIX_0_707106781  v0.h[2]
#define XFIX_1_306562965  v0.h[3]

asm_function jsimd_fdct_ifast_neon

    DATA            .req x0
    TMP             .req x9

    /* Load constants */
    get_symbol_loc  TMP, Ljsimd_fdct_ifast_neon_consts
    ld1             {v0.4h}, [TMP]
    /* Load all DATA into NEON registers with the following allocation:
     *       0 1 2 3 | 4 5 6 7
     *      ---------+--------
     *   0 | d16     | d17     | v16.8h
     *   1 | d18     | d19     | v17.8h
     *   2 | d20     | d21     | v18.8h
     *   3 | d22     | d23     | v19.8h
     *   4 | d24     | d25     | v20.8h
     *   5 | d26     | d27     | v21.8h
     *   6 | d28     | d29     | v22.8h
     *   7 | d30     | d31     | v23.8h
     */
    ld1             {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
    ld1             {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
    mov             TMP, #2
    sub             DATA, DATA, #64
1:
    /* Transpose */
    transpose_8x8   v16, v17, v18, v19, v20, v21, v22, v23, v1, v2, v3, v4
    subs            TMP, TMP, #1
    /* 1-D FDCT */
    add             v4.8h, v19.8h, v20.8h
    sub             v20.8h, v19.8h, v20.8h
    sub             v28.8h, v18.8h, v21.8h
    add             v18.8h, v18.8h, v21.8h
    sub             v29.8h, v17.8h, v22.8h
    add             v17.8h, v17.8h, v22.8h
    sub             v21.8h, v16.8h, v23.8h
    add             v16.8h, v16.8h, v23.8h
    sub             v6.8h, v17.8h, v18.8h
    sub             v7.8h, v16.8h, v4.8h
    add             v5.8h, v17.8h, v18.8h
    add             v6.8h, v6.8h, v7.8h
    add             v4.8h, v16.8h, v4.8h
    sqdmulh         v6.8h, v6.8h, XFIX_0_707106781
    add             v19.8h, v20.8h, v28.8h
    add             v16.8h, v4.8h, v5.8h
    sub             v20.8h, v4.8h, v5.8h
    add             v5.8h, v28.8h, v29.8h
    add             v29.8h, v29.8h, v21.8h
    sqdmulh         v5.8h, v5.8h, XFIX_0_707106781
    sub             v28.8h, v19.8h, v29.8h
    add             v18.8h, v7.8h, v6.8h
    sqdmulh         v28.8h, v28.8h, XFIX_0_382683433
    sub             v22.8h, v7.8h, v6.8h
    sqdmulh         v19.8h, v19.8h, XFIX_0_541196100
    sqdmulh         v7.8h, v29.8h, XFIX_1_306562965
    add             v6.8h, v21.8h, v5.8h
    sub             v5.8h, v21.8h, v5.8h
    add             v29.8h, v29.8h, v28.8h
    add             v19.8h, v19.8h, v28.8h
    add             v29.8h, v29.8h, v7.8h
    add             v21.8h, v5.8h, v19.8h
    sub             v19.8h, v5.8h, v19.8h
    add             v17.8h, v6.8h, v29.8h
    sub             v23.8h, v6.8h, v29.8h
    b.ne            1b
    /* store results */
    st1             {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
    st1             {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
    br              x30

    .unreq          DATA
    .unreq          TMP

#undef XFIX_0_382683433
#undef XFIX_0_541196100
#undef XFIX_0_707106781
#undef XFIX_1_306562965


/*****************************************************************************/

/*
 * GLOBAL(void)
 * jsimd_quantize_neon(JCOEFPTR coef_block, DCTELEM *divisors,
 *                     DCTELEM *workspace);
 *
 */
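
/*
 * For reference, a scalar sketch of the per-coefficient work done below
 * (recip, corr and shift stand for the sub-tables of *divisors that
 * RECIPROCAL, CORRECTION and SHIFT point to; the names are illustrative):
 *
 *   temp = abs(workspace[i]) + corr[i];                 // add correction
 *   temp = ((unsigned)temp * recip[i]) >> 16;           // multiply by reciprocal
 *   temp >>= shift[i];                                  // final shift
 *   coef_block[i] = (workspace[i] < 0) ? -temp : temp;  // restore sign
 */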
asm_function jsimd_quantize_neon

    COEF_BLOCK      .req x0
    DIVISORS        .req x1
    WORKSPACE       .req x2

    RECIPROCAL      .req DIVISORS
    CORRECTION      .req x9
    SHIFT           .req x10
    LOOP_COUNT      .req x11

    mov             LOOP_COUNT, #2
    add             CORRECTION, DIVISORS, #(64 * 2)
    add             SHIFT, DIVISORS, #(64 * 6)
1:
    subs            LOOP_COUNT, LOOP_COUNT, #1
    ld1             {v0.8h, v1.8h, v2.8h, v3.8h}, [WORKSPACE], 64
    ld1             {v4.8h, v5.8h, v6.8h, v7.8h}, [CORRECTION], 64
    abs             v20.8h, v0.8h
    abs             v21.8h, v1.8h
    abs             v22.8h, v2.8h
    abs             v23.8h, v3.8h
    ld1             {v28.8h, v29.8h, v30.8h, v31.8h}, [RECIPROCAL], 64
    add             v20.8h, v20.8h, v4.8h  /* add correction */
    add             v21.8h, v21.8h, v5.8h
    add             v22.8h, v22.8h, v6.8h
    add             v23.8h, v23.8h, v7.8h
    umull           v4.4s, v20.4h, v28.4h  /* multiply by reciprocal */
    umull2          v16.4s, v20.8h, v28.8h
    umull           v5.4s, v21.4h, v29.4h
    umull2          v17.4s, v21.8h, v29.8h
    umull           v6.4s, v22.4h, v30.4h  /* multiply by reciprocal */
    umull2          v18.4s, v22.8h, v30.8h
    umull           v7.4s, v23.4h, v31.4h
    umull2          v19.4s, v23.8h, v31.8h
    ld1             {v24.8h, v25.8h, v26.8h, v27.8h}, [SHIFT], 64
    shrn            v4.4h, v4.4s, #16
    shrn            v5.4h, v5.4s, #16
    shrn            v6.4h, v6.4s, #16
    shrn            v7.4h, v7.4s, #16
    shrn2           v4.8h, v16.4s, #16
    shrn2           v5.8h, v17.4s, #16
    shrn2           v6.8h, v18.4s, #16
    shrn2           v7.8h, v19.4s, #16
    neg             v24.8h, v24.8h
    neg             v25.8h, v25.8h
    neg             v26.8h, v26.8h
    neg             v27.8h, v27.8h
    sshr            v0.8h, v0.8h, #15  /* extract sign */
    sshr            v1.8h, v1.8h, #15
    sshr            v2.8h, v2.8h, #15
    sshr            v3.8h, v3.8h, #15
    ushl            v4.8h, v4.8h, v24.8h  /* shift */
    ushl            v5.8h, v5.8h, v25.8h
    ushl            v6.8h, v6.8h, v26.8h
    ushl            v7.8h, v7.8h, v27.8h
    eor             v4.16b, v4.16b, v0.16b  /* restore sign */
    eor             v5.16b, v5.16b, v1.16b
    eor             v6.16b, v6.16b, v2.16b
    eor             v7.16b, v7.16b, v3.16b
    sub             v4.8h, v4.8h, v0.8h
    sub             v5.8h, v5.8h, v1.8h
    sub             v6.8h, v6.8h, v2.8h
    sub             v7.8h, v7.8h, v3.8h
    st1             {v4.8h, v5.8h, v6.8h, v7.8h}, [COEF_BLOCK], 64
    b.ne            1b

    br              x30  /* return */

    .unreq          COEF_BLOCK
    .unreq          DIVISORS
    .unreq          WORKSPACE
    .unreq          RECIPROCAL
    .unreq          CORRECTION
    .unreq          SHIFT
    .unreq          LOOP_COUNT


/*****************************************************************************/

/*
 * Downsample pixel values of a single component.
 * This version handles the common case of 2:1 horizontal and 1:1 vertical,
 * without smoothing.
 *
 * GLOBAL(void)
 * jsimd_h2v1_downsample_neon(JDIMENSION image_width, int max_v_samp_factor,
 *                            JDIMENSION v_samp_factor,
 *                            JDIMENSION width_in_blocks,
 *                            JSAMPARRAY input_data, JSAMPARRAY output_data);
 */
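
/*
 * For reference, this is the NEON counterpart of h2v1_downsample() in
 * jcsample.c; per output sample it computes roughly
 *
 *   outptr[i] = (inptr[2 * i] + inptr[2 * i + 1] + bias) >> 1;
 *
 * where bias alternates 0, 1, 0, 1, ... across the row (the 0x10000 pattern
 * replicated into v16 below), and the tbl lookup pads the final, partial
 * block by repeating the last valid column.
 */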
asm_function jsimd_h2v1_downsample_neon

    IMAGE_WIDTH     .req x0
    MAX_V_SAMP      .req x1
    V_SAMP          .req x2
    BLOCK_WIDTH     .req x3
    INPUT_DATA      .req x4
    OUTPUT_DATA     .req x5
    OUTPTR          .req x9
    INPTR           .req x10
    TMP1            .req x11
    TMP2            .req x12
    TMP3            .req x13
    TMPDUP          .req w15

    mov             TMPDUP, #0x10000
    lsl             TMP2, BLOCK_WIDTH, #4
    sub             TMP2, TMP2, IMAGE_WIDTH
    get_symbol_loc  TMP3, Ljsimd_h2_downsample_neon_consts
    add             TMP3, TMP3, TMP2, lsl #4
    dup             v16.4s, TMPDUP
    ld1             {v18.16b}, [TMP3]

1:  /* row loop */
    ldr             INPTR, [INPUT_DATA], #8
    ldr             OUTPTR, [OUTPUT_DATA], #8
    subs            TMP1, BLOCK_WIDTH, #1
    b.eq            3f
2:  /* columns */
    ld1             {v0.16b}, [INPTR], #16
    mov             v4.16b, v16.16b
    subs            TMP1, TMP1, #1
    uadalp          v4.8h, v0.16b
    shrn            v6.8b, v4.8h, #1
    st1             {v6.8b}, [OUTPTR], #8
    b.ne            2b
3:  /* last columns */
    ld1             {v0.16b}, [INPTR]
    mov             v4.16b, v16.16b
    subs            V_SAMP, V_SAMP, #1
    /* expand right */
    tbl             v2.16b, {v0.16b}, v18.16b
    uadalp          v4.8h, v2.16b
    shrn            v6.8b, v4.8h, #1
    st1             {v6.8b}, [OUTPTR], #8
    b.ne            1b

    br              x30

    .unreq          IMAGE_WIDTH
    .unreq          MAX_V_SAMP
    .unreq          V_SAMP
    .unreq          BLOCK_WIDTH
    .unreq          INPUT_DATA
    .unreq          OUTPUT_DATA
    .unreq          OUTPTR
    .unreq          INPTR
    .unreq          TMP1
    .unreq          TMP2
    .unreq          TMP3
    .unreq          TMPDUP


/*****************************************************************************/

/*
 * Downsample pixel values of a single component.
 * This version handles the common case of 2:1 horizontal and 2:1 vertical,
 * without smoothing.
 *
 * GLOBAL(void)
 * jsimd_h2v2_downsample_neon(JDIMENSION image_width, int max_v_samp_factor,
 *                            JDIMENSION v_samp_factor,
 *                            JDIMENSION width_in_blocks,
 *                            JSAMPARRAY input_data, JSAMPARRAY output_data);
 */
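
/*
 * For reference, this mirrors h2v2_downsample() in jcsample.c; per output
 * sample it computes roughly
 *
 *   outptr[i] = (inptr0[2 * i] + inptr0[2 * i + 1] +
 *                inptr1[2 * i] + inptr1[2 * i + 1] + bias) >> 2;
 *
 * where bias alternates 1, 2, 1, 2, ... across the row (the 0x00020001
 * pattern built in TMPDUP and replicated into v16 below).
 */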
.balign 16
asm_function jsimd_h2v2_downsample_neon

    IMAGE_WIDTH     .req x0
    MAX_V_SAMP      .req x1
    V_SAMP          .req x2
    BLOCK_WIDTH     .req x3
    INPUT_DATA      .req x4
    OUTPUT_DATA     .req x5
    OUTPTR          .req x9
    INPTR0          .req x10
    INPTR1          .req x14
    TMP1            .req x11
    TMP2            .req x12
    TMP3            .req x13
    TMPDUP          .req w15

    mov             TMPDUP, #1
    lsl             TMP2, BLOCK_WIDTH, #4
    lsl             TMPDUP, TMPDUP, #17
    sub             TMP2, TMP2, IMAGE_WIDTH
    get_symbol_loc  TMP3, Ljsimd_h2_downsample_neon_consts
    orr             TMPDUP, TMPDUP, #1
    add             TMP3, TMP3, TMP2, lsl #4
    dup             v16.4s, TMPDUP
    ld1             {v18.16b}, [TMP3]

1:  /* row loop */
    ldr             INPTR0, [INPUT_DATA], #8
    ldr             OUTPTR, [OUTPUT_DATA], #8
    ldr             INPTR1, [INPUT_DATA], #8
    subs            TMP1, BLOCK_WIDTH, #1
    b.eq            3f
2:  /* columns */
    ld1             {v0.16b}, [INPTR0], #16
    ld1             {v1.16b}, [INPTR1], #16
    mov             v4.16b, v16.16b
    subs            TMP1, TMP1, #1
    uadalp          v4.8h, v0.16b
    uadalp          v4.8h, v1.16b
    shrn            v6.8b, v4.8h, #2
    st1             {v6.8b}, [OUTPTR], #8
    b.ne            2b
3:  /* last columns */
    ld1             {v0.16b}, [INPTR0], #16
    ld1             {v1.16b}, [INPTR1], #16
    mov             v4.16b, v16.16b
    subs            V_SAMP, V_SAMP, #1
    /* expand right */
    tbl             v2.16b, {v0.16b}, v18.16b
    tbl             v3.16b, {v1.16b}, v18.16b
    uadalp          v4.8h, v2.16b
    uadalp          v4.8h, v3.16b
    shrn            v6.8b, v4.8h, #2
    st1             {v6.8b}, [OUTPTR], #8
    b.ne            1b

    br              x30

    .unreq          IMAGE_WIDTH
    .unreq          MAX_V_SAMP
    .unreq          V_SAMP
    .unreq          BLOCK_WIDTH
    .unreq          INPUT_DATA
    .unreq          OUTPUT_DATA
    .unreq          OUTPTR
    .unreq          INPTR0
    .unreq          INPTR1
    .unreq          TMP1
    .unreq          TMP2
    .unreq          TMP3
    .unreq          TMPDUP


/*****************************************************************************/

/*
 * GLOBAL(JOCTET *)
 * jsimd_huff_encode_one_block(working_state *state, JOCTET *buffer,
 *                             JCOEFPTR block, int last_dc_val,
 *                             c_derived_tbl *dctbl, c_derived_tbl *actbl)
 *
 */

    BUFFER          .req x1
    PUT_BUFFER      .req x6
    PUT_BITS        .req x7
    PUT_BITSw       .req w7

.macro emit_byte
    sub             PUT_BITS, PUT_BITS, #0x8
    lsr             x19, PUT_BUFFER, PUT_BITS
    uxtb            w19, w19
    strb            w19, [BUFFER, #1]!
    cmp             w19, #0xff
    b.ne            14f
    strb            wzr, [BUFFER, #1]!
14:
.endm

.macro put_bits CODE, SIZE
    lsl             PUT_BUFFER, PUT_BUFFER, \SIZE
    add             PUT_BITS, PUT_BITS, \SIZE
    orr             PUT_BUFFER, PUT_BUFFER, \CODE
.endm

.macro checkbuf31
    cmp             PUT_BITS, #0x20
    b.lt            31f
    emit_byte
    emit_byte
    emit_byte
    emit_byte
31:
.endm

.macro checkbuf47
    cmp             PUT_BITS, #0x30
    b.lt            47f
    emit_byte
    emit_byte
    emit_byte
    emit_byte
    emit_byte
    emit_byte
47:
.endm
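
/*
 * The macros above keep the pending Huffman-coded bits in a 64-bit buffer:
 * PUT_BUFFER holds PUT_BITS not-yet-written bits.  A rough C equivalent of
 * put_bits followed by one emit_byte flush (names are illustrative only):
 *
 *   put_buffer = (put_buffer << size) | code;   // put_bits CODE, SIZE
 *   put_bits += size;
 *   ...
 *   put_bits -= 8;                              // emit_byte
 *   c = (JOCTET)(put_buffer >> put_bits);
 *   *++buffer = c;
 *   if (c == 0xFF)
 *     *++buffer = 0;                            // JPEG byte stuffing
 *
 * checkbuf31/checkbuf47 flush four or six bytes this way once PUT_BITS has
 * reached 32 or 48, so the next code always fits in the buffer.
 */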
.macro generate_jsimd_huff_encode_one_block fast_tbl

.if \fast_tbl == 1
asm_function jsimd_huff_encode_one_block_neon
.else
asm_function jsimd_huff_encode_one_block_neon_slowtbl
.endif
    sub             sp, sp, 272
    sub             BUFFER, BUFFER, #0x1  /* BUFFER=buffer-- */
    /* Save ARM registers */
    stp             x19, x20, [sp]
    get_symbol_loc  x15, Ljsimd_huff_encode_one_block_neon_consts
    ldr             PUT_BUFFER, [x0, #0x10]
    ldr             PUT_BITSw, [x0, #0x18]
    ldrsh           w12, [x2]  /* load DC coeff in w12 */
    /* prepare data */
.if \fast_tbl == 1
    ld1             {v23.16b}, [x15], #16
    ld1             {v0.16b, v1.16b, v2.16b, v3.16b}, [x15], #64
    ld1             {v4.16b, v5.16b, v6.16b, v7.16b}, [x15], #64
    ld1             {v16.16b, v17.16b, v18.16b, v19.16b}, [x15], #64
    ld1             {v24.16b, v25.16b, v26.16b, v27.16b}, [x2], #64
    ld1             {v28.16b, v29.16b, v30.16b, v31.16b}, [x2], #64
    sub             w12, w12, w3  /* last_dc_val, not used afterwards */
    /* ZigZag 8x8 */
    tbl             v0.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v0.16b
    tbl             v1.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v1.16b
    tbl             v2.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v2.16b
    tbl             v3.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v3.16b
    tbl             v4.16b, {v28.16b, v29.16b, v30.16b, v31.16b}, v4.16b
    tbl             v5.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v5.16b
    tbl             v6.16b, {v27.16b, v28.16b, v29.16b, v30.16b}, v6.16b
    tbl             v7.16b, {v29.16b, v30.16b, v31.16b}, v7.16b
    ins             v0.h[0], w12
    tbx             v1.16b, {v28.16b}, v16.16b
    tbx             v2.16b, {v29.16b, v30.16b}, v17.16b
    tbx             v5.16b, {v29.16b, v30.16b}, v18.16b
    tbx             v6.16b, {v31.16b}, v19.16b
.else
    add             x13, x2, #0x22
    sub             w12, w12, w3  /* last_dc_val, not used afterwards */
    ld1             {v23.16b}, [x15]
    add             x14, x2, #0x18
    add             x3, x2, #0x36
    ins             v0.h[0], w12
    add             x9, x2, #0x2
    ld1             {v1.h}[0], [x13]
    add             x15, x2, #0x30
    ld1             {v2.h}[0], [x14]
    add             x19, x2, #0x26
    ld1             {v3.h}[0], [x3]
    add             x20, x2, #0x28
    ld1             {v0.h}[1], [x9]
    add             x12, x2, #0x10
    ld1             {v1.h}[1], [x15]
    add             x13, x2, #0x40
    ld1             {v2.h}[1], [x19]
    add             x14, x2, #0x34
    ld1             {v3.h}[1], [x20]
    add             x3, x2, #0x1a
    ld1             {v0.h}[2], [x12]
    add             x9, x2, #0x20
    ld1             {v1.h}[2], [x13]
    add             x15, x2, #0x32
    ld1             {v2.h}[2], [x14]
    add             x19, x2, #0x42
    ld1             {v3.h}[2], [x3]
    add             x20, x2, #0xc
    ld1             {v0.h}[3], [x9]
    add             x12, x2, #0x12
    ld1             {v1.h}[3], [x15]
    add             x13, x2, #0x24
    ld1             {v2.h}[3], [x19]
    add             x14, x2, #0x50
    ld1             {v3.h}[3], [x20]
    add             x3, x2, #0xe
    ld1             {v0.h}[4], [x12]
    add             x9, x2, #0x4
    ld1             {v1.h}[4], [x13]
    add             x15, x2, #0x16
    ld1             {v2.h}[4], [x14]
    add             x19, x2, #0x60
    ld1             {v3.h}[4], [x3]
    add             x20, x2, #0x1c
    ld1             {v0.h}[5], [x9]
    add             x12, x2, #0x6
    ld1             {v1.h}[5], [x15]
    add             x13, x2, #0x8
    ld1             {v2.h}[5], [x19]
    add             x14, x2, #0x52
    ld1             {v3.h}[5], [x20]
    add             x3, x2, #0x2a
    ld1             {v0.h}[6], [x12]
    add             x9, x2, #0x14
    ld1             {v1.h}[6], [x13]
    add             x15, x2, #0xa
    ld1             {v2.h}[6], [x14]
    add             x19, x2, #0x44
    ld1             {v3.h}[6], [x3]
    add             x20, x2, #0x38
    ld1             {v0.h}[7], [x9]
    add             x12, x2, #0x46
    ld1             {v1.h}[7], [x15]
    add             x13, x2, #0x3a
    ld1             {v2.h}[7], [x19]
    add             x14, x2, #0x74
    ld1             {v3.h}[7], [x20]
    add             x3, x2, #0x6a
    ld1             {v4.h}[0], [x12]
    add             x9, x2, #0x54
    ld1             {v5.h}[0], [x13]
    add             x15, x2, #0x2c
    ld1             {v6.h}[0], [x14]
    add             x19, x2, #0x76
    ld1             {v7.h}[0], [x3]
    add             x20, x2, #0x78
    ld1             {v4.h}[1], [x9]
    add             x12, x2, #0x62
    ld1             {v5.h}[1], [x15]
    add             x13, x2, #0x1e
    ld1             {v6.h}[1], [x19]
    add             x14, x2, #0x68
    ld1             {v7.h}[1], [x20]
    add             x3, x2, #0x7a
    ld1             {v4.h}[2], [x12]
    add             x9, x2, #0x70
    ld1             {v5.h}[2], [x13]
    add             x15, x2, #0x2e
    ld1             {v6.h}[2], [x14]
    add             x19, x2, #0x5a
    ld1             {v7.h}[2], [x3]
    add             x20, x2, #0x6c
    ld1             {v4.h}[3], [x9]
    add             x12, x2, #0x72
    ld1             {v5.h}[3], [x15]
    add             x13, x2, #0x3c
    ld1             {v6.h}[3], [x19]
    add             x14, x2, #0x4c
    ld1             {v7.h}[3], [x20]
    add             x3, x2, #0x5e
    ld1             {v4.h}[4], [x12]
    add             x9, x2, #0x64
    ld1             {v5.h}[4], [x13]
    add             x15, x2, #0x4a
    ld1             {v6.h}[4], [x14]
    add             x19, x2, #0x3e
    ld1             {v7.h}[4], [x3]
    add             x20, x2, #0x6e
    ld1             {v4.h}[5], [x9]
    add             x12, x2, #0x56
    ld1             {v5.h}[5], [x15]
    add             x13, x2, #0x58
    ld1             {v6.h}[5], [x19]
    add             x14, x2, #0x4e
    ld1             {v7.h}[5], [x20]
    add             x3, x2, #0x7c
    ld1             {v4.h}[6], [x12]
    add             x9, x2, #0x48
    ld1             {v5.h}[6], [x13]
    add             x15, x2, #0x66
    ld1             {v6.h}[6], [x14]
    add             x19, x2, #0x5c
    ld1             {v7.h}[6], [x3]
    add             x20, x2, #0x7e
    ld1             {v4.h}[7], [x9]
    ld1             {v5.h}[7], [x15]
    ld1             {v6.h}[7], [x19]
    ld1             {v7.h}[7], [x20]
.endif
    cmlt            v24.8h, v0.8h, #0
    cmlt            v25.8h, v1.8h, #0
    cmlt            v26.8h, v2.8h, #0
    cmlt            v27.8h, v3.8h, #0
    cmlt            v28.8h, v4.8h, #0
    cmlt            v29.8h, v5.8h, #0
    cmlt            v30.8h, v6.8h, #0
    cmlt            v31.8h, v7.8h, #0
    abs             v0.8h, v0.8h
    abs             v1.8h, v1.8h
    abs             v2.8h, v2.8h
    abs             v3.8h, v3.8h
    abs             v4.8h, v4.8h
    abs             v5.8h, v5.8h
    abs             v6.8h, v6.8h
    abs             v7.8h, v7.8h
    eor             v24.16b, v24.16b, v0.16b
    eor             v25.16b, v25.16b, v1.16b
    eor             v26.16b, v26.16b, v2.16b
    eor             v27.16b, v27.16b, v3.16b
    eor             v28.16b, v28.16b, v4.16b
    eor             v29.16b, v29.16b, v5.16b
    eor             v30.16b, v30.16b, v6.16b
    eor             v31.16b, v31.16b, v7.16b
    cmeq            v16.8h, v0.8h, #0
    cmeq            v17.8h, v1.8h, #0
    cmeq            v18.8h, v2.8h, #0
    cmeq            v19.8h, v3.8h, #0
    cmeq            v20.8h, v4.8h, #0
    cmeq            v21.8h, v5.8h, #0
    cmeq            v22.8h, v6.8h, #0
    xtn             v16.8b, v16.8h
    xtn             v18.8b, v18.8h
    xtn             v20.8b, v20.8h
    xtn             v22.8b, v22.8h
    umov            w14, v0.h[0]
    xtn2            v16.16b, v17.8h
    umov            w13, v24.h[0]
    xtn2            v18.16b, v19.8h
    clz             w14, w14
    xtn2            v20.16b, v21.8h
    lsl             w13, w13, w14
    cmeq            v17.8h, v7.8h, #0
    sub             w12, w14, #32
    xtn2            v22.16b, v17.8h
    lsr             w13, w13, w14
    and             v16.16b, v16.16b, v23.16b
    neg             w12, w12
    and             v18.16b, v18.16b, v23.16b
    add             x3, x4, #0x400  /* x3 = dctbl->ehufsi */
    and             v20.16b, v20.16b, v23.16b
    add             x15, sp, #0x90  /* x15 = t2 */
    and             v22.16b, v22.16b, v23.16b
    ldr             w10, [x4, x12, lsl #2]
    addp            v16.16b, v16.16b, v18.16b
    ldrb            w11, [x3, x12]
    addp            v20.16b, v20.16b, v22.16b
    checkbuf47
    addp            v16.16b, v16.16b, v20.16b
    put_bits        x10, x11
    addp            v16.16b, v16.16b, v18.16b
    checkbuf47
    umov            x9, v16.D[0]
    put_bits        x13, x12
    cnt             v17.8b, v16.8b
    mvn             x9, x9
    addv            B18, v17.8b
    add             x4, x5, #0x400  /* x4 = actbl->ehufsi */
    umov            w12, v18.b[0]
    lsr             x9, x9, #0x1  /* clear AC coeff */
    ldr             w13, [x5, #0x3c0]  /* x13 = actbl->ehufco[0xf0] */
    rbit            x9, x9  /* x9 = index0 */
    ldrb            w14, [x4, #0xf0]  /* x14 = actbl->ehufsi[0xf0] */
    cmp             w12, #(64-8)
    add             x11, sp, #16
    b.lt            4f
    cbz             x9, 6f
    st1             {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
    st1             {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
    st1             {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
    st1             {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
1:
    clz             x2, x9
    add             x15, x15, x2, lsl #1
    lsl             x9, x9, x2
    ldrh            w20, [x15, #-126]
2:
    cmp             x2, #0x10
    b.lt            3f
    sub             x2, x2, #0x10
    checkbuf47
    put_bits        x13, x14
    b               2b
3:
    clz             w20, w20
    ldrh            w3, [x15, #2]!
    sub             w11, w20, #32
    lsl             w3, w3, w20
    neg             w11, w11
    lsr             w3, w3, w20
    add             x2, x11, x2, lsl #4
    lsl             x9, x9, #0x1
    ldr             w12, [x5, x2, lsl #2]
    ldrb            w10, [x4, x2]
    checkbuf31
    put_bits        x12, x10
    put_bits        x3, x11
    cbnz            x9, 1b
    b               6f
4:
    movi            v21.8h, #0x0010
    clz             v0.8h, v0.8h
    clz             v1.8h, v1.8h
    clz             v2.8h, v2.8h
    clz             v3.8h, v3.8h
    clz             v4.8h, v4.8h
    clz             v5.8h, v5.8h
    clz             v6.8h, v6.8h
    clz             v7.8h, v7.8h
    ushl            v24.8h, v24.8h, v0.8h
    ushl            v25.8h, v25.8h, v1.8h
    ushl            v26.8h, v26.8h, v2.8h
    ushl            v27.8h, v27.8h, v3.8h
    ushl            v28.8h, v28.8h, v4.8h
    ushl            v29.8h, v29.8h, v5.8h
    ushl            v30.8h, v30.8h, v6.8h
    ushl            v31.8h, v31.8h, v7.8h
    neg             v0.8h, v0.8h
    neg             v1.8h, v1.8h
    neg             v2.8h, v2.8h
    neg             v3.8h, v3.8h
    neg             v4.8h, v4.8h
    neg             v5.8h, v5.8h
    neg             v6.8h, v6.8h
    neg             v7.8h, v7.8h
    ushl            v24.8h, v24.8h, v0.8h
    ushl            v25.8h, v25.8h, v1.8h
    ushl            v26.8h, v26.8h, v2.8h
    ushl            v27.8h, v27.8h, v3.8h
    ushl            v28.8h, v28.8h, v4.8h
    ushl            v29.8h, v29.8h, v5.8h
    ushl            v30.8h, v30.8h, v6.8h
    ushl            v31.8h, v31.8h, v7.8h
    add             v0.8h, v21.8h, v0.8h
    add             v1.8h, v21.8h, v1.8h
    add             v2.8h, v21.8h, v2.8h
    add             v3.8h, v21.8h, v3.8h
    add             v4.8h, v21.8h, v4.8h
    add             v5.8h, v21.8h, v5.8h
    add             v6.8h, v21.8h, v6.8h
    add             v7.8h, v21.8h, v7.8h
    st1             {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
    st1             {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
    st1             {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
    st1             {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
1:
    clz             x2, x9
    add             x15, x15, x2, lsl #1
    lsl             x9, x9, x2
    ldrh            w11, [x15, #-126]
2:
    cmp             x2, #0x10
    b.lt            3f
    sub             x2, x2, #0x10
    checkbuf47
    put_bits        x13, x14
    b               2b
3:
    ldrh            w3, [x15, #2]!
    add             x2, x11, x2, lsl #4
    lsl             x9, x9, #0x1
    ldr             w12, [x5, x2, lsl #2]
    ldrb            w10, [x4, x2]
    checkbuf31
    put_bits        x12, x10
    put_bits        x3, x11
    cbnz            x9, 1b
6:
    add             x13, sp, #0x10e
    cmp             x15, x13
    b.hs            1f
    ldr             w12, [x5]
    ldrb            w14, [x4]
    checkbuf47
    put_bits        x12, x14
1:
    str             PUT_BUFFER, [x0, #0x10]
    str             PUT_BITSw, [x0, #0x18]
    ldp             x19, x20, [sp], 16
    add             x0, BUFFER, #0x1
    add             sp, sp, 256
    br              x30

.endm

generate_jsimd_huff_encode_one_block 1
generate_jsimd_huff_encode_one_block 0

    .unreq          BUFFER
    .unreq          PUT_BUFFER
    .unreq          PUT_BITS
    .unreq          PUT_BITSw

.purgem emit_byte
.purgem put_bits
.purgem checkbuf31
.purgem checkbuf47