convert_from_argb.cc

/*
 *  Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/convert_from_argb.h"

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// ARGB little endian (bgra in memory) to I444
LIBYUV_API
int ARGBToI444(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*ARGBToUV444Row)(const uint8_t* src_argb, uint8_t* dst_u,
                         uint8_t* dst_v, int width) = ARGBToUV444Row_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_y == width &&
      dst_stride_u == width && dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOUV444ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUV444Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
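
// Example (illustrative sketch, not part of the library; buffer names are
// hypothetical): converting a 64x48 BGRA-in-memory image to I444. I444 keeps
// full-resolution chroma, so Y, U and V are each width x height bytes:
//
//   uint8_t argb[64 * 48 * 4];  // filled elsewhere
//   uint8_t yp[64 * 48], up[64 * 48], vp[64 * 48];
//   int rc = ARGBToI444(argb, 64 * 4,  // source and its stride in bytes
//                       yp, 64, up, 64, vp, 64, 64, 48);
//   // rc is 0 on success, -1 on invalid arguments.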

// ARGB little endian (bgra in memory) to I422
LIBYUV_API
int ARGBToI422(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_y == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
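
// Example (hedged sketch with hypothetical buffers): I422 halves chroma
// horizontally, so U and V are ((width + 1) / 2) x height. For a 64x48
// source each chroma plane is 32x48 with a stride of 32:
//
//   uint8_t argb[64 * 48 * 4], yp[64 * 48], up[32 * 48], vp[32 * 48];
//   ARGBToI422(argb, 64 * 4, yp, 64, up, 32, vp, 32, 64, 48);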

LIBYUV_API
int ARGBToNV12(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate a row of U and a row of V.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
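
// Example (illustrative, hypothetical buffers): NV12 subsamples chroma 2x2
// and interleaves it as UVUV..., so dst_uv needs ((width + 1) / 2) * 2 bytes
// per row for (height + 1) / 2 rows. The scratch rows above are rounded up
// via (halfwidth + 31) & ~31 so row functions may write up to a 32-byte
// boundary; e.g. a halfwidth of 33 reserves 64 bytes per scratch row.
//
//   uint8_t argb[64 * 48 * 4], yp[64 * 48], uv[64 * 24];  // UV: 32*2 x 24
//   ARGBToNV12(argb, 64 * 4, yp, 64, uv, 64, 64, 48);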

// Same as NV12 but U and V swapped.
LIBYUV_API
int ARGBToNV21(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_vu, int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate a row of U and a row of V.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_vu += dst_stride_vu;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

LIBYUV_API
int ABGRToNV12(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (!src_abgr || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToUVRow = ABGRToUVRow_Any_AVX2;
    ABGRToYRow = ABGRToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_AVX2;
      ABGRToYRow = ABGRToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate a row of U and a row of V.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
      ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
      src_abgr += src_stride_abgr * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

// Same as NV12 but U and V swapped.
LIBYUV_API
int ABGRToNV21(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_vu, int width) = MergeUVRow_C;
  if (!src_abgr || !dst_y || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToUVRow = ABGRToUVRow_Any_AVX2;
    ABGRToYRow = ABGRToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_AVX2;
      ABGRToYRow = ABGRToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
  {
    // Allocate a row of U and a row of V.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
      ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
      src_abgr += src_stride_abgr * 2;
      dst_y += dst_stride_y * 2;
      dst_vu += dst_stride_vu;
    }
    if (height & 1) {
      ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

// Convert ARGB to YUY2.
LIBYUV_API
int ARGBToYUY2(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yuy2,
               int dst_stride_yuy2,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
                        const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
      I422ToYUY2Row_C;
  if (!src_argb || !dst_yuy2 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
    dst_stride_yuy2 = -dst_stride_yuy2;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_yuy2 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yuy2 = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_NEON;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      I422ToYUY2Row = I422ToYUY2Row_MMI;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_MSA;
    }
  }
#endif
  {
    // Allocate a row of Y and half-width rows of U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8_t* row_u = row_y + ((width + 63) & ~63);
    uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
      src_argb += src_stride_argb;
      dst_yuy2 += dst_stride_yuy2;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}
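
// Example (hedged sketch, hypothetical buffers): YUY2 is packed 4:2:2 laid
// out as Y0 U0 Y1 V0, i.e. 2 bytes per pixel, so the destination needs
// width * 2 bytes per row:
//
//   uint8_t argb[64 * 48 * 4], yuy2[64 * 2 * 48];
//   ARGBToYUY2(argb, 64 * 4, yuy2, 64 * 2, 64, 48);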

// Convert ARGB to UYVY.
LIBYUV_API
int ARGBToUYVY(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_uyvy,
               int dst_stride_uyvy,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
                        const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
      I422ToUYVYRow_C;
  if (!src_argb || !dst_uyvy || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
    dst_stride_uyvy = -dst_stride_uyvy;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_uyvy == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_uyvy = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      I422ToUYVYRow = I422ToUYVYRow_MMI;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_MSA;
    }
  }
#endif
  {
    // Allocate a row of Y and half-width rows of U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8_t* row_u = row_y + ((width + 63) & ~63);
    uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
      src_argb += src_stride_argb;
      dst_uyvy += dst_stride_uyvy;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}

// Convert ARGB to I400.
LIBYUV_API
int ARGBToI400(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               int width,
               int height) {
  int y;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}
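
// Example (illustrative, hypothetical buffers): extracting luma only. As
// with the other conversions in this file, a negative height converts the
// image bottom-up, producing vertically flipped output:
//
//   uint8_t argb[64 * 48 * 4], yp[64 * 48];
//   ARGBToI400(argb, 64 * 4, yp, 64, 64, -48);  // -48 flips vertically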

// Shuffle table for converting ARGB to RGBA.
static const uvec8 kShuffleMaskARGBToRGBA = {
    3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u};

// Convert ARGB to RGBA.
LIBYUV_API
int ARGBToRGBA(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_rgba,
               int dst_stride_rgba,
               int width,
               int height) {
  return ARGBShuffle(src_argb, src_stride_argb, dst_rgba, dst_stride_rgba,
                     (const uint8_t*)(&kShuffleMaskARGBToRGBA), width, height);
}
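
// Reading the mask above: output byte i within each 16-byte group is taken
// from input byte kShuffleMaskARGBToRGBA[i] (pshufb semantics), so the
// leading 3u moves the alpha byte of each 4-byte BGRA pixel to the front of
// the output pixel. Example call (hedged sketch, hypothetical buffers):
//
//   uint8_t argb[64 * 48 * 4], rgba[64 * 48 * 4];
//   ARGBToRGBA(argb, 64 * 4, rgba, 64 * 4, 64, 48);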

// Convert ARGB To RGB24.
LIBYUV_API
int ARGBToRGB24(const uint8_t* src_argb,
                int src_stride_argb,
                uint8_t* dst_rgb24,
                int dst_stride_rgb24,
                int width,
                int height) {
  int y;
  void (*ARGBToRGB24Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToRGB24Row_C;
  if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_rgb24 == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb24 = 0;
  }
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRGB24Row = ARGBToRGB24Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_AVX512VBMI)
  if (TestCpuFlag(kCpuHasAVX512VBMI)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX512VBMI;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRGB24Row = ARGBToRGB24Row_AVX512VBMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB24Row = ARGBToRGB24Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}
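
// Example (hedged sketch, hypothetical buffers): RGB24 packs 3 bytes per
// pixel, so rows shrink from width * 4 to width * 3 bytes:
//
//   uint8_t argb[64 * 48 * 4], rgb24[64 * 3 * 48];
//   ARGBToRGB24(argb, 64 * 4, rgb24, 64 * 3, 64, 48);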

// Convert ARGB To RAW.
LIBYUV_API
int ARGBToRAW(const uint8_t* src_argb,
              int src_stride_argb,
              uint8_t* dst_raw,
              int dst_stride_raw,
              int width,
              int height) {
  int y;
  void (*ARGBToRAWRow)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToRAWRow_C;
  if (!src_argb || !dst_raw || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_raw == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_raw = 0;
  }
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRAWRow = ARGBToRAWRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRAWRow = ARGBToRAWRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRAWRow = ARGBToRAWRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}
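
// Note: RAW appears to be the byte-reversed sibling of RGB24 (R, G, B order
// in memory, per my reading of the libyuv FOURCC naming); the call shape
// matches ARGBToRGB24 with a width * 3 destination stride (hypothetical
// buffers):
//
//   uint8_t argb[64 * 48 * 4], raw[64 * 3 * 48];
//   ARGBToRAW(argb, 64 * 4, raw, 64 * 3, 64, 48);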

// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
static const uint8_t kDither565_4x4[16] = {
    0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
};

// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
LIBYUV_API
int ARGBToRGB565Dither(const uint8_t* src_argb,
                       int src_stride_argb,
                       uint8_t* dst_rgb565,
                       int dst_stride_rgb565,
                       const uint8_t* dither4x4,
                       int width,
                       int height) {
  int y;
  void (*ARGBToRGB565DitherRow)(const uint8_t* src_argb, uint8_t* dst_rgb,
                                const uint32_t dither4, int width) =
      ARGBToRGB565DitherRow_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  if (!dither4x4) {
    dither4x4 = kDither565_4x4;
  }
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB565DitherRow(src_argb, dst_rgb565,
                          *(const uint32_t*)(dither4x4 + ((y & 3) << 2)),
                          width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
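
// Example (illustrative, hypothetical buffers): each output row y consumes 4
// matrix bytes, dither4x4 + ((y & 3) << 2), and passing NULL selects the
// built-in kDither565_4x4 table above:
//
//   uint8_t argb[64 * 48 * 4], rgb565[64 * 2 * 48];
//   ARGBToRGB565Dither(argb, 64 * 4, rgb565, 64 * 2, NULL, 64, 48);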

// Convert ARGB To RGB565.
// TODO(fbarchard): Consider using dither function low level with zeros.
LIBYUV_API
int ARGBToRGB565(const uint8_t* src_argb,
                 int src_stride_argb,
                 uint8_t* dst_rgb565,
                 int dst_stride_rgb565,
                 int width,
                 int height) {
  int y;
  void (*ARGBToRGB565Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                          int width) = ARGBToRGB565Row_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_rgb565 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb565 = 0;
  }
#if defined(HAS_ARGBTORGB565ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB565Row(src_argb, dst_rgb565, width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
// Convert ARGB to ARGB1555.
LIBYUV_API
int ARGBToARGB1555(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb1555,
                   int dst_stride_argb1555,
                   int width,
                   int height) {
  int y;
  void (*ARGBToARGB1555Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                            int width) = ARGBToARGB1555Row_C;
  if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_argb1555 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb1555 = 0;
  }
#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB1555Row(src_argb, dst_argb1555, width);
    src_argb += src_stride_argb;
    dst_argb1555 += dst_stride_argb1555;
  }
  return 0;
}
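// Pixel-packing sketch (illustrative only, compiled out): a hand-rolled
// single-pixel equivalent of what the ARGB1555 row functions above are
// expected to produce, assuming the usual little-endian ARGB1555 layout with
// blue in the low 5 bits, green in bits 5-9, red in bits 10-14, and a 1-bit
// alpha in bit 15. The helper name is hypothetical; only the high bits of
// each 8-bit channel survive the truncation.
#if 0
static uint16_t ExamplePackARGB1555(uint8_t a, uint8_t r, uint8_t g,
                                    uint8_t b) {
  return (uint16_t)((b >> 3) | ((g >> 3) << 5) | ((r >> 3) << 10) |
                    ((a >> 7) << 15));
}
#endif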
// Convert ARGB to ARGB4444.
LIBYUV_API
int ARGBToARGB4444(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb4444,
                   int dst_stride_argb4444,
                   int width,
                   int height) {
  int y;
  void (*ARGBToARGB4444Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                            int width) = ARGBToARGB4444Row_C;
  if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_argb4444 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb4444 = 0;
  }
#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB4444Row(src_argb, dst_argb4444, width);
    src_argb += src_stride_argb;
    dst_argb4444 += dst_stride_argb4444;
  }
  return 0;
}
// Convert ABGR to AR30.
LIBYUV_API
int ABGRToAR30(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_ar30,
               int dst_stride_ar30,
               int width,
               int height) {
  int y;
  void (*ABGRToAR30Row)(const uint8_t* src_abgr, uint8_t* dst_rgb, int width) =
      ABGRToAR30Row_C;
  if (!src_abgr || !dst_ar30 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
  // Coalesce rows.
  if (src_stride_abgr == width * 4 && dst_stride_ar30 == width * 4) {
    width *= height;
    height = 1;
    src_stride_abgr = dst_stride_ar30 = 0;
  }
#if defined(HAS_ABGRTOAR30ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToAR30Row = ABGRToAR30Row_Any_SSSE3;
    if (IS_ALIGNED(width, 4)) {
      ABGRToAR30Row = ABGRToAR30Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOAR30ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToAR30Row = ABGRToAR30Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ABGRToAR30Row = ABGRToAR30Row_AVX2;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ABGRToAR30Row(src_abgr, dst_ar30, width);
    src_abgr += src_stride_abgr;
    dst_ar30 += dst_stride_ar30;
  }
  return 0;
}
// Convert ARGB to AR30.
LIBYUV_API
int ARGBToAR30(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_ar30,
               int dst_stride_ar30,
               int width,
               int height) {
  int y;
  void (*ARGBToAR30Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToAR30Row_C;
  if (!src_argb || !dst_ar30 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_ar30 == width * 4) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_ar30 = 0;
  }
#if defined(HAS_ARGBTOAR30ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToAR30Row = ARGBToAR30Row_Any_SSSE3;
    if (IS_ALIGNED(width, 4)) {
      ARGBToAR30Row = ARGBToAR30Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOAR30ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToAR30Row = ARGBToAR30Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToAR30Row = ARGBToAR30Row_AVX2;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToAR30Row(src_argb, dst_ar30, width);
    src_argb += src_stride_argb;
    dst_ar30 += dst_stride_ar30;
  }
  return 0;
}
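// Pixel-packing sketch (illustrative only, compiled out): my reading of the
// AR30 packing the row functions above target, assuming a little-endian
// 2:10:10:10 layout with blue in the low 10 bits and 2 alpha bits on top,
// where each 8-bit color channel is widened to 10 bits by bit replication.
// The helper name is hypothetical; treat the exact bit layout as an
// assumption, not a spec.
#if 0
static uint32_t ExamplePackAR30(uint8_t a, uint8_t r, uint8_t g, uint8_t b) {
  // Widen 8-bit color to 10-bit by repeating the top 2 bits in the bottom.
  uint32_t b10 = ((uint32_t)b << 2) | (b >> 6);
  uint32_t g10 = ((uint32_t)g << 2) | (g >> 6);
  uint32_t r10 = ((uint32_t)r << 2) | (r >> 6);
  uint32_t a2 = a >> 6;  // Keep only the top 2 alpha bits.
  return b10 | (g10 << 10) | (r10 << 20) | (a2 << 30);
}
#endif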
// Convert ARGB to J420 (JPEG full-range I420).
LIBYUV_API
int ARGBToJ420(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI) && defined(HAS_ARGBTOUVJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    ARGBToUVJRow = ARGBToUVJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA) && defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
    src_argb += src_stride_argb * 2;
    dst_yj += dst_stride_yj * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
  }
  return 0;
}
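// Usage sketch (illustrative only, compiled out): converts 640x360 ARGB to
// full-range J420. U and V are subsampled 2x2, so each plane is
// ((width + 1) / 2) x ((height + 1) / 2); the odd-row epilogue above handles
// images with odd height. The example function and buffer names are
// hypothetical.
#if 0
static void ExampleARGBToJ420() {
  enum { kW = 640, kH = 360, kHalfW = (kW + 1) / 2, kHalfH = (kH + 1) / 2 };
  static uint8_t src_argb[kW * kH * 4];
  static uint8_t dst_y[kW * kH];
  static uint8_t dst_u[kHalfW * kHalfH];
  static uint8_t dst_v[kHalfW * kHalfH];
  ARGBToJ420(src_argb, kW * 4, dst_y, kW, dst_u, kHalfW, dst_v, kHalfW, kW,
             kH);
}
#endif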
// Convert ARGB to J422 (JPEG full-range I422).
LIBYUV_API
int ARGBToJ422(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_yj == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI) && defined(HAS_ARGBTOUVJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    ARGBToUVJRow = ARGBToUVJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA) && defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
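// Usage sketch (illustrative only, compiled out): unlike J420, J422 keeps
// full vertical chroma resolution, so U and V are ((width + 1) / 2) x height
// and the loop above computes chroma for every row rather than every other
// row. The example function and buffer names are hypothetical.
#if 0
static void ExampleARGBToJ422() {
  enum { kW = 640, kH = 360, kHalfW = (kW + 1) / 2 };
  static uint8_t src_argb[kW * kH * 4];
  static uint8_t dst_y[kW * kH];
  static uint8_t dst_u[kHalfW * kH];
  static uint8_t dst_v[kHalfW * kH];
  ARGBToJ422(src_argb, kW * 4, dst_y, kW, dst_u, kHalfW, dst_v, kHalfW, kW,
             kH);
}
#endif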
// Convert ARGB to J400 (JPEG full-range grayscale).
LIBYUV_API
int ARGBToJ400(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               int width,
               int height) {
  int y;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
  }
  return 0;
}
// Convert RGBA to J400 (JPEG full-range grayscale).
LIBYUV_API
int RGBAToJ400(const uint8_t* src_rgba,
               int src_stride_rgba,
               uint8_t* dst_yj,
               int dst_stride_yj,
               int width,
               int height) {
  int y;
  void (*RGBAToYJRow)(const uint8_t* src_rgba, uint8_t* dst_yj, int width) =
      RGBAToYJRow_C;
  if (!src_rgba || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgba = src_rgba + (height - 1) * src_stride_rgba;
    src_stride_rgba = -src_stride_rgba;
  }
  // Coalesce rows.
  if (src_stride_rgba == width * 4 && dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_rgba = dst_stride_yj = 0;
  }
#if defined(HAS_RGBATOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RGBAToYJRow = RGBAToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RGBAToYJRow = RGBAToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    RGBAToYJRow = RGBAToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      RGBAToYJRow = RGBAToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGBAToYJRow = RGBAToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGBAToYJRow = RGBAToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGBAToYJRow = RGBAToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGBAToYJRow = RGBAToYJRow_MMI;
    }
  }
#endif
#if defined(HAS_RGBATOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGBAToYJRow = RGBAToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGBAToYJRow = RGBAToYJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    RGBAToYJRow(src_rgba, dst_yj, width);
    src_rgba += src_stride_rgba;
    dst_yj += dst_stride_yj;
  }
  return 0;
}
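// Usage sketch (illustrative only, compiled out): extracts a full-range
// grayscale (J400) plane from RGBA input; only a Y plane is written, so the
// destination is width x height bytes. The example function and buffer names
// are hypothetical.
#if 0
static void ExampleRGBAToJ400() {
  enum { kW = 320, kH = 240 };
  static uint8_t src_rgba[kW * kH * 4];
  static uint8_t dst_y[kW * kH];
  RGBAToJ400(src_rgba, kW * 4, dst_y, kW, kW, kH);
}
#endif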
#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif