scale_neon.cc

/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for GCC Neon.
#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
    !defined(__aarch64__)

// NEON downscalers with interpolation.
// Provided by Fritz Koenig

// Read 32x1 pixels, throw away the even pixels, and write 16x1 pixels.
void ScaleRowDown2_NEON(const uint8_t* src_ptr,
                        ptrdiff_t src_stride,
                        uint8_t* dst,
                        int dst_width) {
  (void)src_stride;
  asm volatile(
      "1: \n"
      // load even pixels into q0, odd into q1
      "vld2.8 {q0, q1}, [%0]! \n"
      "subs %2, %2, #16 \n"  // 16 processed per loop
      "vst1.8 {q1}, [%1]! \n"  // store odd pixels
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst),       // %1
        "+r"(dst_width)  // %2
      :
      : "q0", "q1"  // Clobber List
  );
}
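
// For illustration only (hypothetical helper, not part of libyuv): a scalar
// sketch of what the kernel above computes, assuming dst_width is a positive
// multiple of 16.
// static void ScaleRowDown2_Sketch(const uint8_t* src, uint8_t* dst, int w) {
//   for (int i = 0; i < w; ++i) {
//     dst[i] = src[2 * i + 1];  // keep the odd pixel of each pair
//   }
// }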

// Read 32x1 pixels, average adjacent pairs, and write 16x1 pixels.
void ScaleRowDown2Linear_NEON(const uint8_t* src_ptr,
                              ptrdiff_t src_stride,
                              uint8_t* dst,
                              int dst_width) {
  (void)src_stride;
  asm volatile(
      "1: \n"
      "vld2.8 {q0, q1}, [%0]! \n"  // load 32 pixels
      "subs %2, %2, #16 \n"  // 16 processed per loop
      "vrhadd.u8 q0, q0, q1 \n"  // rounding half add
      "vst1.8 {q0}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst),       // %1
        "+r"(dst_width)  // %2
      :
      : "q0", "q1"  // Clobber List
  );
}
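
// Scalar sketch (hypothetical, for illustration): the rounding half add
// averages each pair with round-to-nearest, like vrhadd.u8 above.
// static void ScaleRowDown2Linear_Sketch(const uint8_t* src, uint8_t* dst,
//                                        int w) {
//   for (int i = 0; i < w; ++i) {
//     dst[i] = (uint8_t)((src[2 * i] + src[2 * i + 1] + 1) >> 1);
//   }
// }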

// Read 32x2 pixels, average each 2x2 block, and write 16x1 pixels.
void ScaleRowDown2Box_NEON(const uint8_t* src_ptr,
                           ptrdiff_t src_stride,
                           uint8_t* dst,
                           int dst_width) {
  asm volatile(
      // change the stride to a row 2 pointer
      "add %1, %0 \n"
      "1: \n"
      "vld1.8 {q0, q1}, [%0]! \n"  // load row 1 and post inc
      "vld1.8 {q2, q3}, [%1]! \n"  // load row 2 and post inc
      "subs %3, %3, #16 \n"  // 16 processed per loop
      "vpaddl.u8 q0, q0 \n"  // row 1 add adjacent
      "vpaddl.u8 q1, q1 \n"
      "vpadal.u8 q0, q2 \n"  // row 2 add adjacent + row 1
      "vpadal.u8 q1, q3 \n"
      "vrshrn.u16 d0, q0, #2 \n"  // downshift, round and pack
      "vrshrn.u16 d1, q1, #2 \n"
      "vst1.8 {q0}, [%2]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),     // %0
        "+r"(src_stride),  // %1
        "+r"(dst),         // %2
        "+r"(dst_width)    // %3
      :
      : "q0", "q1", "q2", "q3"  // Clobber List
  );
}
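
// Scalar sketch (illustrative only): each output pixel is the rounded
// average of a 2x2 block, matching the vpaddl/vpadal + vrshrn sequence.
// static void ScaleRowDown2Box_Sketch(const uint8_t* s, ptrdiff_t stride,
//                                     uint8_t* dst, int w) {
//   for (int i = 0; i < w; ++i) {
//     const uint8_t* p = s + 2 * i;
//     dst[i] = (uint8_t)((p[0] + p[1] + p[stride] + p[stride + 1] + 2) >> 2);
//   }
// }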

void ScaleRowDown4_NEON(const uint8_t* src_ptr,
                        ptrdiff_t src_stride,
                        uint8_t* dst_ptr,
                        int dst_width) {
  (void)src_stride;
  asm volatile(
      "1: \n"
      "vld4.8 {d0, d1, d2, d3}, [%0]! \n"  // src line 0
      "subs %2, %2, #8 \n"  // 8 processed per loop
      "vst1.8 {d2}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst_ptr),   // %1
        "+r"(dst_width)  // %2
      :
      : "q0", "q1", "memory", "cc");
}
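
// Scalar sketch (illustrative): vld4.8 deinterleaves, and storing d2 keeps
// pixel 2 of every group of 4, i.e. dst[i] = src[4 * i + 2].
// static void ScaleRowDown4_Sketch(const uint8_t* src, uint8_t* dst, int w) {
//   for (int i = 0; i < w; ++i) dst[i] = src[4 * i + 2];
// }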

void ScaleRowDown4Box_NEON(const uint8_t* src_ptr,
                           ptrdiff_t src_stride,
                           uint8_t* dst_ptr,
                           int dst_width) {
  const uint8_t* src_ptr1 = src_ptr + src_stride;
  const uint8_t* src_ptr2 = src_ptr + src_stride * 2;
  const uint8_t* src_ptr3 = src_ptr + src_stride * 3;
  asm volatile(
      "1: \n"
      "vld1.8 {q0}, [%0]! \n"  // load up 16x4
      "vld1.8 {q1}, [%3]! \n"
      "vld1.8 {q2}, [%4]! \n"
      "vld1.8 {q3}, [%5]! \n"
      "subs %2, %2, #4 \n"
      "vpaddl.u8 q0, q0 \n"
      "vpadal.u8 q0, q1 \n"
      "vpadal.u8 q0, q2 \n"
      "vpadal.u8 q0, q3 \n"
      "vpaddl.u16 q0, q0 \n"
      "vrshrn.u32 d0, q0, #4 \n"  // divide by 16 w/rounding
      "vmovn.u16 d0, q0 \n"
      "vst1.32 {d0[0]}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),    // %0
        "+r"(dst_ptr),    // %1
        "+r"(dst_width),  // %2
        "+r"(src_ptr1),   // %3
        "+r"(src_ptr2),   // %4
        "+r"(src_ptr3)    // %5
      :
      : "q0", "q1", "q2", "q3", "memory", "cc");
}
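
// Scalar sketch (illustrative): each output pixel is the rounded average of
// a 4x4 block; the "+ 8" then ">> 4" matches vrshrn.u32 d0, q0, #4 above.
// static void ScaleRowDown4Box_Sketch(const uint8_t* s, ptrdiff_t stride,
//                                     uint8_t* dst, int w) {
//   for (int i = 0; i < w; ++i) {
//     int sum = 0;
//     for (int y = 0; y < 4; ++y)
//       for (int x = 0; x < 4; ++x) sum += s[y * stride + 4 * i + x];
//     dst[i] = (uint8_t)((sum + 8) >> 4);
//   }
// }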

// Downscale from 4 to 3 pixels. Use the NEON multilane read/write
// to load every 4th pixel into a different register.
// Point samples 32 pixels to 24 pixels.
void ScaleRowDown34_NEON(const uint8_t* src_ptr,
                         ptrdiff_t src_stride,
                         uint8_t* dst_ptr,
                         int dst_width) {
  (void)src_stride;
  asm volatile(
      "1: \n"
      "vld4.8 {d0, d1, d2, d3}, [%0]! \n"  // src line 0
      "subs %2, %2, #24 \n"
      "vmov d2, d3 \n"  // order d0, d1, d2
      "vst3.8 {d0, d1, d2}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst_ptr),   // %1
        "+r"(dst_width)  // %2
      :
      : "d0", "d1", "d2", "d3", "memory", "cc");
}
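
// Scalar sketch (illustrative): of every 4 input pixels, pixels 0, 1 and 3
// are kept (the vmov replaces lane 2 with lane 3 before the vst3 store).
// static void ScaleRowDown34_Sketch(const uint8_t* src, uint8_t* dst, int w) {
//   for (int i = 0; i < w / 3; ++i) {
//     dst[3 * i + 0] = src[4 * i + 0];
//     dst[3 * i + 1] = src[4 * i + 1];
//     dst[3 * i + 2] = src[4 * i + 3];
//   }
// }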

void ScaleRowDown34_0_Box_NEON(const uint8_t* src_ptr,
                               ptrdiff_t src_stride,
                               uint8_t* dst_ptr,
                               int dst_width) {
  asm volatile(
      "vmov.u8 d24, #3 \n"
      "add %3, %0 \n"
      "1: \n"
      "vld4.8 {d0, d1, d2, d3}, [%0]! \n"  // src line 0
      "vld4.8 {d4, d5, d6, d7}, [%3]! \n"  // src line 1
      "subs %2, %2, #24 \n"
      // filter src line 0 with src line 1
      // expand chars to shorts to allow for room
      // when adding lines together
      "vmovl.u8 q8, d4 \n"
      "vmovl.u8 q9, d5 \n"
      "vmovl.u8 q10, d6 \n"
      "vmovl.u8 q11, d7 \n"
      // 3 * line_0 + line_1
      "vmlal.u8 q8, d0, d24 \n"
      "vmlal.u8 q9, d1, d24 \n"
      "vmlal.u8 q10, d2, d24 \n"
      "vmlal.u8 q11, d3, d24 \n"
      // (3 * line_0 + line_1) >> 2
      "vqrshrn.u16 d0, q8, #2 \n"
      "vqrshrn.u16 d1, q9, #2 \n"
      "vqrshrn.u16 d2, q10, #2 \n"
      "vqrshrn.u16 d3, q11, #2 \n"
      // a0 = (src[0] * 3 + s[1] * 1) >> 2
      "vmovl.u8 q8, d1 \n"
      "vmlal.u8 q8, d0, d24 \n"
      "vqrshrn.u16 d0, q8, #2 \n"
      // a1 = (src[1] * 1 + s[2] * 1) >> 1
      "vrhadd.u8 d1, d1, d2 \n"
      // a2 = (src[2] * 1 + s[3] * 3) >> 2
      "vmovl.u8 q8, d2 \n"
      "vmlal.u8 q8, d3, d24 \n"
      "vqrshrn.u16 d2, q8, #2 \n"
      "vst3.8 {d0, d1, d2}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),    // %0
        "+r"(dst_ptr),    // %1
        "+r"(dst_width),  // %2
        "+r"(src_stride)  // %3
      :
      : "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "d24", "memory",
        "cc");
}
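
// Scalar sketch (illustrative, hypothetical helper) of the filtering above:
// rows are blended 3:1 with rounding, then each group of 4 blended pixels
// t0..t3 becomes three outputs using the weights in the comments.
// static void ScaleRowDown34_0_Box_Sketch(const uint8_t* s, ptrdiff_t st,
//                                         uint8_t* dst, int w) {
//   for (int i = 0; i < w / 3; ++i) {
//     uint8_t t[4];
//     for (int j = 0; j < 4; ++j)
//       t[j] = (uint8_t)((s[4 * i + j] * 3 + s[st + 4 * i + j] + 2) >> 2);
//     dst[3 * i + 0] = (uint8_t)((t[0] * 3 + t[1] + 2) >> 2);
//     dst[3 * i + 1] = (uint8_t)((t[1] + t[2] + 1) >> 1);
//     dst[3 * i + 2] = (uint8_t)((t[2] + t[3] * 3 + 2) >> 2);
//   }
// }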

void ScaleRowDown34_1_Box_NEON(const uint8_t* src_ptr,
                               ptrdiff_t src_stride,
                               uint8_t* dst_ptr,
                               int dst_width) {
  asm volatile(
      "vmov.u8 d24, #3 \n"
      "add %3, %0 \n"
      "1: \n"
      "vld4.8 {d0, d1, d2, d3}, [%0]! \n"  // src line 0
      "vld4.8 {d4, d5, d6, d7}, [%3]! \n"  // src line 1
      "subs %2, %2, #24 \n"
      // average src line 0 with src line 1
      "vrhadd.u8 q0, q0, q2 \n"
      "vrhadd.u8 q1, q1, q3 \n"
      // a0 = (src[0] * 3 + s[1] * 1) >> 2
      "vmovl.u8 q3, d1 \n"
      "vmlal.u8 q3, d0, d24 \n"
      "vqrshrn.u16 d0, q3, #2 \n"
      // a1 = (src[1] * 1 + s[2] * 1) >> 1
      "vrhadd.u8 d1, d1, d2 \n"
      // a2 = (src[2] * 1 + s[3] * 3) >> 2
      "vmovl.u8 q3, d2 \n"
      "vmlal.u8 q3, d3, d24 \n"
      "vqrshrn.u16 d2, q3, #2 \n"
      "vst3.8 {d0, d1, d2}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),    // %0
        "+r"(dst_ptr),    // %1
        "+r"(dst_width),  // %2
        "+r"(src_stride)  // %3
      :
      : "r4", "q0", "q1", "q2", "q3", "d24", "memory", "cc");
}

#define HAS_SCALEROWDOWN38_NEON
static const uvec8 kShuf38 = {0, 3, 6, 8, 11, 14, 16, 19,
                              22, 24, 27, 30, 0, 0, 0, 0};
static const uvec8 kShuf38_2 = {0, 8, 16, 2, 10, 17, 4, 12,
                                18, 6, 14, 19, 0, 0, 0, 0};
static const vec16 kMult38_Div6 = {65536 / 12, 65536 / 12, 65536 / 12,
                                   65536 / 12, 65536 / 12, 65536 / 12,
                                   65536 / 12, 65536 / 12};
static const vec16 kMult38_Div9 = {65536 / 18, 65536 / 18, 65536 / 18,
                                   65536 / 18, 65536 / 18, 65536 / 18,
                                   65536 / 18, 65536 / 18};
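
// Note on the constants: vqrdmulh.s16 computes round((2 * a * b) >> 16),
// so multiplying a sum by 65536 / 12 divides it by 6, and multiplying by
// 65536 / 18 divides it by 9. A worked example of the arithmetic:
//   a = 30, b = 65536 / 12 = 5461
//   (2 * 30 * 5461 + 0x8000) >> 16 = 5   // i.e. 30 / 6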

// 32 -> 12
void ScaleRowDown38_NEON(const uint8_t* src_ptr,
                         ptrdiff_t src_stride,
                         uint8_t* dst_ptr,
                         int dst_width) {
  (void)src_stride;
  asm volatile(
      "vld1.8 {q3}, [%3] \n"
      "1: \n"
      "vld1.8 {d0, d1, d2, d3}, [%0]! \n"
      "subs %2, %2, #12 \n"
      "vtbl.u8 d4, {d0, d1, d2, d3}, d6 \n"
      "vtbl.u8 d5, {d0, d1, d2, d3}, d7 \n"
      "vst1.8 {d4}, [%1]! \n"
      "vst1.32 {d5[0]}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst_ptr),   // %1
        "+r"(dst_width)  // %2
      : "r"(&kShuf38)    // %3
      : "d0", "d1", "d2", "d3", "d4", "d5", "memory", "cc");
}

// 32x3 -> 12x1
void OMITFP ScaleRowDown38_3_Box_NEON(const uint8_t* src_ptr,
                                      ptrdiff_t src_stride,
                                      uint8_t* dst_ptr,
                                      int dst_width) {
  const uint8_t* src_ptr1 = src_ptr + src_stride * 2;
  asm volatile(
      "vld1.16 {q13}, [%5] \n"
      "vld1.8 {q14}, [%6] \n"
      "vld1.8 {q15}, [%7] \n"
      "add %3, %0 \n"
      "1: \n"
      // d0 = 00 40 01 41 02 42 03 43
      // d1 = 10 50 11 51 12 52 13 53
      // d2 = 20 60 21 61 22 62 23 63
      // d3 = 30 70 31 71 32 72 33 73
      "vld4.8 {d0, d1, d2, d3}, [%0]! \n"
      "vld4.8 {d4, d5, d6, d7}, [%3]! \n"
      "vld4.8 {d16, d17, d18, d19}, [%4]! \n"
      "subs %2, %2, #12 \n"
      // Shuffle the input data around to align the data
      // so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
      // d0 = 00 10 01 11 02 12 03 13
      // d1 = 40 50 41 51 42 52 43 53
      "vtrn.u8 d0, d1 \n"
      "vtrn.u8 d4, d5 \n"
      "vtrn.u8 d16, d17 \n"
      // d2 = 20 30 21 31 22 32 23 33
      // d3 = 60 70 61 71 62 72 63 73
      "vtrn.u8 d2, d3 \n"
      "vtrn.u8 d6, d7 \n"
      "vtrn.u8 d18, d19 \n"
      // d0 = 00+10 01+11 02+12 03+13
      // d2 = 40+50 41+51 42+52 43+53
      "vpaddl.u8 q0, q0 \n"
      "vpaddl.u8 q2, q2 \n"
      "vpaddl.u8 q8, q8 \n"
      // d3 = 60+70 61+71 62+72 63+73
      "vpaddl.u8 d3, d3 \n"
      "vpaddl.u8 d7, d7 \n"
      "vpaddl.u8 d19, d19 \n"
      // combine source lines
      "vadd.u16 q0, q2 \n"
      "vadd.u16 q0, q8 \n"
      "vadd.u16 d4, d3, d7 \n"
      "vadd.u16 d4, d19 \n"
      // dst_ptr[3] = (s[6 + st * 0] + s[7 + st * 0]
      //             + s[6 + st * 1] + s[7 + st * 1]
      //             + s[6 + st * 2] + s[7 + st * 2]) / 6
      "vqrdmulh.s16 q2, q2, q13 \n"
      "vmovn.u16 d4, q2 \n"
      // Shuffle 2,3 reg around so that 2 can be added to the
      // 0,1 reg and 3 can be added to the 4,5 reg. This
      // requires expanding from u8 to u16 as the 0,1 and 4,5
      // registers are already expanded. Then do transposes
      // to get aligned.
      // q2 = xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
      "vmovl.u8 q1, d2 \n"
      "vmovl.u8 q3, d6 \n"
      "vmovl.u8 q9, d18 \n"
      // combine source lines
      "vadd.u16 q1, q3 \n"
      "vadd.u16 q1, q9 \n"
      // d4 = xx 20 xx 30 xx 22 xx 32
      // d5 = xx 21 xx 31 xx 23 xx 33
      "vtrn.u32 d2, d3 \n"
      // d4 = xx 20 xx 21 xx 22 xx 23
      // d5 = xx 30 xx 31 xx 32 xx 33
      "vtrn.u16 d2, d3 \n"
      // 0+1+2, 3+4+5
      "vadd.u16 q0, q1 \n"
      // Need to divide, but can't downshift as the divisor
      // isn't a power of 2. So multiply by 65536 / n
      // and take the upper 16 bits.
      "vqrdmulh.s16 q0, q0, q15 \n"
      // Align for table lookup; vtbl requires registers to
      // be adjacent
      "vmov.u8 d2, d4 \n"
      "vtbl.u8 d3, {d0, d1, d2}, d28 \n"
      "vtbl.u8 d4, {d0, d1, d2}, d29 \n"
      "vst1.8 {d3}, [%1]! \n"
      "vst1.32 {d4[0]}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),     // %0
        "+r"(dst_ptr),     // %1
        "+r"(dst_width),   // %2
        "+r"(src_stride),  // %3
        "+r"(src_ptr1)     // %4
      : "r"(&kMult38_Div6),  // %5
        "r"(&kShuf38_2),     // %6
        "r"(&kMult38_Div9)   // %7
      : "q0", "q1", "q2", "q3", "q8", "q9", "q13", "q14", "q15", "memory",
        "cc");
}

// 32x2 -> 12x1
void ScaleRowDown38_2_Box_NEON(const uint8_t* src_ptr,
                               ptrdiff_t src_stride,
                               uint8_t* dst_ptr,
                               int dst_width) {
  asm volatile(
      "vld1.16 {q13}, [%4] \n"
      "vld1.8 {q14}, [%5] \n"
      "add %3, %0 \n"
      "1: \n"
      // d0 = 00 40 01 41 02 42 03 43
      // d1 = 10 50 11 51 12 52 13 53
      // d2 = 20 60 21 61 22 62 23 63
      // d3 = 30 70 31 71 32 72 33 73
      "vld4.8 {d0, d1, d2, d3}, [%0]! \n"
      "vld4.8 {d4, d5, d6, d7}, [%3]! \n"
      "subs %2, %2, #12 \n"
      // Shuffle the input data around to align the data
      // so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
      // d0 = 00 10 01 11 02 12 03 13
      // d1 = 40 50 41 51 42 52 43 53
      "vtrn.u8 d0, d1 \n"
      "vtrn.u8 d4, d5 \n"
      // d2 = 20 30 21 31 22 32 23 33
      // d3 = 60 70 61 71 62 72 63 73
      "vtrn.u8 d2, d3 \n"
      "vtrn.u8 d6, d7 \n"
      // d0 = 00+10 01+11 02+12 03+13
      // d2 = 40+50 41+51 42+52 43+53
      "vpaddl.u8 q0, q0 \n"
      "vpaddl.u8 q2, q2 \n"
      // d3 = 60+70 61+71 62+72 63+73
      "vpaddl.u8 d3, d3 \n"
      "vpaddl.u8 d7, d7 \n"
      // combine source lines
      "vadd.u16 q0, q2 \n"
      "vadd.u16 d4, d3, d7 \n"
      // dst_ptr[3] = (s[6] + s[7] + s[6+st] + s[7+st]) / 4
      "vqrshrn.u16 d4, q2, #2 \n"
      // Shuffle 2,3 reg around so that 2 can be added to the
      // 0,1 reg and 3 can be added to the 4,5 reg. This
      // requires expanding from u8 to u16 as the 0,1 and 4,5
      // registers are already expanded. Then do transposes
      // to get aligned.
      // q2 = xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
      "vmovl.u8 q1, d2 \n"
      "vmovl.u8 q3, d6 \n"
      // combine source lines
      "vadd.u16 q1, q3 \n"
      // d4 = xx 20 xx 30 xx 22 xx 32
      // d5 = xx 21 xx 31 xx 23 xx 33
      "vtrn.u32 d2, d3 \n"
      // d4 = xx 20 xx 21 xx 22 xx 23
      // d5 = xx 30 xx 31 xx 32 xx 33
      "vtrn.u16 d2, d3 \n"
      // 0+1+2, 3+4+5
      "vadd.u16 q0, q1 \n"
      // Need to divide, but can't downshift as the divisor
      // isn't a power of 2. So multiply by 65536 / n
      // and take the upper 16 bits.
      "vqrdmulh.s16 q0, q0, q13 \n"
      // Align for table lookup; vtbl requires registers to
      // be adjacent
      "vmov.u8 d2, d4 \n"
      "vtbl.u8 d3, {d0, d1, d2}, d28 \n"
      "vtbl.u8 d4, {d0, d1, d2}, d29 \n"
      "vst1.8 {d3}, [%1]! \n"
      "vst1.32 {d4[0]}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),    // %0
        "+r"(dst_ptr),    // %1
        "+r"(dst_width),  // %2
        "+r"(src_stride)  // %3
      : "r"(&kMult38_Div6),  // %4
        "r"(&kShuf38_2)      // %5
      : "q0", "q1", "q2", "q3", "q13", "q14", "memory", "cc");
}

// Add a row of bytes to a row of shorts. Used for box filter.
// Reads 16 bytes and accumulates to 16 shorts at a time.
void ScaleAddRow_NEON(const uint8_t* src_ptr,
                      uint16_t* dst_ptr,
                      int src_width) {
  asm volatile(
      "1: \n"
      "vld1.16 {q1, q2}, [%1] \n"  // load accumulator
      "vld1.8 {q0}, [%0]! \n"  // load 16 bytes
      "vaddw.u8 q2, q2, d1 \n"  // add
      "vaddw.u8 q1, q1, d0 \n"
      "vst1.16 {q1, q2}, [%1]! \n"  // store accumulator
      "subs %2, %2, #16 \n"  // 16 processed per loop
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst_ptr),   // %1
        "+r"(src_width)  // %2
      :
      : "memory", "cc", "q0", "q1", "q2"  // Clobber List
  );
}
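
// Scalar sketch (illustrative): a widening accumulate, as vaddw.u8 does.
// static void ScaleAddRow_Sketch(const uint8_t* src, uint16_t* dst, int w) {
//   for (int i = 0; i < w; ++i) dst[i] = (uint16_t)(dst[i] + src[i]);
// }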

// TODO(Yang Zhang): Investigate using fewer load instructions for
// the x/dx stepping.
#define LOAD2_DATA8_LANE(n)                      \
  "lsr %5, %3, #16 \n"                           \
  "add %6, %1, %5 \n"                            \
  "add %3, %3, %4 \n"                            \
  "vld2.8 {d6[" #n "], d7[" #n "]}, [%6] \n"

// The NEON version mimics this formula (from row_common.cc):
// #define BLENDER(a, b, f) (uint8_t)((int)(a) +
//     ((((int)((f)) * ((int)(b) - (int)(a))) + 0x8000) >> 16))
void ScaleFilterCols_NEON(uint8_t* dst_ptr,
                          const uint8_t* src_ptr,
                          int dst_width,
                          int x,
                          int dx) {
  int dx_offset[4] = {0, 1, 2, 3};
  int* tmp = dx_offset;
  const uint8_t* src_tmp = src_ptr;
  asm volatile(
      "vdup.32 q0, %3 \n"  // x
      "vdup.32 q1, %4 \n"  // dx
      "vld1.32 {q2}, [%5] \n"  // 0 1 2 3
      "vshl.i32 q3, q1, #2 \n"  // 4 * dx
      "vmul.s32 q1, q1, q2 \n"
      // x         , x + 1 * dx, x + 2 * dx, x + 3 * dx
      "vadd.s32 q1, q1, q0 \n"
      // x + 4 * dx, x + 5 * dx, x + 6 * dx, x + 7 * dx
      "vadd.s32 q2, q1, q3 \n"
      "vshl.i32 q0, q3, #1 \n"  // 8 * dx
      "1: \n"
      LOAD2_DATA8_LANE(0)
      LOAD2_DATA8_LANE(1)
      LOAD2_DATA8_LANE(2)
      LOAD2_DATA8_LANE(3)
      LOAD2_DATA8_LANE(4)
      LOAD2_DATA8_LANE(5)
      LOAD2_DATA8_LANE(6)
      LOAD2_DATA8_LANE(7)
      "vmov q10, q1 \n"
      "vmov q11, q2 \n"
      "vuzp.16 q10, q11 \n"
      "vmovl.u8 q8, d6 \n"
      "vmovl.u8 q9, d7 \n"
      "vsubl.s16 q11, d18, d16 \n"
      "vsubl.s16 q12, d19, d17 \n"
      "vmovl.u16 q13, d20 \n"
      "vmovl.u16 q10, d21 \n"
      "vmul.s32 q11, q11, q13 \n"
      "vmul.s32 q12, q12, q10 \n"
      "vrshrn.s32 d18, q11, #16 \n"
      "vrshrn.s32 d19, q12, #16 \n"
      "vadd.s16 q8, q8, q9 \n"
      "vmovn.s16 d6, q8 \n"
      "vst1.8 {d6}, [%0]! \n"  // store pixels
      "vadd.s32 q1, q1, q0 \n"
      "vadd.s32 q2, q2, q0 \n"
      "subs %2, %2, #8 \n"  // 8 processed per loop
      "bgt 1b \n"
      : "+r"(dst_ptr),    // %0
        "+r"(src_ptr),    // %1
        "+r"(dst_width),  // %2
        "+r"(x),          // %3
        "+r"(dx),         // %4
        "+r"(tmp),        // %5
        "+r"(src_tmp)     // %6
      :
      : "memory", "cc", "q0", "q1", "q2", "q3",
        "q8", "q9", "q10", "q11", "q12", "q13");
}

#undef LOAD2_DATA8_LANE
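
// Scalar sketch (illustrative, hypothetical helper) of the BLENDER formula
// the loop above vectorizes, with x and dx in 16.16 fixed point:
// static void ScaleFilterCols_Sketch(uint8_t* dst, const uint8_t* src,
//                                    int w, int x, int dx) {
//   for (int i = 0; i < w; ++i) {
//     int xi = x >> 16;
//     int a = src[xi], b = src[xi + 1], f = x & 0xffff;
//     dst[i] = (uint8_t)(a + ((f * (b - a) + 0x8000) >> 16));
//     x += dx;
//   }
// }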

// 16x2 -> 16x1
void ScaleFilterRows_NEON(uint8_t* dst_ptr,
                          const uint8_t* src_ptr,
                          ptrdiff_t src_stride,
                          int dst_width,
                          int source_y_fraction) {
  asm volatile(
      "cmp %4, #0 \n"
      "beq 100f \n"
      "add %2, %1 \n"
      "cmp %4, #64 \n"
      "beq 75f \n"
      "cmp %4, #128 \n"
      "beq 50f \n"
      "cmp %4, #192 \n"
      "beq 25f \n"
      "vdup.8 d5, %4 \n"
      "rsb %4, #256 \n"
      "vdup.8 d4, %4 \n"
      // General purpose row blend.
      "1: \n"
      "vld1.8 {q0}, [%1]! \n"
      "vld1.8 {q1}, [%2]! \n"
      "subs %3, %3, #16 \n"
      "vmull.u8 q13, d0, d4 \n"
      "vmull.u8 q14, d1, d4 \n"
      "vmlal.u8 q13, d2, d5 \n"
      "vmlal.u8 q14, d3, d5 \n"
      "vrshrn.u16 d0, q13, #8 \n"
      "vrshrn.u16 d1, q14, #8 \n"
      "vst1.8 {q0}, [%0]! \n"
      "bgt 1b \n"
      "b 99f \n"
      // Blend 25 / 75.
      "25: \n"
      "vld1.8 {q0}, [%1]! \n"
      "vld1.8 {q1}, [%2]! \n"
      "subs %3, %3, #16 \n"
      "vrhadd.u8 q0, q1 \n"
      "vrhadd.u8 q0, q1 \n"
      "vst1.8 {q0}, [%0]! \n"
      "bgt 25b \n"
      "b 99f \n"
      // Blend 50 / 50.
      "50: \n"
      "vld1.8 {q0}, [%1]! \n"
      "vld1.8 {q1}, [%2]! \n"
      "subs %3, %3, #16 \n"
      "vrhadd.u8 q0, q1 \n"
      "vst1.8 {q0}, [%0]! \n"
      "bgt 50b \n"
      "b 99f \n"
      // Blend 75 / 25.
      "75: \n"
      "vld1.8 {q1}, [%1]! \n"
      "vld1.8 {q0}, [%2]! \n"
      "subs %3, %3, #16 \n"
      "vrhadd.u8 q0, q1 \n"
      "vrhadd.u8 q0, q1 \n"
      "vst1.8 {q0}, [%0]! \n"
      "bgt 75b \n"
      "b 99f \n"
      // Blend 100 / 0 - Copy row unchanged.
      "100: \n"
      "vld1.8 {q0}, [%1]! \n"
      "subs %3, %3, #16 \n"
      "vst1.8 {q0}, [%0]! \n"
      "bgt 100b \n"
      "99: \n"
      "vst1.8 {d1[7]}, [%0] \n"  // duplicate last pixel
      : "+r"(dst_ptr),           // %0
        "+r"(src_ptr),           // %1
        "+r"(src_stride),        // %2
        "+r"(dst_width),         // %3
        "+r"(source_y_fraction)  // %4
      :
      : "q0", "q1", "d4", "d5", "q13", "q14", "memory", "cc");
}
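
// Scalar sketch (illustrative) of the general-purpose blend path above,
// with source_y_fraction in [0, 256):
// static void ScaleFilterRows_Sketch(uint8_t* dst, const uint8_t* s0,
//                                    ptrdiff_t stride, int w, int f) {
//   const uint8_t* s1 = s0 + stride;
//   for (int i = 0; i < w; ++i)
//     dst[i] = (uint8_t)((s0[i] * (256 - f) + s1[i] * f + 128) >> 8);
// }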

void ScaleARGBRowDown2_NEON(const uint8_t* src_ptr,
                            ptrdiff_t src_stride,
                            uint8_t* dst,
                            int dst_width) {
  (void)src_stride;
  asm volatile(
      "1: \n"
      "vld4.32 {d0, d2, d4, d6}, [%0]! \n"  // load 8 ARGB pixels.
      "vld4.32 {d1, d3, d5, d7}, [%0]! \n"  // load next 8 ARGB
      "subs %2, %2, #8 \n"  // 8 processed per loop
      "vmov q2, q1 \n"  // move odd pixels into q2 so vst2 interleaves them
      "vst2.32 {q2, q3}, [%1]! \n"  // store odd pixels
      "bgt 1b \n"
      : "+r"(src_ptr),   // %0
        "+r"(dst),       // %1
        "+r"(dst_width)  // %2
      :
      : "memory", "cc", "q0", "q1", "q2", "q3"  // Clobber List
  );
}

// 46:  f964 018d   vld4.32 {d16,d18,d20,d22}, [r4]!
// 4a:  3e04        subs r6, #4
// 4c:  f964 118d   vld4.32 {d17,d19,d21,d23}, [r4]!
// 50:  ef64 21f4   vorr q9, q10, q10
// 54:  f942 038d   vst2.32 {d16-d19}, [r2]!
// 58:  d1f5        bne.n 46 <ScaleARGBRowDown2_C+0x46>

void ScaleARGBRowDown2Linear_NEON(const uint8_t* src_argb,
                                  ptrdiff_t src_stride,
                                  uint8_t* dst_argb,
                                  int dst_width) {
  (void)src_stride;
  asm volatile(
      "1: \n"
      "vld4.32 {d0, d2, d4, d6}, [%0]! \n"  // load 8 ARGB pixels.
      "vld4.32 {d1, d3, d5, d7}, [%0]! \n"  // load next 8 ARGB
      "subs %2, %2, #8 \n"  // 8 processed per loop
      "vrhadd.u8 q0, q0, q1 \n"  // rounding half add
      "vrhadd.u8 q1, q2, q3 \n"  // rounding half add
      "vst2.32 {q0, q1}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_argb),  // %0
        "+r"(dst_argb),  // %1
        "+r"(dst_width)  // %2
      :
      : "memory", "cc", "q0", "q1", "q2", "q3"  // Clobber List
  );
}

void ScaleARGBRowDown2Box_NEON(const uint8_t* src_ptr,
                               ptrdiff_t src_stride,
                               uint8_t* dst,
                               int dst_width) {
  asm volatile(
      // change the stride to a row 2 pointer
      "add %1, %1, %0 \n"
      "1: \n"
      "vld4.8 {d0, d2, d4, d6}, [%0]! \n"  // load 8 ARGB pixels.
      "vld4.8 {d1, d3, d5, d7}, [%0]! \n"  // load next 8 ARGB
      "subs %3, %3, #8 \n"  // 8 processed per loop.
      "vpaddl.u8 q0, q0 \n"  // B 16 bytes -> 8 shorts.
      "vpaddl.u8 q1, q1 \n"  // G 16 bytes -> 8 shorts.
      "vpaddl.u8 q2, q2 \n"  // R 16 bytes -> 8 shorts.
      "vpaddl.u8 q3, q3 \n"  // A 16 bytes -> 8 shorts.
      "vld4.8 {d16, d18, d20, d22}, [%1]! \n"  // load 8 more ARGB
      "vld4.8 {d17, d19, d21, d23}, [%1]! \n"  // load last 8 ARGB
      "vpadal.u8 q0, q8 \n"  // B 16 bytes -> 8 shorts.
      "vpadal.u8 q1, q9 \n"  // G 16 bytes -> 8 shorts.
      "vpadal.u8 q2, q10 \n"  // R 16 bytes -> 8 shorts.
      "vpadal.u8 q3, q11 \n"  // A 16 bytes -> 8 shorts.
      "vrshrn.u16 d0, q0, #2 \n"  // round and pack to bytes
      "vrshrn.u16 d1, q1, #2 \n"
      "vrshrn.u16 d2, q2, #2 \n"
      "vrshrn.u16 d3, q3, #2 \n"
      "vst4.8 {d0, d1, d2, d3}, [%2]! \n"
      "bgt 1b \n"
      : "+r"(src_ptr),     // %0
        "+r"(src_stride),  // %1
        "+r"(dst),         // %2
        "+r"(dst_width)    // %3
      :
      : "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
}
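
// Scalar sketch (illustrative): a per-channel rounded 2x2 average; each ARGB
// pixel is 4 bytes, so output pixel i reads bytes 8*i..8*i+7 of both rows.
// static void ScaleARGBRowDown2Box_Sketch(const uint8_t* s, ptrdiff_t st,
//                                         uint8_t* dst, int w) {
//   for (int i = 0; i < w; ++i) {
//     for (int c = 0; c < 4; ++c) {  // B, G, R, A
//       const uint8_t* p = s + 8 * i + c;
//       dst[4 * i + c] =
//           (uint8_t)((p[0] + p[4] + p[st] + p[st + 4] + 2) >> 2);
//     }
//   }
// }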

// Reads 4 pixels at a time.
// Alignment requirement: src_argb 4 byte aligned.
void ScaleARGBRowDownEven_NEON(const uint8_t* src_argb,
                               ptrdiff_t src_stride,
                               int src_stepx,
                               uint8_t* dst_argb,
                               int dst_width) {
  (void)src_stride;
  asm volatile(
      "mov r12, %3, lsl #2 \n"
      "1: \n"
      "vld1.32 {d0[0]}, [%0], r12 \n"
      "vld1.32 {d0[1]}, [%0], r12 \n"
      "vld1.32 {d1[0]}, [%0], r12 \n"
      "vld1.32 {d1[1]}, [%0], r12 \n"
      "subs %2, %2, #4 \n"  // 4 pixels per loop.
      "vst1.8 {q0}, [%1]! \n"
      "bgt 1b \n"
      : "+r"(src_argb),  // %0
        "+r"(dst_argb),  // %1
        "+r"(dst_width)  // %2
      : "r"(src_stepx)   // %3
      : "memory", "cc", "r12", "q0");
}
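
// Scalar sketch (illustrative): copy every src_stepx-th ARGB pixel, with
// pixels viewed as 32-bit words.
// static void ScaleARGBRowDownEven_Sketch(const uint32_t* src, int stepx,
//                                         uint32_t* dst, int w) {
//   for (int i = 0; i < w; ++i) dst[i] = src[i * stepx];
// }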

// Reads 4 pixels at a time.
// Alignment requirement: src_argb 4 byte aligned.
void ScaleARGBRowDownEvenBox_NEON(const uint8_t* src_argb,
                                  ptrdiff_t src_stride,
                                  int src_stepx,
                                  uint8_t* dst_argb,
                                  int dst_width) {
  asm volatile(
      "mov r12, %4, lsl #2 \n"
      "add %1, %1, %0 \n"
      "1: \n"
      "vld1.8 {d0}, [%0], r12 \n"  // 4 2x2 blocks -> 2x1
      "vld1.8 {d1}, [%1], r12 \n"
      "vld1.8 {d2}, [%0], r12 \n"
      "vld1.8 {d3}, [%1], r12 \n"
      "vld1.8 {d4}, [%0], r12 \n"
      "vld1.8 {d5}, [%1], r12 \n"
      "vld1.8 {d6}, [%0], r12 \n"
      "vld1.8 {d7}, [%1], r12 \n"
      "vaddl.u8 q0, d0, d1 \n"
      "vaddl.u8 q1, d2, d3 \n"
      "vaddl.u8 q2, d4, d5 \n"
      "vaddl.u8 q3, d6, d7 \n"
      "vswp.8 d1, d2 \n"  // ab_cd -> ac_bd
      "vswp.8 d5, d6 \n"  // ef_gh -> eg_fh
      "vadd.u16 q0, q0, q1 \n"  // (a+b)_(c+d)
      "vadd.u16 q2, q2, q3 \n"  // (e+f)_(g+h)
      "vrshrn.u16 d0, q0, #2 \n"  // first 2 pixels.
      "vrshrn.u16 d1, q2, #2 \n"  // next 2 pixels.
      "subs %3, %3, #4 \n"  // 4 pixels per loop.
      "vst1.8 {q0}, [%2]! \n"
      "bgt 1b \n"
      : "+r"(src_argb),    // %0
        "+r"(src_stride),  // %1
        "+r"(dst_argb),    // %2
        "+r"(dst_width)    // %3
      : "r"(src_stepx)     // %4
      : "memory", "cc", "r12", "q0", "q1", "q2", "q3");
}

// TODO(Yang Zhang): Investigate using fewer load instructions for
// the x/dx stepping.
#define LOAD1_DATA32_LANE(dn, n)       \
  "lsr %5, %3, #16 \n"                 \
  "add %6, %1, %5, lsl #2 \n"          \
  "add %3, %3, %4 \n"                  \
  "vld1.32 {" #dn "[" #n "]}, [%6] \n"

void ScaleARGBCols_NEON(uint8_t* dst_argb,
                        const uint8_t* src_argb,
                        int dst_width,
                        int x,
                        int dx) {
  int tmp;
  const uint8_t* src_tmp = src_argb;
  asm volatile(
      "1: \n"
      // clang-format off
      LOAD1_DATA32_LANE(d0, 0)
      LOAD1_DATA32_LANE(d0, 1)
      LOAD1_DATA32_LANE(d1, 0)
      LOAD1_DATA32_LANE(d1, 1)
      LOAD1_DATA32_LANE(d2, 0)
      LOAD1_DATA32_LANE(d2, 1)
      LOAD1_DATA32_LANE(d3, 0)
      LOAD1_DATA32_LANE(d3, 1)
      // clang-format on
      "vst1.32 {q0, q1}, [%0]! \n"  // store pixels
      "subs %2, %2, #8 \n"  // 8 processed per loop
      "bgt 1b \n"
      : "+r"(dst_argb),   // %0
        "+r"(src_argb),   // %1
        "+r"(dst_width),  // %2
        "+r"(x),          // %3
        "+r"(dx),         // %4
        "=&r"(tmp),       // %5
        "+r"(src_tmp)     // %6
      :
      : "memory", "cc", "q0", "q1");
}

#undef LOAD1_DATA32_LANE
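
// Scalar sketch (illustrative): point sampling with x and dx in 16.16 fixed
// point, which is what the lane loads above implement 8 pixels at a time.
// static void ScaleARGBCols_Sketch(uint32_t* dst, const uint32_t* src,
//                                  int w, int x, int dx) {
//   for (int i = 0; i < w; ++i) {
//     dst[i] = src[x >> 16];
//     x += dx;
//   }
// }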

// TODO(Yang Zhang): Investigate using fewer load instructions for
// the x/dx stepping.
#define LOAD2_DATA32_LANE(dn1, dn2, n)                    \
  "lsr %5, %3, #16 \n"                                    \
  "add %6, %1, %5, lsl #2 \n"                             \
  "add %3, %3, %4 \n"                                     \
  "vld2.32 {" #dn1 "[" #n "], " #dn2 "[" #n "]}, [%6] \n"

void ScaleARGBFilterCols_NEON(uint8_t* dst_argb,
                              const uint8_t* src_argb,
                              int dst_width,
                              int x,
                              int dx) {
  int dx_offset[4] = {0, 1, 2, 3};
  int* tmp = dx_offset;
  const uint8_t* src_tmp = src_argb;
  asm volatile(
      "vdup.32 q0, %3 \n"  // x
      "vdup.32 q1, %4 \n"  // dx
      "vld1.32 {q2}, [%5] \n"  // 0 1 2 3
      "vshl.i32 q9, q1, #2 \n"  // 4 * dx
      "vmul.s32 q1, q1, q2 \n"
      "vmov.i8 q3, #0x7f \n"  // 0x7F
      "vmov.i16 q15, #0x7f \n"  // 0x7F
      // x         , x + 1 * dx, x + 2 * dx, x + 3 * dx
      "vadd.s32 q8, q1, q0 \n"
      "1: \n"
      // d0, d1: a
      // d2, d3: b
      LOAD2_DATA32_LANE(d0, d2, 0)
      LOAD2_DATA32_LANE(d0, d2, 1)
      LOAD2_DATA32_LANE(d1, d3, 0)
      LOAD2_DATA32_LANE(d1, d3, 1)
      "vshrn.i32 d22, q8, #9 \n"
      "vand.16 d22, d22, d30 \n"
      "vdup.8 d24, d22[0] \n"
      "vdup.8 d25, d22[2] \n"
      "vdup.8 d26, d22[4] \n"
      "vdup.8 d27, d22[6] \n"
      "vext.8 d4, d24, d25, #4 \n"
      "vext.8 d5, d26, d27, #4 \n"  // f
      "veor.8 q10, q2, q3 \n"  // 0x7f ^ f
      "vmull.u8 q11, d0, d20 \n"
      "vmull.u8 q12, d1, d21 \n"
      "vmull.u8 q13, d2, d4 \n"
      "vmull.u8 q14, d3, d5 \n"
      "vadd.i16 q11, q11, q13 \n"
      "vadd.i16 q12, q12, q14 \n"
      "vshrn.i16 d0, q11, #7 \n"
      "vshrn.i16 d1, q12, #7 \n"
      "vst1.32 {d0, d1}, [%0]! \n"  // store pixels
      "vadd.s32 q8, q8, q9 \n"
      "subs %2, %2, #4 \n"  // 4 processed per loop
      "bgt 1b \n"
      : "+r"(dst_argb),   // %0
        "+r"(src_argb),   // %1
        "+r"(dst_width),  // %2
        "+r"(x),          // %3
        "+r"(dx),         // %4
        "+r"(tmp),        // %5
        "+r"(src_tmp)     // %6
      :
      : "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9",
        "q10", "q11", "q12", "q13", "q14", "q15");
}

#undef LOAD2_DATA32_LANE

#endif  // defined(__ARM_NEON__) && !defined(__aarch64__)

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif