// scale_argb.cc — libyuv ARGB scaling implementation.
  1. /*
  2. * Copyright 2011 The LibYuv Project Authors. All rights reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "libyuv/scale.h"
  11. #include <assert.h>
  12. #include <string.h>
  13. #include "libyuv/cpu_id.h"
  14. #include "libyuv/planar_functions.h" // For CopyARGB
  15. #include "libyuv/row.h"
  16. #include "libyuv/scale_row.h"
  17. #ifdef __cplusplus
  18. namespace libyuv {
  19. extern "C" {
  20. #endif
  21. static __inline int Abs(int v) {
  22. return v >= 0 ? v : -v;
  23. }
// ScaleARGB ARGB, 1/2
// This is an optimized version for scaling down a ARGB to 1/2 of
// its original size.
// x/dx and y/dy are 16.16 fixed-point source coordinates and steps.
static void ScaleARGBDown2(int src_width,
                           int src_height,
                           int dst_width,
                           int dst_height,
                           int src_stride,
                           int dst_stride,
                           const uint8_t* src_argb,
                           uint8_t* dst_argb,
                           int x,
                           int dx,
                           int y,
                           int dy,
                           enum FilterMode filtering) {
  int j;
  // Bytes to advance the source pointer per output row.
  int row_stride = src_stride * (dy >> 16);
  // Start with the portable C row scaler for the requested filter mode;
  // replaced below by a SIMD specialization when the CPU supports one.
  void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride,
                            uint8_t* dst_argb, int dst_width) =
      filtering == kFilterNone
          ? ScaleARGBRowDown2_C
          : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_C
                                        : ScaleARGBRowDown2Box_C);
  (void)src_width;
  (void)src_height;
  (void)dx;
  assert(dx == 65536 * 2);      // Test scale factor of 2.
  assert((dy & 0x1ffff) == 0);  // Test vertical scale is multiple of 2.
  // Advance to odd row, even column.
  if (filtering == kFilterBilinear) {
    src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
  } else {
    src_argb += (y >> 16) * src_stride + ((x >> 16) - 1) * 4;
  }
  // CPU dispatch: the *_Any_* variants handle any width; the aligned
  // variants are used when dst_width meets the SIMD alignment.
#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_SSE2
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_SSE2
                                          : ScaleARGBRowDown2Box_Any_SSE2);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_SSE2
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_SSE2
                                            : ScaleARGBRowDown2Box_SSE2);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_NEON
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_NEON
                                          : ScaleARGBRowDown2Box_Any_NEON);
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_NEON
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_NEON
                                            : ScaleARGBRowDown2Box_NEON);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_MMI
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_MMI
                                          : ScaleARGBRowDown2Box_Any_MMI);
    if (IS_ALIGNED(dst_width, 2)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_MMI
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_MMI
                                            : ScaleARGBRowDown2Box_MMI);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_MSA
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_MSA
                                          : ScaleARGBRowDown2Box_Any_MSA);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_MSA
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_MSA
                                            : ScaleARGBRowDown2Box_MSA);
    }
  }
#endif
  // Linear filtering reads only one source row, so pass a zero row offset.
  if (filtering == kFilterLinear) {
    src_stride = 0;
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDown2(src_argb, src_stride, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
}
// ScaleARGB ARGB, 1/4
// This is an optimized version for scaling down a ARGB to 1/4 of
// its original size.
// Implemented as two cascaded 2:1 box passes: two intermediate half-width
// rows are produced, then box-filtered again into the destination row.
static void ScaleARGBDown4Box(int src_width,
                              int src_height,
                              int dst_width,
                              int dst_height,
                              int src_stride,
                              int dst_stride,
                              const uint8_t* src_argb,
                              uint8_t* dst_argb,
                              int x,
                              int dx,
                              int y,
                              int dy) {
  int j;
  // Allocate 2 rows of ARGB.
  // kRowSize is one intermediate row (dst_width * 2 pixels * 4 bytes),
  // rounded up to a multiple of 32 bytes for SIMD-friendly alignment.
  const int kRowSize = (dst_width * 2 * 4 + 31) & ~31;
  align_buffer_64(row, kRowSize * 2);
  int row_stride = src_stride * (dy >> 16);
  void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride,
                            uint8_t* dst_argb, int dst_width) =
      ScaleARGBRowDown2Box_C;
  // Advance to odd row, even column.
  src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
  (void)src_width;
  (void)src_height;
  (void)dx;
  assert(dx == 65536 * 4);      // Test scale factor of 4.
  assert((dy & 0x3ffff) == 0);  // Test vertical scale is multiple of 4.
#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_SSE2;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 = ScaleARGBRowDown2Box_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBRowDown2 = ScaleARGBRowDown2Box_NEON;
    }
  }
#endif
  for (j = 0; j < dst_height; ++j) {
    // First pass: 2:1 box filter source rows 0/1 and 2/3 into two
    // half-width intermediate rows.
    ScaleARGBRowDown2(src_argb, src_stride, row, dst_width * 2);
    ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride, row + kRowSize,
                      dst_width * 2);
    // Second pass: 2:1 box filter the two intermediate rows into the
    // final quarter-size destination row.
    ScaleARGBRowDown2(row, kRowSize, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
  free_aligned_buffer_64(row);
}
// ScaleARGB ARGB Even
// This is an optimized version for scaling down a ARGB to even
// multiple of its original size.
static void ScaleARGBDownEven(int src_width,
                              int src_height,
                              int dst_width,
                              int dst_height,
                              int src_stride,
                              int dst_stride,
                              const uint8_t* src_argb,
                              uint8_t* dst_argb,
                              int x,
                              int dx,
                              int y,
                              int dy,
                              enum FilterMode filtering) {
  int j;
  // Integer pixel step between sampled source columns (dx is 16.16 fixed).
  int col_step = dx >> 16;
  int row_stride = (dy >> 16) * src_stride;
  // Box filter averages a 2x2 block; unfiltered picks the single pixel.
  void (*ScaleARGBRowDownEven)(const uint8_t* src_argb, ptrdiff_t src_stride,
                               int src_step, uint8_t* dst_argb, int dst_width) =
      filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C;
  (void)src_width;
  (void)src_height;
  assert(IS_ALIGNED(src_width, 2));
  assert(IS_ALIGNED(src_height, 2));
  // Advance to the starting source pixel (4 bytes per ARGB pixel).
  src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_SSE2
                                     : ScaleARGBRowDownEven_Any_SSE2;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_SSE2 : ScaleARGBRowDownEven_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_NEON
                                     : ScaleARGBRowDownEven_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_NEON : ScaleARGBRowDownEven_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MMI
                                     : ScaleARGBRowDownEven_Any_MMI;
    if (IS_ALIGNED(dst_width, 2)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_MMI : ScaleARGBRowDownEven_MMI;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MSA
                                     : ScaleARGBRowDownEven_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_MSA : ScaleARGBRowDownEven_MSA;
    }
  }
#endif
  // Linear filtering reads only one source row, so pass a zero row offset.
  if (filtering == kFilterLinear) {
    src_stride = 0;
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDownEven(src_argb, src_stride, col_step, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
}
// Scale ARGB down with bilinear interpolation.
// Vertically interpolates between two source rows, then filters columns
// horizontally into the destination row.
static void ScaleARGBBilinearDown(int src_width,
                                  int src_height,
                                  int dst_width,
                                  int dst_height,
                                  int src_stride,
                                  int dst_stride,
                                  const uint8_t* src_argb,
                                  uint8_t* dst_argb,
                                  int x,
                                  int dx,
                                  int y,
                                  int dy,
                                  enum FilterMode filtering) {
  int j;
  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  // 64-bit column scaler needed when x coordinates can exceed 15 bits.
  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      (src_width >= 32768) ? ScaleARGBFilterCols64_C : ScaleARGBFilterCols_C;
  // Compute the horizontal source span actually used so only that clipped
  // region is interpolated (dx may be negative for mirrored scaling).
  int64_t xlast = x + (int64_t)(dst_width - 1) * dx;
  int64_t xl = (dx >= 0) ? x : xlast;
  int64_t xr = (dx >= 0) ? xlast : x;
  int clip_src_width;
  xl = (xl >> 16) & ~3;    // Left edge aligned.
  xr = (xr >> 16) + 1;     // Right most pixel used. Bilinear uses 2 pixels.
  xr = (xr + 1 + 3) & ~3;  // 1 beyond 4 pixel aligned right most pixel.
  if (xr > src_width) {
    xr = src_width;
  }
  clip_src_width = (int)(xr - xl) * 4;  // Width aligned to 4.
  src_argb += xl * 4;
  // Rebase x so it is relative to the clipped left edge.
  x -= (int)(xl << 16);
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(clip_src_width, 16)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(clip_src_width, 16)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
  // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
  // Allocate a row of ARGB.
  {
    align_buffer_64(row, clip_src_width * 4);
    // Clamp y so the last source row is never read past.
    const int max_y = (src_height - 1) << 16;
    if (y > max_y) {
      y = max_y;
    }
    for (j = 0; j < dst_height; ++j) {
      int yi = y >> 16;
      const uint8_t* src = src_argb + yi * src_stride;
      if (filtering == kFilterLinear) {
        // Linear: no vertical interpolation; filter columns directly.
        ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx);
      } else {
        // yf is the vertical sub-pixel fraction (top 8 bits of the
        // fractional part of y).
        int yf = (y >> 8) & 255;
        InterpolateRow(row, src, src_stride, clip_src_width, yf);
        ScaleARGBFilterCols(dst_argb, row, dst_width, x, dx);
      }
      dst_argb += dst_stride;
      y += dy;
      if (y > max_y) {
        y = max_y;
      }
    }
    free_aligned_buffer_64(row);
  }
}
// Scale ARGB up with bilinear interpolation.
// Horizontally scales two source rows into a rotating 2-row buffer, then
// vertically interpolates between them for each destination row.
static void ScaleARGBBilinearUp(int src_width,
                                int src_height,
                                int dst_width,
                                int dst_height,
                                int src_stride,
                                int dst_stride,
                                const uint8_t* src_argb,
                                uint8_t* dst_argb,
                                int x,
                                int dx,
                                int y,
                                int dy,
                                enum FilterMode filtering) {
  int j;
  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  // Filtered modes interpolate columns; unfiltered point-samples them.
  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
  const int max_y = (src_height - 1) << 16;
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    InterpolateRow = InterpolateRow_Any_MMI;
    if (IS_ALIGNED(dst_width, 2)) {
      InterpolateRow = InterpolateRow_MMI;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
  // Wide sources need the 64-bit fixed-point column scalers.
  if (src_width >= 32768) {
    ScaleARGBFilterCols =
        filtering ? ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
  }
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (!filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MMI)
  if (!filtering && TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_MMI;
    if (IS_ALIGNED(dst_width, 1)) {
      ScaleARGBFilterCols = ScaleARGBCols_MMI;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (!filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBCols_MSA;
    }
  }
#endif
  // Special case: exact 2x horizontal upsample with no filtering.
  if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBFilterCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
    }
#endif
#if defined(HAS_SCALEARGBCOLSUP2_MMI)
    if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBColsUp2_MMI;
    }
#endif
  }
  if (y > max_y) {
    y = max_y;
  }
  {
    int yi = y >> 16;
    const uint8_t* src = src_argb + yi * src_stride;
    // Allocate 2 rows of ARGB.
    // kRowSize is one destination-width row rounded up to 32 bytes.
    const int kRowSize = (dst_width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
    // rowptr/rowstride implement a rotating 2-row window: rowstride is
    // negated after each advance so the two buffers alternate roles.
    uint8_t* rowptr = row;
    int rowstride = kRowSize;
    int lasty = yi;
    // Prime the window with the first two source rows (column-scaled).
    ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
    if (src_height > 1) {
      src += src_stride;
    }
    ScaleARGBFilterCols(rowptr + rowstride, src, dst_width, x, dx);
    src += src_stride;
    for (j = 0; j < dst_height; ++j) {
      yi = y >> 16;
      if (yi != lasty) {
        // Moved to a new source row; clamp at the bottom if needed.
        if (y > max_y) {
          y = max_y;
          yi = y >> 16;
          src = src_argb + yi * src_stride;
        }
        if (yi != lasty) {
          // Scale the next source row into the older half of the window.
          ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
          rowptr += rowstride;
          rowstride = -rowstride;
          lasty = yi;
          src += src_stride;
        }
      }
      if (filtering == kFilterLinear) {
        InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
      } else {
        int yf = (y >> 8) & 255;
        InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
      }
      dst_argb += dst_stride;
      y += dy;
    }
    free_aligned_buffer_64(row);
  }
}
  553. #ifdef YUVSCALEUP
  554. // Scale YUV to ARGB up with bilinear interpolation.
  555. static void ScaleYUVToARGBBilinearUp(int src_width,
  556. int src_height,
  557. int dst_width,
  558. int dst_height,
  559. int src_stride_y,
  560. int src_stride_u,
  561. int src_stride_v,
  562. int dst_stride_argb,
  563. const uint8_t* src_y,
  564. const uint8_t* src_u,
  565. const uint8_t* src_v,
  566. uint8_t* dst_argb,
  567. int x,
  568. int dx,
  569. int y,
  570. int dy,
  571. enum FilterMode filtering) {
  572. int j;
  573. void (*I422ToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf,
  574. const uint8_t* v_buf, uint8_t* rgb_buf, int width) =
  575. I422ToARGBRow_C;
  576. #if defined(HAS_I422TOARGBROW_SSSE3)
  577. if (TestCpuFlag(kCpuHasSSSE3)) {
  578. I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
  579. if (IS_ALIGNED(src_width, 8)) {
  580. I422ToARGBRow = I422ToARGBRow_SSSE3;
  581. }
  582. }
  583. #endif
  584. #if defined(HAS_I422TOARGBROW_AVX2)
  585. if (TestCpuFlag(kCpuHasAVX2)) {
  586. I422ToARGBRow = I422ToARGBRow_Any_AVX2;
  587. if (IS_ALIGNED(src_width, 16)) {
  588. I422ToARGBRow = I422ToARGBRow_AVX2;
  589. }
  590. }
  591. #endif
  592. #if defined(HAS_I422TOARGBROW_NEON)
  593. if (TestCpuFlag(kCpuHasNEON)) {
  594. I422ToARGBRow = I422ToARGBRow_Any_NEON;
  595. if (IS_ALIGNED(src_width, 8)) {
  596. I422ToARGBRow = I422ToARGBRow_NEON;
  597. }
  598. }
  599. #endif
  600. #if defined(HAS_I422TOARGBROW_MMI)
  601. if (TestCpuFlag(kCpuHasMMI)) {
  602. I422ToARGBRow = I422ToARGBRow_Any_MMI;
  603. if (IS_ALIGNED(src_width, 4)) {
  604. I422ToARGBRow = I422ToARGBRow_MMI;
  605. }
  606. }
  607. #endif
  608. #if defined(HAS_I422TOARGBROW_MSA)
  609. if (TestCpuFlag(kCpuHasMSA)) {
  610. I422ToARGBRow = I422ToARGBRow_Any_MSA;
  611. if (IS_ALIGNED(src_width, 8)) {
  612. I422ToARGBRow = I422ToARGBRow_MSA;
  613. }
  614. }
  615. #endif
  616. void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
  617. ptrdiff_t src_stride, int dst_width,
  618. int source_y_fraction) = InterpolateRow_C;
  619. #if defined(HAS_INTERPOLATEROW_SSSE3)
  620. if (TestCpuFlag(kCpuHasSSSE3)) {
  621. InterpolateRow = InterpolateRow_Any_SSSE3;
  622. if (IS_ALIGNED(dst_width, 4)) {
  623. InterpolateRow = InterpolateRow_SSSE3;
  624. }
  625. }
  626. #endif
  627. #if defined(HAS_INTERPOLATEROW_AVX2)
  628. if (TestCpuFlag(kCpuHasAVX2)) {
  629. InterpolateRow = InterpolateRow_Any_AVX2;
  630. if (IS_ALIGNED(dst_width, 8)) {
  631. InterpolateRow = InterpolateRow_AVX2;
  632. }
  633. }
  634. #endif
  635. #if defined(HAS_INTERPOLATEROW_NEON)
  636. if (TestCpuFlag(kCpuHasNEON)) {
  637. InterpolateRow = InterpolateRow_Any_NEON;
  638. if (IS_ALIGNED(dst_width, 4)) {
  639. InterpolateRow = InterpolateRow_NEON;
  640. }
  641. }
  642. #endif
  643. #if defined(HAS_INTERPOLATEROW_MSA)
  644. if (TestCpuFlag(kCpuHasMSA)) {
  645. InterpolateRow = InterpolateRow_Any_MSA;
  646. if (IS_ALIGNED(dst_width, 8)) {
  647. InterpolateRow = InterpolateRow_MSA;
  648. }
  649. }
  650. #endif
  651. void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
  652. int dst_width, int x, int dx) =
  653. filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
  654. if (src_width >= 32768) {
  655. ScaleARGBFilterCols =
  656. filtering ? ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
  657. }
  658. #if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  659. if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
  660. ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  661. }
  662. #endif
  663. #if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  664. if (filtering && TestCpuFlag(kCpuHasNEON)) {
  665. ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
  666. if (IS_ALIGNED(dst_width, 4)) {
  667. ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
  668. }
  669. }
  670. #endif
  671. #if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  672. if (filtering && TestCpuFlag(kCpuHasMSA)) {
  673. ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
  674. if (IS_ALIGNED(dst_width, 8)) {
  675. ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
  676. }
  677. }
  678. #endif
  679. #if defined(HAS_SCALEARGBCOLS_SSE2)
  680. if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
  681. ScaleARGBFilterCols = ScaleARGBCols_SSE2;
  682. }
  683. #endif
  684. #if defined(HAS_SCALEARGBCOLS_NEON)
  685. if (!filtering && TestCpuFlag(kCpuHasNEON)) {
  686. ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
  687. if (IS_ALIGNED(dst_width, 8)) {
  688. ScaleARGBFilterCols = ScaleARGBCols_NEON;
  689. }
  690. }
  691. #endif
  692. #if defined(HAS_SCALEARGBCOLS_MMI)
  693. if (!filtering && TestCpuFlag(kCpuHasMMI)) {
  694. ScaleARGBFilterCols = ScaleARGBCols_Any_MMI;
  695. if (IS_ALIGNED(dst_width, 1)) {
  696. ScaleARGBFilterCols = ScaleARGBCols_MMI;
  697. }
  698. }
  699. #endif
  700. #if defined(HAS_SCALEARGBCOLS_MSA)
  701. if (!filtering && TestCpuFlag(kCpuHasMSA)) {
  702. ScaleARGBFilterCols = ScaleARGBCols_Any_MSA;
  703. if (IS_ALIGNED(dst_width, 4)) {
  704. ScaleARGBFilterCols = ScaleARGBCols_MSA;
  705. }
  706. }
  707. #endif
  708. if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
  709. ScaleARGBFilterCols = ScaleARGBColsUp2_C;
  710. #if defined(HAS_SCALEARGBCOLSUP2_SSE2)
  711. if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
  712. ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
  713. }
  714. #endif
  715. #if defined(HAS_SCALEARGBCOLSUP2_MMI)
  716. if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) {
  717. ScaleARGBFilterCols = ScaleARGBColsUp2_MMI;
  718. }
  719. #endif
  720. }
  721. const int max_y = (src_height - 1) << 16;
  722. if (y > max_y) {
  723. y = max_y;
  724. }
  725. const int kYShift = 1; // Shift Y by 1 to convert Y plane to UV coordinate.
  726. int yi = y >> 16;
  727. int uv_yi = yi >> kYShift;
  728. const uint8_t* src_row_y = src_y + yi * src_stride_y;
  729. const uint8_t* src_row_u = src_u + uv_yi * src_stride_u;
  730. const uint8_t* src_row_v = src_v + uv_yi * src_stride_v;
  731. // Allocate 2 rows of ARGB.
  732. const int kRowSize = (dst_width * 4 + 31) & ~31;
  733. align_buffer_64(row, kRowSize * 2);
  734. // Allocate 1 row of ARGB for source conversion.
  735. align_buffer_64(argb_row, src_width * 4);
  736. uint8_t* rowptr = row;
  737. int rowstride = kRowSize;
  738. int lasty = yi;
  739. // TODO(fbarchard): Convert first 2 rows of YUV to ARGB.
  740. ScaleARGBFilterCols(rowptr, src_row_y, dst_width, x, dx);
  741. if (src_height > 1) {
  742. src_row_y += src_stride_y;
  743. if (yi & 1) {
  744. src_row_u += src_stride_u;
  745. src_row_v += src_stride_v;
  746. }
  747. }
  748. ScaleARGBFilterCols(rowptr + rowstride, src_row_y, dst_width, x, dx);
  749. if (src_height > 2) {
  750. src_row_y += src_stride_y;
  751. if (!(yi & 1)) {
  752. src_row_u += src_stride_u;
  753. src_row_v += src_stride_v;
  754. }
  755. }
  756. for (j = 0; j < dst_height; ++j) {
  757. yi = y >> 16;
  758. if (yi != lasty) {
  759. if (y > max_y) {
  760. y = max_y;
  761. yi = y >> 16;
  762. uv_yi = yi >> kYShift;
  763. src_row_y = src_y + yi * src_stride_y;
  764. src_row_u = src_u + uv_yi * src_stride_u;
  765. src_row_v = src_v + uv_yi * src_stride_v;
  766. }
  767. if (yi != lasty) {
  768. // TODO(fbarchard): Convert the clipped region of row.
  769. I422ToARGBRow(src_row_y, src_row_u, src_row_v, argb_row, src_width);
  770. ScaleARGBFilterCols(rowptr, argb_row, dst_width, x, dx);
  771. rowptr += rowstride;
  772. rowstride = -rowstride;
  773. lasty = yi;
  774. src_row_y += src_stride_y;
  775. if (yi & 1) {
  776. src_row_u += src_stride_u;
  777. src_row_v += src_stride_v;
  778. }
  779. }
  780. }
  781. if (filtering == kFilterLinear) {
  782. InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
  783. } else {
  784. int yf = (y >> 8) & 255;
  785. InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
  786. }
  787. dst_argb += dst_stride_argb;
  788. y += dy;
  789. }
  790. free_aligned_buffer_64(row);
  791. free_aligned_buffer_64(row_argb);
  792. }
  793. #endif
// Scale ARGB to/from any dimensions, without interpolation.
// Fixed point math is used for performance: The upper 16 bits
// of x and dx is the integer part of the source position and
// the lower 16 bits are the fixed decimal part.
static void ScaleARGBSimple(int src_width,
                            int src_height,
                            int dst_width,
                            int dst_height,
                            int src_stride,
                            int dst_stride,
                            const uint8_t* src_argb,
                            uint8_t* dst_argb,
                            int x,
                            int dx,
                            int y,
                            int dy) {
  int j;
  // 64-bit column scaler needed when x coordinates can exceed 15 bits.
  void (*ScaleARGBCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                        int dst_width, int x, int dx) =
      (src_width >= 32768) ? ScaleARGBCols64_C : ScaleARGBCols_C;
  (void)src_height;
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBCols = ScaleARGBCols_Any_MMI;
    if (IS_ALIGNED(dst_width, 1)) {
      ScaleARGBCols = ScaleARGBCols_MMI;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBCols = ScaleARGBCols_MSA;
    }
  }
#endif
  // Special case: exact 2x horizontal upsample.
  if (src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBCols = ScaleARGBColsUp2_SSE2;
    }
#endif
#if defined(HAS_SCALEARGBCOLSUP2_MMI)
    if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) {
      ScaleARGBCols = ScaleARGBColsUp2_MMI;
    }
#endif
  }
  // Point-sample each destination row from the nearest source row.
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBCols(dst_argb, src_argb + (y >> 16) * src_stride, dst_width, x,
                  dx);
    dst_argb += dst_stride;
    y += dy;
  }
}
// ScaleARGB a ARGB.
// This function in turn calls a scaling function
// suitable for handling the desired resolutions.
static void ScaleARGB(const uint8_t* src,
                      int src_stride,
                      int src_width,
                      int src_height,
                      uint8_t* dst,
                      int dst_stride,
                      int dst_width,
                      int dst_height,
                      int clip_x,
                      int clip_y,
                      int clip_width,
                      int clip_height,
                      enum FilterMode filtering) {
  // Initial source x/y coordinate and step values as 16.16 fixed point.
  int x = 0;
  int y = 0;
  int dx = 0;
  int dy = 0;
  // ARGB does not support box filter yet, but allow the user to pass it.
  // Simplify filtering when possible.
  filtering = ScaleFilterReduce(src_width, src_height, dst_width, dst_height,
                                filtering);

  // Negative src_height means invert the image.
  if (src_height < 0) {
    src_height = -src_height;
    src = src + (src_height - 1) * src_stride;
    src_stride = -src_stride;
  }
  // Compute the 16.16 start coordinates and per-pixel steps for this
  // src->dst mapping; the filter mode affects sample positioning.
  ScaleSlope(src_width, src_height, dst_width, dst_height, filtering, &x, &y,
             &dx, &dy);
  src_width = Abs(src_width);
  // Advance the start position into the clip rectangle: the fractional
  // part folds into x/y, the integer part advances the source pointer
  // (4 bytes per ARGB pixel horizontally, one stride per row vertically).
  if (clip_x) {
    int64_t clipf = (int64_t)(clip_x)*dx;
    x += (clipf & 0xffff);
    src += (clipf >> 16) * 4;
    dst += clip_x * 4;
  }
  if (clip_y) {
    int64_t clipf = (int64_t)(clip_y)*dy;
    y += (clipf & 0xffff);
    src += (clipf >> 16) * src_stride;
    dst += clip_y * dst_stride;
  }

  // Special case for integer step values.
  if (((dx | dy) & 0xffff) == 0) {
    if (!dx || !dy) {  // 1 pixel wide and/or tall.
      filtering = kFilterNone;
    } else {
      // Optimized even scale down. ie 2, 4, 6, 8, 10x.
      if (!(dx & 0x10000) && !(dy & 0x10000)) {
        if (dx == 0x20000) {
          // Optimized 1/2 downsample.
          ScaleARGBDown2(src_width, src_height, clip_width, clip_height,
                         src_stride, dst_stride, src, dst, x, dx, y, dy,
                         filtering);
          return;
        }
        if (dx == 0x40000 && filtering == kFilterBox) {
          // Optimized 1/4 box downsample.
          ScaleARGBDown4Box(src_width, src_height, clip_width, clip_height,
                            src_stride, dst_stride, src, dst, x, dx, y, dy);
          return;
        }
        ScaleARGBDownEven(src_width, src_height, clip_width, clip_height,
                          src_stride, dst_stride, src, dst, x, dx, y, dy,
                          filtering);
        return;
      }
      // Optimized odd scale down. ie 3, 5, 7, 9x.
      if ((dx & 0x10000) && (dy & 0x10000)) {
        filtering = kFilterNone;
        if (dx == 0x10000 && dy == 0x10000) {
          // Straight copy.
          ARGBCopy(src + (y >> 16) * src_stride + (x >> 16) * 4, src_stride,
                   dst, dst_stride, clip_width, clip_height);
          return;
        }
      }
    }
  }
  // Horizontal step of exactly one source pixel with no fractional start:
  // only vertical resampling is needed.
  if (dx == 0x10000 && (x & 0xffff) == 0) {
    // Arbitrary scale vertically, but unscaled vertically.
    ScalePlaneVertical(src_height, clip_width, clip_height, src_stride,
                       dst_stride, src, dst, x, y, dy, 4, filtering);
    return;
  }
  // dy < 65536 means vertical upscale (less than one source row per
  // destination row), handled by the bilinear-up path.
  if (filtering && dy < 65536) {
    ScaleARGBBilinearUp(src_width, src_height, clip_width, clip_height,
                        src_stride, dst_stride, src, dst, x, dx, y, dy,
                        filtering);
    return;
  }
  if (filtering) {
    ScaleARGBBilinearDown(src_width, src_height, clip_width, clip_height,
                          src_stride, dst_stride, src, dst, x, dx, y, dy,
                          filtering);
    return;
  }
  // No filtering requested: nearest-neighbor point sampling.
  ScaleARGBSimple(src_width, src_height, clip_width, clip_height, src_stride,
                  dst_stride, src, dst, x, dx, y, dy);
}
  968. LIBYUV_API
  969. int ARGBScaleClip(const uint8_t* src_argb,
  970. int src_stride_argb,
  971. int src_width,
  972. int src_height,
  973. uint8_t* dst_argb,
  974. int dst_stride_argb,
  975. int dst_width,
  976. int dst_height,
  977. int clip_x,
  978. int clip_y,
  979. int clip_width,
  980. int clip_height,
  981. enum FilterMode filtering) {
  982. if (!src_argb || src_width == 0 || src_height == 0 || !dst_argb ||
  983. dst_width <= 0 || dst_height <= 0 || clip_x < 0 || clip_y < 0 ||
  984. clip_width > 32768 || clip_height > 32768 ||
  985. (clip_x + clip_width) > dst_width ||
  986. (clip_y + clip_height) > dst_height) {
  987. return -1;
  988. }
  989. ScaleARGB(src_argb, src_stride_argb, src_width, src_height, dst_argb,
  990. dst_stride_argb, dst_width, dst_height, clip_x, clip_y, clip_width,
  991. clip_height, filtering);
  992. return 0;
  993. }
  994. // Scale an ARGB image.
  995. LIBYUV_API
  996. int ARGBScale(const uint8_t* src_argb,
  997. int src_stride_argb,
  998. int src_width,
  999. int src_height,
  1000. uint8_t* dst_argb,
  1001. int dst_stride_argb,
  1002. int dst_width,
  1003. int dst_height,
  1004. enum FilterMode filtering) {
  1005. if (!src_argb || src_width == 0 || src_height == 0 || src_width > 32768 ||
  1006. src_height > 32768 || !dst_argb || dst_width <= 0 || dst_height <= 0) {
  1007. return -1;
  1008. }
  1009. ScaleARGB(src_argb, src_stride_argb, src_width, src_height, dst_argb,
  1010. dst_stride_argb, dst_width, dst_height, 0, 0, dst_width, dst_height,
  1011. filtering);
  1012. return 0;
  1013. }
  1014. // Scale with YUV conversion to ARGB and clipping.
  1015. LIBYUV_API
  1016. int YUVToARGBScaleClip(const uint8_t* src_y,
  1017. int src_stride_y,
  1018. const uint8_t* src_u,
  1019. int src_stride_u,
  1020. const uint8_t* src_v,
  1021. int src_stride_v,
  1022. uint32_t src_fourcc,
  1023. int src_width,
  1024. int src_height,
  1025. uint8_t* dst_argb,
  1026. int dst_stride_argb,
  1027. uint32_t dst_fourcc,
  1028. int dst_width,
  1029. int dst_height,
  1030. int clip_x,
  1031. int clip_y,
  1032. int clip_width,
  1033. int clip_height,
  1034. enum FilterMode filtering) {
  1035. uint8_t* argb_buffer = (uint8_t*)malloc(src_width * src_height * 4);
  1036. int r;
  1037. (void)src_fourcc; // TODO(fbarchard): implement and/or assert.
  1038. (void)dst_fourcc;
  1039. I420ToARGB(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
  1040. argb_buffer, src_width * 4, src_width, src_height);
  1041. r = ARGBScaleClip(argb_buffer, src_width * 4, src_width, src_height, dst_argb,
  1042. dst_stride_argb, dst_width, dst_height, clip_x, clip_y,
  1043. clip_width, clip_height, filtering);
  1044. free(argb_buffer);
  1045. return r;
  1046. }
  1047. #ifdef __cplusplus
  1048. } // extern "C"
  1049. } // namespace libyuv
  1050. #endif