convert_from_argb.cc

/*
 *  Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/convert_from_argb.h"

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
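
// Each converter below follows the same pattern: start from the portable C
// row functions, then use TestCpuFlag() to substitute SSSE3/AVX2/NEON/DSPR2/
// MSA row variants when the CPU supports them. The _Any_ variant handles an
// arbitrary width; the fully specialized variant is only selected when the
// width is a multiple of the SIMD block size (IS_ALIGNED).
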
// ARGB little endian (bgra in memory) to I444
LIBYUV_API
int ARGBToI444(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_y,
               int dst_stride_y,
               uint8* dst_u,
               int dst_stride_u,
               uint8* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*ARGBToUV444Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
                         int width) = ARGBToUV444Row_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
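  // When the strides exactly match the row widths, the planes are contiguous
  // in memory and the whole image can be processed as one long row.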
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_y == width &&
      dst_stride_u == width && dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOUV444ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUV444Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// ARGB little endian (bgra in memory) to I422
LIBYUV_API
int ARGBToI422(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_y,
               int dst_stride_y,
               uint8* dst_u,
               int dst_stride_u,
               uint8* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_y == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToUVRow = ARGBToUVRow_Any_DSPR2;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

LIBYUV_API
int ARGBToNV12(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_y,
               int dst_stride_y,
               uint8* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                      int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToUVRow = ARGBToUVRow_Any_DSPR2;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_DSPR2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
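  // UV is 2x2 subsampled: each pass converts two ARGB rows into temporary
  // planar U and V rows, then MergeUVRow_ interleaves them into the NV12
  // UV plane.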
  {
    // Allocate temporary rows of U and V.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

// Same as NV12 but U and V swapped.
LIBYUV_API
int ARGBToNV21(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_y,
               int dst_stride_y,
               uint8* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
                      int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToUVRow = ARGBToUVRow_Any_DSPR2;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_DSPR2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
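  // Same temporary-row scheme as ARGBToNV12, but MergeUVRow_ is called with
  // row_v first so the interleaved plane is written in VU (NV21) order.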
  {
    // Allocate temporary rows of U and V.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

// Convert ARGB to YUY2.
LIBYUV_API
int ARGBToYUY2(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_yuy2,
               int dst_stride_yuy2,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
                        const uint8* src_v, uint8* dst_yuy2, int width) =
      I422ToYUY2Row_C;
  if (!src_argb || !dst_yuy2 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
    dst_stride_yuy2 = -dst_stride_yuy2;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_yuy2 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yuy2 = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToUVRow = ARGBToUVRow_Any_DSPR2;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_DSPR2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_MSA;
    }
  }
#endif
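  // Each ARGB row is converted to planar Y, U and V in a temporary buffer,
  // then I422ToYUY2Row packs the result as Y0 U Y1 V.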
  {
    // Allocate temporary rows of Y, U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8* row_u = row_y + ((width + 63) & ~63);
    uint8* row_v = row_u + ((width + 63) & ~63) / 2;
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
      src_argb += src_stride_argb;
      dst_yuy2 += dst_stride_yuy2;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}

// Convert ARGB to UYVY.
LIBYUV_API
int ARGBToUYVY(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_uyvy,
               int dst_stride_uyvy,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
                      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
                        const uint8* src_v, uint8* dst_uyvy, int width) =
      I422ToUYVYRow_C;
  if (!src_argb || !dst_uyvy || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
    dst_stride_uyvy = -dst_stride_uyvy;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_uyvy == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_uyvy = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToUVRow = ARGBToUVRow_Any_DSPR2;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_DSPR2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_MSA;
    }
  }
#endif
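  // Same per-row scheme as ARGBToYUY2, except I422ToUYVYRow packs the bytes
  // in U Y0 V Y1 order.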
  {
    // Allocate temporary rows of Y, U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8* row_u = row_y + ((width + 63) & ~63);
    uint8* row_v = row_u + ((width + 63) & ~63) / 2;
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
      src_argb += src_stride_argb;
      dst_uyvy += dst_stride_uyvy;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}

// Convert ARGB to I400.
LIBYUV_API
int ARGBToI400(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_y,
               int dst_stride_y,
               int width,
               int height) {
  int y;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_DSPR2)
  if (TestCpuFlag(kCpuHasDSPR2)) {
    ARGBToYRow = ARGBToYRow_Any_DSPR2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_DSPR2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}

// Shuffle table for converting ARGB to RGBA.
static uvec8 kShuffleMaskARGBToRGBA = {3u,  0u, 1u, 2u,  7u,  4u,  5u,  6u,
                                       11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u};
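// Output byte i of every 16-byte group is taken from source byte mask[i], so
// the per-pixel memory order B,G,R,A (little-endian ARGB) becomes A,B,G,R
// (little-endian RGBA).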

// Convert ARGB to RGBA.
LIBYUV_API
int ARGBToRGBA(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_rgba,
               int dst_stride_rgba,
               int width,
               int height) {
  return ARGBShuffle(src_argb, src_stride_argb, dst_rgba, dst_stride_rgba,
                     (const uint8*)(&kShuffleMaskARGBToRGBA), width, height);
}

// Convert ARGB To RGB24.
LIBYUV_API
int ARGBToRGB24(const uint8* src_argb,
                int src_stride_argb,
                uint8* dst_rgb24,
                int dst_stride_rgb24,
                int width,
                int height) {
  int y;
  void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRGB24Row_C;
  if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_rgb24 == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb24 = 0;
  }
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB24Row = ARGBToRGB24Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}

// Convert ARGB To RAW.
LIBYUV_API
int ARGBToRAW(const uint8* src_argb,
              int src_stride_argb,
              uint8* dst_raw,
              int dst_stride_raw,
              int width,
              int height) {
  int y;
  void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRAWRow_C;
  if (!src_argb || !dst_raw || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_raw == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_raw = 0;
  }
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRAWRow = ARGBToRAWRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}

// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
static const uint8 kDither565_4x4[16] = {
    0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
};

// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
LIBYUV_API
int ARGBToRGB565Dither(const uint8* src_argb,
                       int src_stride_argb,
                       uint8* dst_rgb565,
                       int dst_stride_rgb565,
                       const uint8* dither4x4,
                       int width,
                       int height) {
  int y;
  void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb,
                                const uint32 dither4, int width) =
      ARGBToRGB565DitherRow_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  if (!dither4x4) {
    dither4x4 = kDither565_4x4;
  }
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MSA;
    }
  }
#endif
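  // Each output row uses one 4-byte row of the dither table, selected by
  // (y & 3) and passed to the row function packed into a uint32.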
  for (y = 0; y < height; ++y) {
    ARGBToRGB565DitherRow(src_argb, dst_rgb565,
                          *(uint32*)(dither4x4 + ((y & 3) << 2)),
                          width); /* NOLINT */
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}

// Convert ARGB To RGB565.
// TODO(fbarchard): Consider using dither function low level with zeros.
LIBYUV_API
int ARGBToRGB565(const uint8* src_argb,
                 int src_stride_argb,
                 uint8* dst_rgb565,
                 int dst_stride_rgb565,
                 int width,
                 int height) {
  int y;
  void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRGB565Row_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_rgb565 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb565 = 0;
  }
#if defined(HAS_ARGBTORGB565ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB565Row(src_argb, dst_rgb565, width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}

// Convert ARGB To ARGB1555.
LIBYUV_API
int ARGBToARGB1555(const uint8* src_argb,
                   int src_stride_argb,
                   uint8* dst_argb1555,
                   int dst_stride_argb1555,
                   int width,
                   int height) {
  int y;
  void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToARGB1555Row_C;
  if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_argb1555 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb1555 = 0;
  }
#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB1555Row(src_argb, dst_argb1555, width);
    src_argb += src_stride_argb;
    dst_argb1555 += dst_stride_argb1555;
  }
  return 0;
}

// Convert ARGB To ARGB4444.
LIBYUV_API
int ARGBToARGB4444(const uint8* src_argb,
                   int src_stride_argb,
                   uint8* dst_argb4444,
                   int dst_stride_argb4444,
                   int width,
                   int height) {
  int y;
  void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToARGB4444Row_C;
  if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_argb4444 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb4444 = 0;
  }
#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB4444Row(src_argb, dst_argb4444, width);
    src_argb += src_stride_argb;
    dst_argb4444 += dst_stride_argb4444;
  }
  return 0;
}

// Convert ARGB to J420. (JPEG full range I420).
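// The J variants use ARGBToYJRow/ARGBToUVJRow, which apply full-range (JPEG)
// coefficients, so Y spans 0-255 rather than the limited 16-235 range.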
LIBYUV_API
int ARGBToJ420(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_yj,
               int dst_stride_yj,
               uint8* dst_u,
               int dst_stride_u,
               uint8* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
    src_argb += src_stride_argb * 2;
    dst_yj += dst_stride_yj * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
  }
  return 0;
}

// Convert ARGB to J422. (JPEG full range I422).
LIBYUV_API
int ARGBToJ422(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_yj,
               int dst_stride_yj,
               uint8* dst_u,
               int dst_stride_u,
               uint8* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
                       uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_yj == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// Convert ARGB to J400.
LIBYUV_API
int ARGBToJ400(const uint8* src_argb,
               int src_stride_argb,
               uint8* dst_yj,
               int dst_stride_yj,
               int width,
               int height) {
  int y;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 && dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
  }
  return 0;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif