// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_HAL_VSX_UTILS_HPP
#define OPENCV_HAL_VSX_UTILS_HPP
#include "opencv2/core/cvdef.h"
#ifndef SKIP_INCLUDES
# include <assert.h>
#endif
//! @addtogroup core_utils_vsx
//! @{
#if CV_VSX
#define __VSX_S16__(c, v) (c){v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v}
#define __VSX_S8__(c, v) (c){v, v, v, v, v, v, v, v}
#define __VSX_S4__(c, v) (c){v, v, v, v}
#define __VSX_S2__(c, v) (c){v, v}
typedef __vector unsigned char vec_uchar16;
#define vec_uchar16_set(...) (vec_uchar16){__VA_ARGS__}
#define vec_uchar16_sp(c) (__VSX_S16__(vec_uchar16, (unsigned char)c))
#define vec_uchar16_c(v) ((vec_uchar16)(v))
#define vec_uchar16_z vec_uchar16_sp(0)
typedef __vector signed char vec_char16;
#define vec_char16_set(...) (vec_char16){__VA_ARGS__}
#define vec_char16_sp(c) (__VSX_S16__(vec_char16, (signed char)c))
#define vec_char16_c(v) ((vec_char16)(v))
#define vec_char16_z vec_char16_sp(0)
typedef __vector unsigned short vec_ushort8;
#define vec_ushort8_set(...) (vec_ushort8){__VA_ARGS__}
#define vec_ushort8_sp(c) (__VSX_S8__(vec_ushort8, (unsigned short)c))
#define vec_ushort8_c(v) ((vec_ushort8)(v))
#define vec_ushort8_z vec_ushort8_sp(0)
typedef __vector signed short vec_short8;
#define vec_short8_set(...) (vec_short8){__VA_ARGS__}
#define vec_short8_sp(c) (__VSX_S8__(vec_short8, (signed short)c))
#define vec_short8_c(v) ((vec_short8)(v))
#define vec_short8_z vec_short8_sp(0)
typedef __vector unsigned int vec_uint4;
#define vec_uint4_set(...) (vec_uint4){__VA_ARGS__}
#define vec_uint4_sp(c) (__VSX_S4__(vec_uint4, (unsigned int)c))
#define vec_uint4_c(v) ((vec_uint4)(v))
#define vec_uint4_z vec_uint4_sp(0)
typedef __vector signed int vec_int4;
#define vec_int4_set(...) (vec_int4){__VA_ARGS__}
#define vec_int4_sp(c) (__VSX_S4__(vec_int4, (signed int)c))
#define vec_int4_c(v) ((vec_int4)(v))
#define vec_int4_z vec_int4_sp(0)
typedef __vector float vec_float4;
#define vec_float4_set(...) (vec_float4){__VA_ARGS__}
#define vec_float4_sp(c) (__VSX_S4__(vec_float4, c))
#define vec_float4_c(v) ((vec_float4)(v))
#define vec_float4_z vec_float4_sp(0)
typedef __vector unsigned long long vec_udword2;
#define vec_udword2_set(...) (vec_udword2){__VA_ARGS__}
#define vec_udword2_sp(c) (__VSX_S2__(vec_udword2, (unsigned long long)c))
#define vec_udword2_c(v) ((vec_udword2)(v))
#define vec_udword2_z vec_udword2_sp(0)
typedef __vector signed long long vec_dword2;
#define vec_dword2_set(...) (vec_dword2){__VA_ARGS__}
#define vec_dword2_sp(c) (__VSX_S2__(vec_dword2, (signed long long)c))
#define vec_dword2_c(v) ((vec_dword2)(v))
#define vec_dword2_z vec_dword2_sp(0)
typedef __vector double vec_double2;
#define vec_double2_set(...) (vec_double2){__VA_ARGS__}
#define vec_double2_c(v) ((vec_double2)(v))
#define vec_double2_sp(c) (__VSX_S2__(vec_double2, c))
#define vec_double2_z vec_double2_sp(0)
#define vec_bchar16 __vector __bool char
#define vec_bchar16_set(...) (vec_bchar16){__VA_ARGS__}
#define vec_bchar16_c(v) ((vec_bchar16)(v))
#define vec_bshort8 __vector __bool short
#define vec_bshort8_set(...) (vec_bshort8){__VA_ARGS__}
#define vec_bshort8_c(v) ((vec_bshort8)(v))
#define vec_bint4 __vector __bool int
#define vec_bint4_set(...) (vec_bint4){__VA_ARGS__}
#define vec_bint4_c(v) ((vec_bint4)(v))
#define vec_bdword2 __vector __bool long long
#define vec_bdword2_set(...) (vec_bdword2){__VA_ARGS__}
#define vec_bdword2_c(v) ((vec_bdword2)(v))
#define VSX_FINLINE(tp) extern inline tp __attribute__((always_inline))
#define VSX_REDIRECT_1RG(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) { return fn2(a); }
#define VSX_REDIRECT_2RG(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a, const rg& b) { return fn2(a, b); }
/*
 * GCC VSX compatibility
**/
#if defined(__GNUG__) && !defined(__clang__)
// inline asm helper
#define VSX_IMPL_1RG(rt, rto, rg, rgo, opc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "="#rto (rs) : #rgo (a)); return rs; }
#define VSX_IMPL_1VRG(rt, rg, opc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ rt rs; __asm__ __volatile__(#opc" %0,%1" : "=v" (rs) : "v" (a)); return rs; }
#define VSX_IMPL_2VRG_F(rt, rg, fopc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a, const rg& b) \
{ rt rs; __asm__ __volatile__(fopc : "=v" (rs) : "v" (a), "v" (b)); return rs; }
#define VSX_IMPL_2VRG(rt, rg, opc, fnm) VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%1,%2", fnm)
#if __GNUG__ < 7
// up to GCC 6, vec_mul only supports single/double precision and long long
# ifdef vec_mul
# undef vec_mul
# endif
/*
 * There is no direct instruction for 8-bit and 16-bit multiplication in ISA 2.07;
 * XLC implements it using the "multiply even", "multiply odd" and "permute" instructions.
**/
# define VSX_IMPL_MULH(Tvec, cperm) \
    VSX_FINLINE(Tvec) vec_mul(const Tvec& a, const Tvec& b) \
    { \
        static const vec_uchar16 ev_od = {cperm}; \
        return vec_perm((Tvec)vec_mule(a, b), (Tvec)vec_mulo(a, b), ev_od); \
    }
#define VSX_IMPL_MULH_P16 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30
VSX_IMPL_MULH(vec_char16, VSX_IMPL_MULH_P16)
VSX_IMPL_MULH(vec_uchar16, VSX_IMPL_MULH_P16)
#define VSX_IMPL_MULH_P8 0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29
VSX_IMPL_MULH(vec_short8, VSX_IMPL_MULH_P8)
VSX_IMPL_MULH(vec_ushort8, VSX_IMPL_MULH_P8)
// vmuluwm can be used for both unsigned and signed 32-bit integers, according to the ISA
VSX_IMPL_2VRG(vec_int4, vec_int4, vmuluwm, vec_mul)
VSX_IMPL_2VRG(vec_uint4, vec_uint4, vmuluwm, vec_mul)
// redirect to the GCC builtin vec_mul, since it already supports single/double precision and long long
VSX_REDIRECT_2RG(vec_float4, vec_float4, vec_mul, __builtin_vec_mul)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mul, __builtin_vec_mul)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mul, __builtin_vec_mul)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mul, __builtin_vec_mul)
#endif // __GNUG__ < 7
#if __GNUG__ < 6
/*
 * The "compare greater than or equal" instruction in ISA 2.07 only supports single
 * and double precision.
 * XLC and newer versions of GCC implement the integer variants using the
 * "compare greater than" instruction followed by NOR.
**/
# ifdef vec_cmpge
# undef vec_cmpge
# endif
# ifdef vec_cmple
# undef vec_cmple
# endif
# define vec_cmple(a, b) vec_cmpge(b, a)
# define VSX_IMPL_CMPGE(rt, rg, opc, fnm) \
VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%2,%1\n\t xxlnor %x0,%x0,%x0", fnm)
VSX_IMPL_CMPGE(vec_bchar16, vec_char16, vcmpgtsb, vec_cmpge)
VSX_IMPL_CMPGE(vec_bchar16, vec_uchar16, vcmpgtub, vec_cmpge)
VSX_IMPL_CMPGE(vec_bshort8, vec_short8, vcmpgtsh, vec_cmpge)
VSX_IMPL_CMPGE(vec_bshort8, vec_ushort8, vcmpgtuh, vec_cmpge)
VSX_IMPL_CMPGE(vec_bint4, vec_int4, vcmpgtsw, vec_cmpge)
VSX_IMPL_CMPGE(vec_bint4, vec_uint4, vcmpgtuw, vec_cmpge)
VSX_IMPL_CMPGE(vec_bdword2, vec_dword2, vcmpgtsd, vec_cmpge)
VSX_IMPL_CMPGE(vec_bdword2, vec_udword2, vcmpgtud, vec_cmpge)
// redirect to the GCC builtin cmpge, since it already supports single and double precision
VSX_REDIRECT_2RG(vec_bint4, vec_float4, vec_cmpge, __builtin_vec_cmpge)
VSX_REDIRECT_2RG(vec_bdword2, vec_double2, vec_cmpge, __builtin_vec_cmpge)
// up to GCC 5, vec_nor doesn't support bool long long
# undef vec_nor
template<typename T>
VSX_REDIRECT_2RG(T, T, vec_nor, __builtin_vec_nor)
VSX_FINLINE(vec_bdword2) vec_nor(const vec_bdword2& a, const vec_bdword2& b)
{ return vec_bdword2_c(__builtin_vec_nor(vec_dword2_c(a), vec_dword2_c(b))); }
// vec_packs doesn't support doublewords in GCC 4 and old versions of GCC 5
# undef vec_packs
VSX_REDIRECT_2RG(vec_char16, vec_short8, vec_packs, __builtin_vec_packs)
VSX_REDIRECT_2RG(vec_uchar16, vec_ushort8, vec_packs, __builtin_vec_packs)
VSX_REDIRECT_2RG(vec_short8, vec_int4, vec_packs, __builtin_vec_packs)
VSX_REDIRECT_2RG(vec_ushort8, vec_uint4, vec_packs, __builtin_vec_packs)
VSX_IMPL_2VRG_F(vec_int4, vec_dword2, "vpksdss %0,%2,%1", vec_packs)
VSX_IMPL_2VRG_F(vec_uint4, vec_udword2, "vpkudus %0,%2,%1", vec_packs)
#endif // __GNUG__ < 6
#if __GNUG__ < 5
// vec_xxpermdi in GCC 4 is missing little-endian support, just like Clang
# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ (((c) & 1) << 1 | (c) >> 1)))
#else
# define vec_permi vec_xxpermdi
#endif // __GNUG__ < 5
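// Illustrative note on the selector, inferred from the uses further below in this header:
// vec_permi(a, b, c) with a 2-bit immediate c yields { a[(c >> 1) & 1], b[c & 1] },
// e.g. vec_permi(a, b, 1) == { a[0], b[1] } and vec_permi(a, b, 2) == { a[1], b[0] }.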
// shift left double by word immediate
#ifndef vec_sldw
# define vec_sldw __builtin_vsx_xxsldwi
#endif
// vector population count
VSX_IMPL_1VRG(vec_uchar16, vec_uchar16, vpopcntb, vec_popcntu)
VSX_IMPL_1VRG(vec_uchar16, vec_char16, vpopcntb, vec_popcntu)
VSX_IMPL_1VRG(vec_ushort8, vec_ushort8, vpopcnth, vec_popcntu)
VSX_IMPL_1VRG(vec_ushort8, vec_short8, vpopcnth, vec_popcntu)
VSX_IMPL_1VRG(vec_uint4, vec_uint4, vpopcntw, vec_popcntu)
VSX_IMPL_1VRG(vec_uint4, vec_int4, vpopcntw, vec_popcntu)
VSX_IMPL_1VRG(vec_udword2, vec_udword2, vpopcntd, vec_popcntu)
VSX_IMPL_1VRG(vec_udword2, vec_dword2, vpopcntd, vec_popcntu)
// converts between single and double precision
VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp)
VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp)
// converts word and doubleword to double precision
#ifdef vec_ctd
# undef vec_ctd
#endif
VSX_IMPL_1RG(vec_double2, wd, vec_int4, wa, xvcvsxwdp, vec_ctdo)
VSX_IMPL_1RG(vec_double2, wd, vec_uint4, wa, xvcvuxwdp, vec_ctdo)
VSX_IMPL_1RG(vec_double2, wd, vec_dword2, wi, xvcvsxddp, vec_ctd)
VSX_IMPL_1RG(vec_double2, wd, vec_udword2, wi, xvcvuxddp, vec_ctd)
// converts word and doubleword to single precision
#undef vec_ctf
VSX_IMPL_1RG(vec_float4, wf, vec_int4, wa, xvcvsxwsp, vec_ctf)
VSX_IMPL_1RG(vec_float4, wf, vec_uint4, wa, xvcvuxwsp, vec_ctf)
VSX_IMPL_1RG(vec_float4, wf, vec_dword2, wi, xvcvsxdsp, vec_ctfo)
VSX_IMPL_1RG(vec_float4, wf, vec_udword2, wi, xvcvuxdsp, vec_ctfo)
// converts single and double precision to signed word
#undef vec_cts
VSX_IMPL_1RG(vec_int4, wa, vec_double2, wd, xvcvdpsxws, vec_ctso)
VSX_IMPL_1RG(vec_int4, wa, vec_float4, wf, xvcvspsxws, vec_cts)
// converts single and double precision to unsigned word
#undef vec_ctu
VSX_IMPL_1RG(vec_uint4, wa, vec_double2, wd, xvcvdpuxws, vec_ctuo)
VSX_IMPL_1RG(vec_uint4, wa, vec_float4, wf, xvcvspuxws, vec_ctu)
// converts single and double precision to signed doubleword
#ifdef vec_ctsl
# undef vec_ctsl
#endif
VSX_IMPL_1RG(vec_dword2, wi, vec_double2, wd, xvcvdpsxds, vec_ctsl)
VSX_IMPL_1RG(vec_dword2, wi, vec_float4, wf, xvcvspsxds, vec_ctslo)
// converts single and double precision to unsigned doubleword
#ifdef vec_ctul
# undef vec_ctul
#endif
VSX_IMPL_1RG(vec_udword2, wi, vec_double2, wd, xvcvdpuxds, vec_ctul)
VSX_IMPL_1RG(vec_udword2, wi, vec_float4, wf, xvcvspuxds, vec_ctulo)
// just in case GCC doesn't define them
#ifndef vec_xl
# define vec_xl vec_vsx_ld
# define vec_xst vec_vsx_st
#endif
#endif // GCC VSX compatibility
/*
 * CLANG VSX compatibility
**/
#if defined(__clang__) && !defined(__IBMCPP__)
/*
 * CLANG doesn't support %x<n> in the inline asm template, which fixes the register number
 * when using any of the register constraints wa, wd, wf.
 *
 * For more explanation, check out PowerPC and IBM RS6000 in https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
 * There is also an open bug: https://bugs.llvm.org/show_bug.cgi?id=31837
 *
 * So we can't use inline asm; we only use the built-in functions that CLANG supports
 * and fall back to __builtin_convertvector when Clang is missing any of the vector conversion built-ins.
 *
 * TODO: the Clang asm template bug has been fixed; the current workarounds need to be reconsidered.
*/
// convert vector helper
#define VSX_IMPL_CONVERT(rt, rg, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) { return __builtin_convertvector(a, rt); }
#if __clang_major__ < 5
// implement vec_permi in a dirty way
# define VSX_IMPL_CLANG_4_PERMI(Tvec) \
    VSX_FINLINE(Tvec) vec_permi(const Tvec& a, const Tvec& b, unsigned const char c) \
    { \
        switch (c) \
        { \
        case 0: \
            return vec_mergeh(a, b); \
        case 1: \
            return vec_mergel(vec_mergeh(a, a), b); \
        case 2: \
            return vec_mergeh(vec_mergel(a, a), b); \
        default: \
            return vec_mergel(a, b); \
        } \
    }
VSX_IMPL_CLANG_4_PERMI(vec_udword2)
VSX_IMPL_CLANG_4_PERMI(vec_dword2)
VSX_IMPL_CLANG_4_PERMI(vec_double2)
// vec_xxsldwi is missing in Clang 4
# define vec_xxsldwi(a, b, c) vec_sld(a, b, (c) * 4)
#else
// vec_xxpermdi is missing little-endian support in Clang 4, just like GCC 4
# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ (((c) & 1) << 1 | (c) >> 1)))
#endif // __clang_major__ < 5
// shift left double by word immediate
#ifndef vec_sldw
# define vec_sldw vec_xxsldwi
#endif
// Implement vec_rsqrt since Clang only supports vec_rsqrte
#ifndef vec_rsqrt
VSX_FINLINE(vec_float4) vec_rsqrt(const vec_float4& a)
{ return vec_div(vec_float4_sp(1), vec_sqrt(a)); }
VSX_FINLINE(vec_double2) vec_rsqrt(const vec_double2& a)
{ return vec_div(vec_double2_sp(1), vec_sqrt(a)); }
#endif
// vec_promote is missing support for doubleword
VSX_FINLINE(vec_dword2) vec_promote(long long a, int b)
{
    vec_dword2 ret = vec_dword2_z;
    ret[b & 1] = a;
    return ret;
}
VSX_FINLINE(vec_udword2) vec_promote(unsigned long long a, int b)
{
    vec_udword2 ret = vec_udword2_z;
    ret[b & 1] = a;
    return ret;
}
// vec_popcnt should return unsigned, but Clang has a different opinion, just like GCC's vec_vpopcnt
#define VSX_IMPL_POPCNTU(Tvec, Tvec2, ucast) \
VSX_FINLINE(Tvec) vec_popcntu(const Tvec2& a) \
{ return ucast(vec_popcnt(a)); }
VSX_IMPL_POPCNTU(vec_uchar16, vec_char16, vec_uchar16_c);
VSX_IMPL_POPCNTU(vec_ushort8, vec_short8, vec_ushort8_c);
VSX_IMPL_POPCNTU(vec_uint4, vec_int4, vec_uint4_c);
// redirect unsigned types
VSX_REDIRECT_1RG(vec_uchar16, vec_uchar16, vec_popcntu, vec_popcnt)
VSX_REDIRECT_1RG(vec_ushort8, vec_ushort8, vec_popcntu, vec_popcnt)
VSX_REDIRECT_1RG(vec_uint4, vec_uint4, vec_popcntu, vec_popcnt)
// converts between single and double precision
VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp)
VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp)
// converts word and doubleword to double precision
#ifdef vec_ctd
# undef vec_ctd
#endif
VSX_REDIRECT_1RG(vec_double2, vec_int4, vec_ctdo, __builtin_vsx_xvcvsxwdp)
VSX_REDIRECT_1RG(vec_double2, vec_uint4, vec_ctdo, __builtin_vsx_xvcvuxwdp)
VSX_IMPL_CONVERT(vec_double2, vec_dword2, vec_ctd)
VSX_IMPL_CONVERT(vec_double2, vec_udword2, vec_ctd)
// converts word and doubleword to single precision
#if __clang_major__ > 4
# undef vec_ctf
#endif
VSX_IMPL_CONVERT(vec_float4, vec_int4, vec_ctf)
VSX_IMPL_CONVERT(vec_float4, vec_uint4, vec_ctf)
VSX_REDIRECT_1RG(vec_float4, vec_dword2, vec_ctfo, __builtin_vsx_xvcvsxdsp)
VSX_REDIRECT_1RG(vec_float4, vec_udword2, vec_ctfo, __builtin_vsx_xvcvuxdsp)
// converts single and double precision to signed word
#if __clang_major__ > 4
# undef vec_cts
#endif
VSX_REDIRECT_1RG(vec_int4, vec_double2, vec_ctso, __builtin_vsx_xvcvdpsxws)
VSX_IMPL_CONVERT(vec_int4, vec_float4, vec_cts)
// converts single and double precision to unsigned word
#if __clang_major__ > 4
# undef vec_ctu
#endif
VSX_REDIRECT_1RG(vec_uint4, vec_double2, vec_ctuo, __builtin_vsx_xvcvdpuxws)
VSX_IMPL_CONVERT(vec_uint4, vec_float4, vec_ctu)
// converts single and double precision to signed doubleword
#ifdef vec_ctsl
# undef vec_ctsl
#endif
VSX_IMPL_CONVERT(vec_dword2, vec_double2, vec_ctsl)
// __builtin_convertvector can't handle this one; xvcvspsxds is missing from it
VSX_FINLINE(vec_dword2) vec_ctslo(const vec_float4& a)
{ return vec_ctsl(vec_cvfo(a)); }
// converts single and double precision to unsigned doubleword
#ifdef vec_ctul
# undef vec_ctul
#endif
VSX_IMPL_CONVERT(vec_udword2, vec_double2, vec_ctul)
// __builtin_convertvector can't handle this one; xvcvspuxds is missing from it
VSX_FINLINE(vec_udword2) vec_ctulo(const vec_float4& a)
{ return vec_ctul(vec_cvfo(a)); }
#endif // CLANG VSX compatibility
/*
 * Common GCC, CLANG compatibility
**/
#if defined(__GNUG__) && !defined(__IBMCPP__)
#ifdef vec_cvf
# undef vec_cvf
#endif
#define VSX_IMPL_CONV_EVEN_4_2(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ return fn2(vec_sldw(a, a, 1)); }
VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_float4, vec_cvf, vec_cvfo)
VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_int4, vec_ctd, vec_ctdo)
VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_uint4, vec_ctd, vec_ctdo)
VSX_IMPL_CONV_EVEN_4_2(vec_dword2, vec_float4, vec_ctsl, vec_ctslo)
VSX_IMPL_CONV_EVEN_4_2(vec_udword2, vec_float4, vec_ctul, vec_ctulo)
#define VSX_IMPL_CONV_EVEN_2_4(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ \
    rt v4 = fn2(a); \
    return vec_sldw(v4, v4, 3); \
}
VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_double2, vec_cvf, vec_cvfo)
VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_dword2, vec_ctf, vec_ctfo)
VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_udword2, vec_ctf, vec_ctfo)
VSX_IMPL_CONV_EVEN_2_4(vec_int4, vec_double2, vec_cts, vec_ctso)
VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo)
// Only for Eigen!
/*
 * Changing the behavior of the conversion intrinsics for GCC affects Eigen,
 * so we redefine the old behavior again, only for GCC and CLANG.
*/
#if !defined(__clang__) || __clang_major__ > 4
// ignoring the second arg since Eigen only truncates toward zero
# define VSX_IMPL_CONV_2VARIANT(rt, rg, fnm, fn2) \
    VSX_FINLINE(rt) fnm(const rg& a, int only_truncate) \
    { \
        assert(only_truncate == 0); \
        CV_UNUSED(only_truncate); \
        return fn2(a); \
    }
VSX_IMPL_CONV_2VARIANT(vec_int4, vec_float4, vec_cts, vec_cts)
VSX_IMPL_CONV_2VARIANT(vec_float4, vec_int4, vec_ctf, vec_ctf)
// define vec_cts for converting double precision to signed doubleword,
// which isn't compatible with XLC, but that's okay since Eigen only uses it with GCC
VSX_IMPL_CONV_2VARIANT(vec_dword2, vec_double2, vec_cts, vec_ctsl)
#endif // Eigen
#endif // Common GCC, CLANG compatibility
/*
 * XLC VSX compatibility
**/
#if defined(__IBMCPP__)
// vector population count
#define vec_popcntu vec_popcnt
// overload and redirect with the second arg set to zero,
// since we only support conversions without the second arg
#define VSX_IMPL_OVERLOAD_Z2(rt, rg, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) { return fnm(a, 0); }
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_int4, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_uint4, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_dword2, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_udword2, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_int4, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_uint4, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_dword2, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_udword2, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_int4, vec_double2, vec_cts)
VSX_IMPL_OVERLOAD_Z2(vec_int4, vec_float4, vec_cts)
VSX_IMPL_OVERLOAD_Z2(vec_uint4, vec_double2, vec_ctu)
VSX_IMPL_OVERLOAD_Z2(vec_uint4, vec_float4, vec_ctu)
VSX_IMPL_OVERLOAD_Z2(vec_dword2, vec_double2, vec_ctsl)
VSX_IMPL_OVERLOAD_Z2(vec_dword2, vec_float4, vec_ctsl)
VSX_IMPL_OVERLOAD_Z2(vec_udword2, vec_double2, vec_ctul)
VSX_IMPL_OVERLOAD_Z2(vec_udword2, vec_float4, vec_ctul)
// FIXME: implement conversions of odd-numbered elements in a dirty way,
// since XLC doesn't support VSX register operands in inline asm.
#define VSX_IMPL_CONV_ODD_4_2(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) { return fn2(vec_sldw(a, a, 3)); }
VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_float4, vec_cvfo, vec_cvf)
VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_int4, vec_ctdo, vec_ctd)
VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_uint4, vec_ctdo, vec_ctd)
VSX_IMPL_CONV_ODD_4_2(vec_dword2, vec_float4, vec_ctslo, vec_ctsl)
VSX_IMPL_CONV_ODD_4_2(vec_udword2, vec_float4, vec_ctulo, vec_ctul)
#define VSX_IMPL_CONV_ODD_2_4(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ \
    rt v4 = fn2(a); \
    return vec_sldw(v4, v4, 1); \
}
VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_double2, vec_cvfo, vec_cvf)
VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_dword2, vec_ctfo, vec_ctf)
VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_udword2, vec_ctfo, vec_ctf)
VSX_IMPL_CONV_ODD_2_4(vec_int4, vec_double2, vec_ctso, vec_cts)
VSX_IMPL_CONV_ODD_2_4(vec_uint4, vec_double2, vec_ctuo, vec_ctu)
#endif // XLC VSX compatibility
// ignore the GCC warning caused by -Wunused-but-set-variable in rare cases
#if defined(__GNUG__) && !defined(__clang__)
# define VSX_UNUSED(Tvec) Tvec __attribute__((__unused__))
#else // CLANG, XLC
# define VSX_UNUSED(Tvec) Tvec
#endif
// GCC can find its way when casting long int, while XLC and CLANG find it ambiguous
#if defined(__clang__) || defined(__IBMCPP__)
VSX_FINLINE(vec_udword2) vec_splats(uint64 v)
{ return vec_splats((unsigned long long) v); }
VSX_FINLINE(vec_dword2) vec_splats(int64 v)
{ return vec_splats((long long) v); }
VSX_FINLINE(vec_udword2) vec_promote(uint64 a, int b)
{ return vec_promote((unsigned long long) a, b); }
VSX_FINLINE(vec_dword2) vec_promote(int64 a, int b)
{ return vec_promote((long long) a, b); }
#endif
/*
 * Implement vsx_ld(offset, pointer), vsx_st(vector, offset, pointer):
 * load and store using an offset that depends on the pointer type.
 *
 * Implement vsx_ldf(offset, pointer), vsx_stf(vector, offset, pointer):
 * load and store using an offset in fixed byte units.
 *
 * Note: in CLANG, vec_xl and vec_xst fail to load from unaligned addresses,
 * so we use vec_vsx_ld and vec_vsx_st instead.
*/
#if defined(__clang__) && !defined(__IBMCPP__)
# define vsx_ldf vec_vsx_ld
# define vsx_stf vec_vsx_st
#else // GCC, XLC
# define vsx_ldf vec_xl
# define vsx_stf vec_xst
#endif
#define VSX_OFFSET(o, p) ((o) * sizeof(*(p)))
#define vsx_ld(o, p) vsx_ldf(VSX_OFFSET(o, p), p)
#define vsx_st(v, o, p) vsx_stf(v, VSX_OFFSET(o, p), p)
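// Example (illustrative; element offsets are scaled by sizeof(*p)):
//     float buf[8];
//     vec_float4 v = vsx_ld(4, buf); // loads buf[4..7] (byte offset 4 * sizeof(float) = 16)
//     vsx_st(v, 0, buf);             // stores the four lanes to buf[0..3]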
/*
 * Implement vsx_ld2(offset, pointer), vsx_st2(vector, offset, pointer) to load and store doublewords.
 *
 * In GCC, vec_xl and vec_xst map to vec_vsx_ld and vec_vsx_st, which don't support long long,
 * and in CLANG we use vec_vsx_ld and vec_vsx_st because vec_xl and vec_xst fail to load from
 * unaligned addresses.
 *
 * In XLC, vec_xl and vec_xst fail to cast int64 (long int) to long long.
*/
#if (defined(__GNUG__) || defined(__clang__)) && !defined(__IBMCPP__)
VSX_FINLINE(vec_udword2) vsx_ld2(long o, const uint64* p)
{ return vec_udword2_c(vsx_ldf(VSX_OFFSET(o, p), (unsigned int*)p)); }
VSX_FINLINE(vec_dword2) vsx_ld2(long o, const int64* p)
{ return vec_dword2_c(vsx_ldf(VSX_OFFSET(o, p), (int*)p)); }
VSX_FINLINE(void) vsx_st2(const vec_udword2& vec, long o, uint64* p)
{ vsx_stf(vec_uint4_c(vec), VSX_OFFSET(o, p), (unsigned int*)p); }
VSX_FINLINE(void) vsx_st2(const vec_dword2& vec, long o, int64* p)
{ vsx_stf(vec_int4_c(vec), VSX_OFFSET(o, p), (int*)p); }
#else // XLC
VSX_FINLINE(vec_udword2) vsx_ld2(long o, const uint64* p)
{ return vsx_ldf(VSX_OFFSET(o, p), (unsigned long long*)p); }
VSX_FINLINE(vec_dword2) vsx_ld2(long o, const int64* p)
{ return vsx_ldf(VSX_OFFSET(o, p), (long long*)p); }
VSX_FINLINE(void) vsx_st2(const vec_udword2& vec, long o, uint64* p)
{ vsx_stf(vec, VSX_OFFSET(o, p), (unsigned long long*)p); }
VSX_FINLINE(void) vsx_st2(const vec_dword2& vec, long o, int64* p)
{ vsx_stf(vec, VSX_OFFSET(o, p), (long long*)p); }
#endif
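// Example (illustrative; the offset is in 64-bit elements):
//     uint64 buf[4];
//     vec_udword2 v = vsx_ld2(1, buf); // loads buf[1..2] (byte offset 1 * sizeof(uint64) = 8)
//     vsx_st2(v, 0, buf);              // stores both lanes to buf[0..1]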
// Store the lower 8 bytes
#define vec_st_l8(v, p) *((uint64*)(p)) = vec_extract(vec_udword2_c(v), 0)
// Store the higher 8 bytes
#define vec_st_h8(v, p) *((uint64*)(p)) = vec_extract(vec_udword2_c(v), 1)
// Load 64 bits of integer data into the lower part
#define VSX_IMPL_LOAD_L8(Tvec, Tp) \
VSX_FINLINE(Tvec) vec_ld_l8(const Tp *p) \
{ return ((Tvec)vec_promote(*((uint64*)p), 0)); }
VSX_IMPL_LOAD_L8(vec_uchar16, uchar)
VSX_IMPL_LOAD_L8(vec_char16, schar)
VSX_IMPL_LOAD_L8(vec_ushort8, ushort)
VSX_IMPL_LOAD_L8(vec_short8, short)
VSX_IMPL_LOAD_L8(vec_uint4, uint)
VSX_IMPL_LOAD_L8(vec_int4, int)
VSX_IMPL_LOAD_L8(vec_float4, float)
VSX_IMPL_LOAD_L8(vec_udword2, uint64)
VSX_IMPL_LOAD_L8(vec_dword2, int64)
VSX_IMPL_LOAD_L8(vec_double2, double)
// logical not
#define vec_not(a) vec_nor(a, a)
// fallbacks for intrinsics that are only available natively with Power9 (ISA 3.0)
// not equal
#ifndef vec_cmpne
# define vec_cmpne(a, b) vec_not(vec_cmpeq(a, b))
#endif
// absolute difference
#ifndef vec_absd
# define vec_absd(a, b) vec_sub(vec_max(a, b), vec_min(a, b))
#endif
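// Example (illustrative): for unsigned lanes vec_absd(a, b) == |a - b| without overflow,
// e.g. with uchar lanes a = 10 and b = 250 the result is max - min = 250 - 10 = 240.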
/*
 * Implement vec_unpacklu and vec_unpackhu,
 * since vec_unpackl and vec_unpackh only support signed integers.
**/
#define VSX_IMPL_UNPACKU(rt, rg, zero) \
VSX_FINLINE(rt) vec_unpacklu(const rg& a) \
{ return (rt)(vec_mergel(a, zero)); } \
VSX_FINLINE(rt) vec_unpackhu(const rg& a) \
{ return (rt)(vec_mergeh(a, zero)); }
VSX_IMPL_UNPACKU(vec_ushort8, vec_uchar16, vec_uchar16_z)
VSX_IMPL_UNPACKU(vec_uint4, vec_ushort8, vec_ushort8_z)
VSX_IMPL_UNPACKU(vec_udword2, vec_uint4, vec_uint4_z)
/*
 * Implement vec_mergesqe and vec_mergesqo:
 * merge the even-indexed and odd-indexed elements of two vectors, in sequence.
*/
#define VSX_IMPL_PERM(rt, fnm, ...) \
VSX_FINLINE(rt) fnm(const rt& a, const rt& b) \
{ static const vec_uchar16 perm = {__VA_ARGS__}; return vec_perm(a, b, perm); }
// 16
#define perm16_mergesqe 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
#define perm16_mergesqo 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
VSX_IMPL_PERM(vec_uchar16, vec_mergesqe, perm16_mergesqe)
VSX_IMPL_PERM(vec_uchar16, vec_mergesqo, perm16_mergesqo)
VSX_IMPL_PERM(vec_char16, vec_mergesqe, perm16_mergesqe)
VSX_IMPL_PERM(vec_char16, vec_mergesqo, perm16_mergesqo)
// 8
#define perm8_mergesqe 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29
#define perm8_mergesqo 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31
VSX_IMPL_PERM(vec_ushort8, vec_mergesqe, perm8_mergesqe)
VSX_IMPL_PERM(vec_ushort8, vec_mergesqo, perm8_mergesqo)
VSX_IMPL_PERM(vec_short8, vec_mergesqe, perm8_mergesqe)
VSX_IMPL_PERM(vec_short8, vec_mergesqo, perm8_mergesqo)
// 4
#define perm4_mergesqe 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
#define perm4_mergesqo 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
VSX_IMPL_PERM(vec_uint4, vec_mergesqe, perm4_mergesqe)
VSX_IMPL_PERM(vec_uint4, vec_mergesqo, perm4_mergesqo)
VSX_IMPL_PERM(vec_int4, vec_mergesqe, perm4_mergesqe)
VSX_IMPL_PERM(vec_int4, vec_mergesqo, perm4_mergesqo)
VSX_IMPL_PERM(vec_float4, vec_mergesqe, perm4_mergesqe)
VSX_IMPL_PERM(vec_float4, vec_mergesqo, perm4_mergesqo)
// 2
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqe, vec_mergeh)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqo, vec_mergel)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqe, vec_mergeh)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqo, vec_mergel)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqe, vec_mergeh)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqo, vec_mergel)
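// Example (illustrative): with vec_uint4 a = {a0, a1, a2, a3} and b = {b0, b1, b2, b3},
//     vec_mergesqe(a, b) == {a0, a2, b0, b2}   // even-indexed elements of a, then of b
//     vec_mergesqo(a, b) == {a1, a3, b1, b3}   // odd-indexed elements of a, then of b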
/*
 * Implement vec_mergesqh and vec_mergesql:
 * merge the most-significant and least-significant halves of two vectors, in sequence.
*/
#define VSX_IMPL_MERGESQHL(Tvec) \
VSX_FINLINE(Tvec) vec_mergesqh(const Tvec& a, const Tvec& b) \
{ return (Tvec)vec_mergeh(vec_udword2_c(a), vec_udword2_c(b)); } \
VSX_FINLINE(Tvec) vec_mergesql(const Tvec& a, const Tvec& b) \
{ return (Tvec)vec_mergel(vec_udword2_c(a), vec_udword2_c(b)); }
VSX_IMPL_MERGESQHL(vec_uchar16)
VSX_IMPL_MERGESQHL(vec_char16)
VSX_IMPL_MERGESQHL(vec_ushort8)
VSX_IMPL_MERGESQHL(vec_short8)
VSX_IMPL_MERGESQHL(vec_uint4)
VSX_IMPL_MERGESQHL(vec_int4)
VSX_IMPL_MERGESQHL(vec_float4)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqh, vec_mergeh)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesql, vec_mergel)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqh, vec_mergeh)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesql, vec_mergel)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqh, vec_mergeh)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesql, vec_mergel)
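// Example (illustrative): with vec_uint4 a = {a0, a1, a2, a3} and b = {b0, b1, b2, b3},
//     vec_mergesqh(a, b) == {a0, a1, b0, b1}   // first (doubleword) halves of a and b
//     vec_mergesql(a, b) == {a2, a3, b2, b3}   // second (doubleword) halves of a and b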
// 2- and 4-channel interleave for all types except 2-lane types
#define VSX_IMPL_ST_INTERLEAVE(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, Tp* ptr) \
{ \
    vsx_stf(vec_mergeh(a, b), 0, ptr); \
    vsx_stf(vec_mergel(a, b), 16, ptr); \
} \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, const Tvec& d, Tp* ptr) \
{ \
    Tvec ac = vec_mergeh(a, c); \
    Tvec bd = vec_mergeh(b, d); \
    vsx_stf(vec_mergeh(ac, bd), 0, ptr); \
    vsx_stf(vec_mergel(ac, bd), 16, ptr); \
    ac = vec_mergel(a, c); \
    bd = vec_mergel(b, d); \
    vsx_stf(vec_mergeh(ac, bd), 32, ptr); \
    vsx_stf(vec_mergel(ac, bd), 48, ptr); \
}
VSX_IMPL_ST_INTERLEAVE(uchar, vec_uchar16)
VSX_IMPL_ST_INTERLEAVE(schar, vec_char16)
VSX_IMPL_ST_INTERLEAVE(ushort, vec_ushort8)
VSX_IMPL_ST_INTERLEAVE(short, vec_short8)
VSX_IMPL_ST_INTERLEAVE(uint, vec_uint4)
VSX_IMPL_ST_INTERLEAVE(int, vec_int4)
VSX_IMPL_ST_INTERLEAVE(float, vec_float4)
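// Example (illustrative): with vec_uint4 a = {a0..a3} and b = {b0..b3},
// vec_st_interleave(a, b, ptr) stores a0, b0, a1, b1, a2, b2, a3, b3 to ptr[0..7];
// the 4-channel overload stores a0, b0, c0, d0, a1, b1, c1, d1, ... to ptr[0..15].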
// 2- and 4-channel deinterleave for 16 lanes
#define VSX_IMPL_ST_DINTERLEAVE_8(Tp, Tvec) \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(16, ptr); \
    a = vec_mergesqe(v0, v1); \
    b = vec_mergesqo(v0, v1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(16, ptr); \
    Tvec v2 = vsx_ld(32, ptr); \
    Tvec v3 = vsx_ld(48, ptr); \
    Tvec m0 = vec_mergesqe(v0, v1); \
    Tvec m1 = vec_mergesqe(v2, v3); \
    a = vec_mergesqe(m0, m1); \
    c = vec_mergesqo(m0, m1); \
    m0 = vec_mergesqo(v0, v1); \
    m1 = vec_mergesqo(v2, v3); \
    b = vec_mergesqe(m0, m1); \
    d = vec_mergesqo(m0, m1); \
}
VSX_IMPL_ST_DINTERLEAVE_8(uchar, vec_uchar16)
VSX_IMPL_ST_DINTERLEAVE_8(schar, vec_char16)
// 2- and 4-channel deinterleave for 8 lanes
#define VSX_IMPL_ST_DINTERLEAVE_16(Tp, Tvec) \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(8, ptr); \
    a = vec_mergesqe(v0, v1); \
    b = vec_mergesqo(v0, v1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(8, ptr); \
    Tvec m0 = vec_mergeh(v0, v1); \
    Tvec m1 = vec_mergel(v0, v1); \
    Tvec ab0 = vec_mergeh(m0, m1); \
    Tvec cd0 = vec_mergel(m0, m1); \
    v0 = vsx_ld(16, ptr); \
    v1 = vsx_ld(24, ptr); \
    m0 = vec_mergeh(v0, v1); \
    m1 = vec_mergel(v0, v1); \
    Tvec ab1 = vec_mergeh(m0, m1); \
    Tvec cd1 = vec_mergel(m0, m1); \
    a = vec_mergesqh(ab0, ab1); \
    b = vec_mergesql(ab0, ab1); \
    c = vec_mergesqh(cd0, cd1); \
    d = vec_mergesql(cd0, cd1); \
}
VSX_IMPL_ST_DINTERLEAVE_16(ushort, vec_ushort8)
VSX_IMPL_ST_DINTERLEAVE_16(short, vec_short8)
// 2- and 4-channel deinterleave for 4 lanes
#define VSX_IMPL_ST_DINTERLEAVE_32(Tp, Tvec) \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    a = vsx_ld(0, ptr); \
    b = vsx_ld(4, ptr); \
    Tvec m0 = vec_mergeh(a, b); \
    Tvec m1 = vec_mergel(a, b); \
    a = vec_mergeh(m0, m1); \
    b = vec_mergel(m0, m1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(4, ptr); \
    Tvec v2 = vsx_ld(8, ptr); \
    Tvec v3 = vsx_ld(12, ptr); \
    Tvec m0 = vec_mergeh(v0, v2); \
    Tvec m1 = vec_mergeh(v1, v3); \
    a = vec_mergeh(m0, m1); \
    b = vec_mergel(m0, m1); \
    m0 = vec_mergel(v0, v2); \
    m1 = vec_mergel(v1, v3); \
    c = vec_mergeh(m0, m1); \
    d = vec_mergel(m0, m1); \
}
VSX_IMPL_ST_DINTERLEAVE_32(uint, vec_uint4)
VSX_IMPL_ST_DINTERLEAVE_32(int, vec_int4)
VSX_IMPL_ST_DINTERLEAVE_32(float, vec_float4)
// 2- and 4-channel interleave and deinterleave for 2 lanes
#define VSX_IMPL_ST_D_INTERLEAVE_64(Tp, Tvec, ld_func, st_func) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, Tp* ptr) \
{ \
    st_func(vec_mergeh(a, b), 0, ptr); \
    st_func(vec_mergel(a, b), 2, ptr); \
} \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, const Tvec& d, Tp* ptr) \
{ \
    st_func(vec_mergeh(a, b), 0, ptr); \
    st_func(vec_mergeh(c, d), 2, ptr); \
    st_func(vec_mergel(a, b), 4, ptr); \
    st_func(vec_mergel(c, d), 6, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    Tvec m0 = ld_func(0, ptr); \
    Tvec m1 = ld_func(2, ptr); \
    a = vec_mergeh(m0, m1); \
    b = vec_mergel(m0, m1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = ld_func(0, ptr); \
    Tvec v1 = ld_func(2, ptr); \
    Tvec v2 = ld_func(4, ptr); \
    Tvec v3 = ld_func(6, ptr); \
    a = vec_mergeh(v0, v2); \
    b = vec_mergel(v0, v2); \
    c = vec_mergeh(v1, v3); \
    d = vec_mergel(v1, v3); \
}
VSX_IMPL_ST_D_INTERLEAVE_64(int64, vec_dword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_D_INTERLEAVE_64(uint64, vec_udword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_D_INTERLEAVE_64(double, vec_double2, vsx_ld, vsx_st)
/* 3 channels */
#define VSX_IMPL_ST_INTERLEAVE_3CH_16(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    static const vec_uchar16 a12 = {0, 16, 0, 1, 17, 0, 2, 18, 0, 3, 19, 0, 4, 20, 0, 5}; \
    static const vec_uchar16 a123 = {0, 1, 16, 3, 4, 17, 6, 7, 18, 9, 10, 19, 12, 13, 20, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, a12), c, a123), 0, ptr); \
    static const vec_uchar16 b12 = {21, 0, 6, 22, 0, 7, 23, 0, 8, 24, 0, 9, 25, 0, 10, 26}; \
    static const vec_uchar16 b123 = {0, 21, 2, 3, 22, 5, 6, 23, 8, 9, 24, 11, 12, 25, 14, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, b12), c, b123), 16, ptr); \
    static const vec_uchar16 c12 = {0, 11, 27, 0, 12, 28, 0, 13, 29, 0, 14, 30, 0, 15, 31, 0}; \
    static const vec_uchar16 c123 = {26, 1, 2, 27, 4, 5, 28, 7, 8, 29, 10, 11, 30, 13, 14, 31}; \
    vsx_st(vec_perm(vec_perm(a, b, c12), c, c123), 32, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \
{ \
    Tvec v1 = vsx_ld(0, ptr); \
    Tvec v2 = vsx_ld(16, ptr); \
    Tvec v3 = vsx_ld(32, ptr); \
    static const vec_uchar16 a12_perm = {0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 a123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 20, 23, 26, 29}; \
    a = vec_perm(vec_perm(v1, v2, a12_perm), v3, a123_perm); \
    static const vec_uchar16 b12_perm = {1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 b123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 18, 21, 24, 27, 30}; \
    b = vec_perm(vec_perm(v1, v2, b12_perm), v3, b123_perm); \
    static const vec_uchar16 c12_perm = {2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 0, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 c123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 19, 22, 25, 28, 31}; \
    c = vec_perm(vec_perm(v1, v2, c12_perm), v3, c123_perm); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_16(uchar, vec_uchar16)
VSX_IMPL_ST_INTERLEAVE_3CH_16(schar, vec_char16)
#define VSX_IMPL_ST_INTERLEAVE_3CH_8(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    static const vec_uchar16 a12 = {0, 1, 16, 17, 0, 0, 2, 3, 18, 19, 0, 0, 4, 5, 20, 21}; \
    static const vec_uchar16 a123 = {0, 1, 2, 3, 16, 17, 6, 7, 8, 9, 18, 19, 12, 13, 14, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, a12), c, a123), 0, ptr); \
    static const vec_uchar16 b12 = {0, 0, 6, 7, 22, 23, 0, 0, 8, 9, 24, 25, 0, 0, 10, 11}; \
    static const vec_uchar16 b123 = {20, 21, 2, 3, 4, 5, 22, 23, 8, 9, 10, 11, 24, 25, 14, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, b12), c, b123), 8, ptr); \
    static const vec_uchar16 c12 = {26, 27, 0, 0, 12, 13, 28, 29, 0, 0, 14, 15, 30, 31, 0, 0}; \
    static const vec_uchar16 c123 = {0, 1, 26, 27, 4, 5, 6, 7, 28, 29, 10, 11, 12, 13, 30, 31}; \
    vsx_st(vec_perm(vec_perm(a, b, c12), c, c123), 16, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \
{ \
    Tvec v1 = vsx_ld(0, ptr); \
    Tvec v2 = vsx_ld(8, ptr); \
    Tvec v3 = vsx_ld(16, ptr); \
    static const vec_uchar16 a12_perm = {0, 1, 6, 7, 12, 13, 18, 19, 24, 25, 30, 31, 0, 0, 0, 0}; \
    static const vec_uchar16 a123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20, 21, 26, 27}; \
    a = vec_perm(vec_perm(v1, v2, a12_perm), v3, a123_perm); \
    static const vec_uchar16 b12_perm = {2, 3, 8, 9, 14, 15, 20, 21, 26, 27, 0, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 b123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 22, 23, 28, 29}; \
    b = vec_perm(vec_perm(v1, v2, b12_perm), v3, b123_perm); \
    static const vec_uchar16 c12_perm = {4, 5, 10, 11, 16, 17, 22, 23, 28, 29, 0, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 c123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 24, 25, 30, 31}; \
    c = vec_perm(vec_perm(v1, v2, c12_perm), v3, c123_perm); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_8(ushort, vec_ushort8)
VSX_IMPL_ST_INTERLEAVE_3CH_8(short, vec_short8)
#define VSX_IMPL_ST_INTERLEAVE_3CH_4(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    Tvec hbc = vec_mergeh(b, c); \
    static const vec_uchar16 ahbc = {0, 1, 2, 3, 16, 17, 18, 19, 20, 21, 22, 23, 4, 5, 6, 7}; \
    vsx_st(vec_perm(a, hbc, ahbc), 0, ptr); \
    Tvec lab = vec_mergel(a, b); \
    vsx_st(vec_sld(lab, hbc, 8), 4, ptr); \
    static const vec_uchar16 clab = {8, 9, 10, 11, 24, 25, 26, 27, 28, 29, 30, 31, 12, 13, 14, 15}; \
    vsx_st(vec_perm(c, lab, clab), 8, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \
{ \
    Tvec v1 = vsx_ld(0, ptr); \
    Tvec v2 = vsx_ld(4, ptr); \
    Tvec v3 = vsx_ld(8, ptr); \
    static const vec_uchar16 flp = {0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19, 28, 29, 30, 31}; \
    a = vec_perm(v1, vec_sld(v3, v2, 8), flp); \
    static const vec_uchar16 flp2 = {28, 29, 30, 31, 0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19}; \
    b = vec_perm(v2, vec_sld(v1, v3, 8), flp2); \
    c = vec_perm(vec_sld(v2, v1, 8), v3, flp); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_4(uint, vec_uint4)
VSX_IMPL_ST_INTERLEAVE_3CH_4(int, vec_int4)
VSX_IMPL_ST_INTERLEAVE_3CH_4(float, vec_float4)
#define VSX_IMPL_ST_INTERLEAVE_3CH_2(Tp, Tvec, ld_func, st_func) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    st_func(vec_mergeh(a, b), 0, ptr); \
    st_func(vec_permi(c, a, 1), 2, ptr); \
    st_func(vec_mergel(b, c), 4, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, \
                                      Tvec& b, Tvec& c) \
{ \
    Tvec v1 = ld_func(0, ptr); \
    Tvec v2 = ld_func(2, ptr); \
    Tvec v3 = ld_func(4, ptr); \
    a = vec_permi(v1, v2, 1); \
    b = vec_permi(v1, v3, 2); \
    c = vec_permi(v2, v3, 1); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_2(int64, vec_dword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_INTERLEAVE_3CH_2(uint64, vec_udword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_INTERLEAVE_3CH_2(double, vec_double2, vsx_ld, vsx_st)
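// Note (illustrative): each 3-channel vec_st_interleave above packs the lanes as
// a0, b0, c0, a1, b1, c1, ..., and the matching vec_ld_deinterleave splits such a
// packed buffer back into the separate a, b and c vectors.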
#endif // CV_VSX
//! @}
#endif // OPENCV_HAL_VSX_UTILS_HPP