cpu_id.cc 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280
  1. /*
  2. * Copyright 2011 The LibYuv Project Authors. All rights reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "libyuv/cpu_id.h"
  11. #if defined(_MSC_VER)
  12. #include <intrin.h> // For __cpuidex()
  13. #endif
  14. #if !defined(__pnacl__) && !defined(__CLR_VER) && \
  15. !defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
  16. defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  17. #include <immintrin.h> // For _xgetbv()
  18. #endif
  19. // For ArmCpuCaps() but unittested on all platforms
  20. #include <stdio.h>
  21. #include <string.h>
  22. #ifdef __cplusplus
  23. namespace libyuv {
  24. extern "C" {
  25. #endif
  26. // For functions that use the stack and have runtime checks for overflow,
  27. // use SAFEBUFFERS to avoid additional check.
  28. #if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) && \
  29. !defined(__clang__)
  30. #define SAFEBUFFERS __declspec(safebuffers)
  31. #else
  32. #define SAFEBUFFERS
  33. #endif
  34. // cpu_info_ variable for SIMD instruction sets detected.
  35. LIBYUV_API int cpu_info_ = 0;
  36. // TODO(fbarchard): Consider using int for cpuid so casting is not needed.
  37. // Low level cpuid for X86.
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
     defined(__x86_64__)) && \
    !defined(__pnacl__) && !defined(__CLR_VER)
// Executes the cpuid instruction for leaf info_eax / subleaf info_ecx and
// stores the resulting EAX, EBX, ECX, EDX into cpu_info[0..3].
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info) {
#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  // VS2010 SP1 or later: __cpuidex supports the subleaf directly.
  __cpuidex(cpu_info, info_eax, info_ecx);
#elif defined(_M_IX86)
  // Older 32 bit Visual C: no __cpuidex intrinsic; use inline assembly.
  __asm {
    mov eax, info_eax
    mov ecx, info_ecx
    mov edi, cpu_info
    cpuid
    mov [edi], eax
    mov [edi + 4], ebx
    mov [edi + 8], ecx
    mov [edi + 12], edx
  }
#else  // Visual C but not x86
  // __cpuid ignores the subleaf, so it is only valid for info_ecx == 0;
  // otherwise report zeros (no features).
  if (info_ecx == 0) {
    __cpuid(cpu_info, info_eax);
  } else {
    cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0u;
  }
#endif
// GCC version uses inline x86 assembly.
#else  // defined(_MSC_VER)
  int info_ebx, info_edx;
  asm volatile(
#if defined(__i386__) && defined(__PIC__)
      // Preserve ebx for fpic 32 bit: ebx is the GOT pointer under PIC, so
      // shuttle the cpuid result through edi instead of clobbering ebx.
      "mov %%ebx, %%edi \n"
      "cpuid \n"
      "xchg %%edi, %%ebx \n"
      : "=D"(info_ebx),
#else
      "cpuid \n"
      : "=b"(info_ebx),
#endif  // defined( __i386__) && defined(__PIC__)
        // eax/ecx are inputs and outputs ("+"); edx is output-only.
        "+a"(info_eax), "+c"(info_ecx), "=d"(info_edx));
  cpu_info[0] = info_eax;
  cpu_info[1] = info_ebx;
  cpu_info[2] = info_ecx;
  cpu_info[3] = info_edx;
#endif  // defined(_MSC_VER)
}
#else  // (defined(_M_IX86) || defined(_M_X64) ...
// Non-x86 (or pnacl/CLR) stub: report all-zero registers so callers see
// no x86 features.
LIBYUV_API
void CpuId(int eax, int ecx, int* cpu_info) {
  (void)eax;
  (void)ecx;
  cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
}
#endif
  94. // For VS2010 and earlier emit can be used:
  95. // _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0 // For VS2010 and earlier.
  96. // __asm {
  97. // xor ecx, ecx // xcr 0
  98. // xgetbv
  99. // mov xcr0, eax
  100. // }
  101. // For VS2013 and earlier 32 bit, the _xgetbv(0) optimizer produces bad code.
  102. // https://code.google.com/p/libyuv/issues/detail?id=529
#if defined(_M_IX86) && (_MSC_VER < 1900)
#pragma optimize("g", off)
#endif
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
     defined(__x86_64__)) && \
    !defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
// X86 CPUs have xgetbv to detect OS saves high parts of ymm registers.
// Returns XCR0 so callers can test which register state (SSE/YMM/ZMM)
// the OS will save and restore on context switch.
int GetXCR0() {
  int xcr0 = 0;
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  xcr0 = (int)_xgetbv(0);  // VS2010 SP1 required.  NOLINT
#elif defined(__i386__) || defined(__x86_64__)
  // Raw encoding of "xgetbv" with ECX=0; avoids requiring assembler support
  // for the mnemonic.  EDX (high half of XCR0) is clobbered and ignored.
  asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0) : "c"(0) : "%edx");
#endif  // defined(__i386__) || defined(__x86_64__)
  return xcr0;
}
#else
// xgetbv unavailable to query for OSSave support.  Return 0.
#define GetXCR0() 0
#endif  // defined(_M_IX86) || defined(_M_X64) ..
// Return optimization to previous setting.
#if defined(_M_IX86) && (_MSC_VER < 1900)
#pragma optimize("g", on)
#endif
  127. // based on libvpx arm_cpudetect.c
  128. // For Arm, but public to allow testing on any CPU
  129. LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
  130. char cpuinfo_line[512];
  131. FILE* f = fopen(cpuinfo_name, "r");
  132. if (!f) {
  133. // Assume Neon if /proc/cpuinfo is unavailable.
  134. // This will occur for Chrome sandbox for Pepper or Render process.
  135. return kCpuHasNEON;
  136. }
  137. while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
  138. if (memcmp(cpuinfo_line, "Features", 8) == 0) {
  139. char* p = strstr(cpuinfo_line, " neon");
  140. if (p && (p[5] == ' ' || p[5] == '\n')) {
  141. fclose(f);
  142. return kCpuHasNEON;
  143. }
  144. // aarch64 uses asimd for Neon.
  145. p = strstr(cpuinfo_line, " asimd");
  146. if (p) {
  147. fclose(f);
  148. return kCpuHasNEON;
  149. }
  150. }
  151. }
  152. fclose(f);
  153. return 0;
  154. }
  155. // TODO(fbarchard): Consider read_msa_ir().
  156. LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
  157. char cpuinfo_line[512];
  158. int flag = 0x0;
  159. FILE* f = fopen(cpuinfo_name, "r");
  160. if (!f) {
  161. // Assume nothing if /proc/cpuinfo is unavailable.
  162. // This will occur for Chrome sandbox for Pepper or Render process.
  163. return 0;
  164. }
  165. while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
  166. if (memcmp(cpuinfo_line, "cpu model", 9) == 0) {
  167. // Workaround early kernel without mmi in ASEs line.
  168. if (strstr(cpuinfo_line, "Loongson-3")) {
  169. flag |= kCpuHasMMI;
  170. } else if (strstr(cpuinfo_line, "Loongson-2K")) {
  171. flag |= kCpuHasMMI | kCpuHasMSA;
  172. }
  173. }
  174. if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) {
  175. if (strstr(cpuinfo_line, "loongson-mmi") &&
  176. strstr(cpuinfo_line, "loongson-ext")) {
  177. flag |= kCpuHasMMI;
  178. }
  179. if (strstr(cpuinfo_line, "msa")) {
  180. flag |= kCpuHasMSA;
  181. }
  182. // ASEs is the last line, so we can break here.
  183. break;
  184. }
  185. }
  186. fclose(f);
  187. return flag;
  188. }
// Probes the CPU (and, for AVX/AVX-512, the OS register-save state) and
// returns a bitmask of kCpuHas* flags, always including kCpuInitialized.
static SAFEBUFFERS int GetCpuFlags(void) {
  int cpu_info = 0;
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
     defined(_M_IX86))
  int cpu_info0[4] = {0, 0, 0, 0};
  int cpu_info1[4] = {0, 0, 0, 0};
  int cpu_info7[4] = {0, 0, 0, 0};
  CpuId(0, 0, cpu_info0);  // Leaf 0: eax = highest supported leaf.
  CpuId(1, 0, cpu_info1);  // Leaf 1: feature flags in ecx/edx.
  if (cpu_info0[0] >= 7) {
    CpuId(7, 0, cpu_info7);  // Leaf 7: extended feature flags in ebx/ecx.
  }
  // Leaf 1 edx bit 26 = SSE2; ecx bit 9 = SSSE3, bit 19 = SSE4.1,
  // bit 20 = SSE4.2.  Leaf 7 ebx bit 9 = ERMS (enhanced rep movsb).
  cpu_info = kCpuHasX86 | ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) |
             ((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) |
             ((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) |
             ((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) |
             ((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0);
  // AVX requires OS saves YMM registers.
  // Leaf 1 ecx bits 26 (XSAVE), 27 (OSXSAVE), 28 (AVX) must all be set,
  // and XCR0 bits 1-2 (SSE + YMM state) must be enabled by the OS.
  if (((cpu_info1[2] & 0x1c000000) == 0x1c000000) &&  // AVX and OSXSave
      ((GetXCR0() & 6) == 6)) {  // Test OS saves YMM registers
    cpu_info |= kCpuHasAVX | ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) |
                ((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) |
                ((cpu_info1[2] & 0x20000000) ? kCpuHasF16C : 0);
    // Detect AVX512bw.  XCR0 bits 5-7 = opmask/ZMM state saved by the OS.
    if ((GetXCR0() & 0xe0) == 0xe0) {
      cpu_info |= (cpu_info7[1] & 0x40000000) ? kCpuHasAVX512BW : 0;
      cpu_info |= (cpu_info7[1] & 0x80000000) ? kCpuHasAVX512VL : 0;
      cpu_info |= (cpu_info7[2] & 0x00000002) ? kCpuHasAVX512VBMI : 0;
      cpu_info |= (cpu_info7[2] & 0x00000040) ? kCpuHasAVX512VBMI2 : 0;
      cpu_info |= (cpu_info7[2] & 0x00001000) ? kCpuHasAVX512VBITALG : 0;
      cpu_info |= (cpu_info7[2] & 0x00004000) ? kCpuHasAVX512VPOPCNTDQ : 0;
      cpu_info |= (cpu_info7[2] & 0x00000100) ? kCpuHasGFNI : 0;
    }
  }
#endif
#if defined(__mips__) && defined(__linux__)
  cpu_info = MipsCpuCaps("/proc/cpuinfo");
  cpu_info |= kCpuHasMIPS;
#endif
#if defined(__arm__) || defined(__aarch64__)
// gcc -mfpu=neon defines __ARM_NEON__
// __ARM_NEON__ generates code that requires Neon.  NaCL also requires Neon.
// For Linux, /proc/cpuinfo can be tested but without that assume Neon.
#if defined(__ARM_NEON__) || defined(__native_client__) || !defined(__linux__)
  // NOTE(review): this assignment is always overwritten below (by the hard
  // coded aarch64 value or by ArmCpuCaps), so it is a dead store.  Behavior
  // is still correct because ArmCpuCaps falls back to kCpuHasNEON when the
  // cpuinfo file is unavailable, but consider removing or restructuring.
  cpu_info = kCpuHasNEON;
// For aarch64(arm64), /proc/cpuinfo's feature is not complete, e.g. no neon
// flag in it.
// So for aarch64, neon enabling is hard coded here.
#endif
#if defined(__aarch64__)
  cpu_info = kCpuHasNEON;
#else
  // Linux arm parse text file for neon detect.
  cpu_info = ArmCpuCaps("/proc/cpuinfo");
#endif
  cpu_info |= kCpuHasARM;
#endif  // __arm__
  cpu_info |= kCpuInitialized;
  return cpu_info;
}
  250. // Note that use of this function is not thread safe.
  251. LIBYUV_API
  252. int MaskCpuFlags(int enable_flags) {
  253. int cpu_info = GetCpuFlags() & enable_flags;
  254. SetCpuFlags(cpu_info);
  255. return cpu_info;
  256. }
  257. LIBYUV_API
  258. int InitCpuFlags(void) {
  259. return MaskCpuFlags(-1);
  260. }
  261. #ifdef __cplusplus
  262. } // extern "C"
  263. } // namespace libyuv
  264. #endif