You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

5938 lines
210KB

  1. /*
  2. * i386 CPUID helper functions
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include "qemu/osdep.h"
  20. #include "qemu/units.h"
  21. #include "qemu/cutils.h"
  22. #include "qemu/bitops.h"
  23. #include "cpu.h"
  24. #include "exec/exec-all.h"
  25. #include "sysemu/kvm.h"
  26. #include "sysemu/hvf.h"
  27. #include "sysemu/cpus.h"
  28. #include "kvm_i386.h"
  29. #include "sev_i386.h"
  30. #include "qemu/error-report.h"
  31. #include "qemu/option.h"
  32. #include "qemu/config-file.h"
  33. #include "qapi/error.h"
  34. #include "qapi/qapi-visit-misc.h"
  35. #include "qapi/qapi-visit-run-state.h"
  36. #include "qapi/qmp/qdict.h"
  37. #include "qapi/qmp/qerror.h"
  38. #include "qapi/visitor.h"
  39. #include "qom/qom-qobject.h"
  40. #include "sysemu/arch_init.h"
  41. #include "qapi/qapi-commands-target.h"
  42. #include "standard-headers/asm-x86/kvm_para.h"
  43. #include "sysemu/sysemu.h"
  44. #include "hw/qdev-properties.h"
  45. #include "hw/i386/topology.h"
  46. #ifndef CONFIG_USER_ONLY
  47. #include "exec/address-spaces.h"
  48. #include "hw/hw.h"
  49. #include "hw/xen/xen.h"
  50. #include "hw/i386/apic_internal.h"
  51. #endif
  52. #include "disas/capstone.h"
  53. /* Helpers for building CPUID[2] descriptors: */
/* One known CPUID leaf 2 cache descriptor; see cpuid2_cache_descriptors[]. */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;    /* data / instruction / unified */
    int level;              /* cache level: 1, 2 or 3 */
    int size;               /* total cache size in bytes */
    int line_size;          /* cache line size in bytes */
    int associativity;      /* number of ways */
};
  61. /*
  62. * Known CPUID 2 cache descriptors.
  63. * From Intel SDM Volume 2A, CPUID instruction
  64. */
  65. struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
  66. [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
  67. .associativity = 4, .line_size = 32, },
  68. [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
  69. .associativity = 4, .line_size = 32, },
  70. [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
  71. .associativity = 4, .line_size = 64, },
  72. [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
  73. .associativity = 2, .line_size = 32, },
  74. [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
  75. .associativity = 4, .line_size = 32, },
  76. [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
  77. .associativity = 4, .line_size = 64, },
  78. [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
  79. .associativity = 6, .line_size = 64, },
  80. [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
  81. .associativity = 2, .line_size = 64, },
  82. [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
  83. .associativity = 8, .line_size = 64, },
  84. /* lines per sector is not supported cpuid2_cache_descriptor(),
  85. * so descriptors 0x22, 0x23 are not included
  86. */
  87. [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
  88. .associativity = 16, .line_size = 64, },
  89. /* lines per sector is not supported cpuid2_cache_descriptor(),
  90. * so descriptors 0x25, 0x20 are not included
  91. */
  92. [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
  93. .associativity = 8, .line_size = 64, },
  94. [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
  95. .associativity = 8, .line_size = 64, },
  96. [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
  97. .associativity = 4, .line_size = 32, },
  98. [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
  99. .associativity = 4, .line_size = 32, },
  100. [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
  101. .associativity = 4, .line_size = 32, },
  102. [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
  103. .associativity = 4, .line_size = 32, },
  104. [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
  105. .associativity = 4, .line_size = 32, },
  106. [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
  107. .associativity = 4, .line_size = 64, },
  108. [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
  109. .associativity = 8, .line_size = 64, },
  110. [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
  111. .associativity = 12, .line_size = 64, },
  112. /* Descriptor 0x49 depends on CPU family/model, so it is not included */
  113. [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
  114. .associativity = 12, .line_size = 64, },
  115. [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
  116. .associativity = 16, .line_size = 64, },
  117. [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
  118. .associativity = 12, .line_size = 64, },
  119. [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
  120. .associativity = 16, .line_size = 64, },
  121. [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
  122. .associativity = 24, .line_size = 64, },
  123. [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
  124. .associativity = 8, .line_size = 64, },
  125. [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
  126. .associativity = 4, .line_size = 64, },
  127. [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
  128. .associativity = 4, .line_size = 64, },
  129. [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
  130. .associativity = 4, .line_size = 64, },
  131. [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
  132. .associativity = 4, .line_size = 64, },
  133. /* lines per sector is not supported cpuid2_cache_descriptor(),
  134. * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
  135. */
  136. [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
  137. .associativity = 8, .line_size = 64, },
  138. [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
  139. .associativity = 2, .line_size = 64, },
  140. [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
  141. .associativity = 8, .line_size = 64, },
  142. [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
  143. .associativity = 8, .line_size = 32, },
  144. [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
  145. .associativity = 8, .line_size = 32, },
  146. [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
  147. .associativity = 8, .line_size = 32, },
  148. [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
  149. .associativity = 8, .line_size = 32, },
  150. [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
  151. .associativity = 4, .line_size = 64, },
  152. [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
  153. .associativity = 8, .line_size = 64, },
  154. [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
  155. .associativity = 4, .line_size = 64, },
  156. [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
  157. .associativity = 4, .line_size = 64, },
  158. [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
  159. .associativity = 4, .line_size = 64, },
  160. [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
  161. .associativity = 8, .line_size = 64, },
  162. [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
  163. .associativity = 8, .line_size = 64, },
  164. [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
  165. .associativity = 8, .line_size = 64, },
  166. [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
  167. .associativity = 12, .line_size = 64, },
  168. [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
  169. .associativity = 12, .line_size = 64, },
  170. [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
  171. .associativity = 12, .line_size = 64, },
  172. [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
  173. .associativity = 16, .line_size = 64, },
  174. [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
  175. .associativity = 16, .line_size = 64, },
  176. [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
  177. .associativity = 16, .line_size = 64, },
  178. [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
  179. .associativity = 24, .line_size = 64, },
  180. [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
  181. .associativity = 24, .line_size = 64, },
  182. [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
  183. .associativity = 24, .line_size = 64, },
  184. };
  185. /*
  186. * "CPUID leaf 2 does not report cache descriptor information,
  187. * use CPUID leaf 4 to query cache parameters"
  188. */
  189. #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
  190. /*
  191. * Return a CPUID 2 cache descriptor for a given cache.
  192. * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
  193. */
  194. static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
  195. {
  196. int i;
  197. assert(cache->size > 0);
  198. assert(cache->level > 0);
  199. assert(cache->line_size > 0);
  200. assert(cache->associativity > 0);
  201. for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
  202. struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
  203. if (d->level == cache->level && d->type == cache->type &&
  204. d->size == cache->size && d->line_size == cache->line_size &&
  205. d->associativity == cache->associativity) {
  206. return i;
  207. }
  208. }
  209. return CACHE_DESCRIPTOR_UNAVAILABLE;
  210. }
  211. /* CPUID Leaf 4 constants: */
  212. /* EAX: */
  213. #define CACHE_TYPE_D 1
  214. #define CACHE_TYPE_I 2
  215. #define CACHE_TYPE_UNIFIED 3
  216. #define CACHE_LEVEL(l) (l << 5)
  217. #define CACHE_SELF_INIT_LEVEL (1 << 8)
  218. /* EDX: */
  219. #define CACHE_NO_INVD_SHARING (1 << 0)
  220. #define CACHE_INCLUSIVE (1 << 1)
  221. #define CACHE_COMPLEX_IDX (1 << 2)
  222. /* Encode CacheType for CPUID[4].EAX */
  223. #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
  224. ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
  225. ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
  226. 0 /* Invalid value */)
/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    /* Cache geometry must be internally consistent */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    /*
     * EAX: cache type (bits 4:0), level (7:5), self-initializing (8),
     * max cores per package - 1 (31:26),
     * max APIC IDs sharing this cache - 1 (25:14)
     */
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    /* EBX: line size - 1 (11:0), partitions - 1 (21:12), ways - 1 (31:22) */
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    /* ECX: number of sets - 1 */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    /* EDX: WBINVD sharing (bit 0), inclusiveness (1), complex indexing (2) */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
  255. /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
  256. static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
  257. {
  258. assert(cache->size % 1024 == 0);
  259. assert(cache->lines_per_tag > 0);
  260. assert(cache->associativity > 0);
  261. assert(cache->line_size > 0);
  262. return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
  263. (cache->lines_per_tag << 8) | (cache->line_size);
  264. }
  265. #define ASSOC_FULL 0xFF
  266. /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
  267. #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
  268. a == 2 ? 0x2 : \
  269. a == 4 ? 0x4 : \
  270. a == 8 ? 0x6 : \
  271. a == 16 ? 0x8 : \
  272. a == 32 ? 0xA : \
  273. a == 48 ? 0xB : \
  274. a == 64 ? 0xC : \
  275. a == 96 ? 0xD : \
  276. a == 128 ? 0xE : \
  277. a == ASSOC_FULL ? 0xF : \
  278. 0 /* invalid value */)
  279. /*
  280. * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
  281. * @l3 can be NULL.
  282. */
  283. static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
  284. CPUCacheInfo *l3,
  285. uint32_t *ecx, uint32_t *edx)
  286. {
  287. assert(l2->size % 1024 == 0);
  288. assert(l2->associativity > 0);
  289. assert(l2->lines_per_tag > 0);
  290. assert(l2->line_size > 0);
  291. *ecx = ((l2->size / 1024) << 16) |
  292. (AMD_ENC_ASSOC(l2->associativity) << 12) |
  293. (l2->lines_per_tag << 8) | (l2->line_size);
  294. if (l3) {
  295. assert(l3->size % (512 * 1024) == 0);
  296. assert(l3->associativity > 0);
  297. assert(l3->lines_per_tag > 0);
  298. assert(l3->line_size > 0);
  299. *edx = ((l3->size / (512 * 1024)) << 18) |
  300. (AMD_ENC_ASSOC(l3->associativity) << 12) |
  301. (l3->lines_per_tag << 8) | (l3->line_size);
  302. } else {
  303. *edx = 0;
  304. }
  305. }
  306. /*
  307. * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
  308. * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
  309. * Define the constants to build the cpu topology. Right now, TOPOEXT
  310. * feature is enabled only on EPYC. So, these constants are based on
  311. * EPYC supported configurations. We may need to handle the cases if
  312. * these values change in future.
  313. */
/* Maximum core complexes in a node (EPYC: 2 CCX per die) */
#define MAX_CCX 2
/* Maximum cores in a core complex (EPYC: 4 cores share one L3) */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node (MAX_CCX * MAX_CORES_IN_CCX) */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
  322. /*
  323. * Figure out the number of nodes required to build this config.
  324. * Max cores in a node is 8
  325. */
  326. static int nodes_in_socket(int nr_cores)
  327. {
  328. int nodes;
  329. nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
  330. /* Hardware does not support config with 3 nodes, return 4 in that case */
  331. return (nodes == 3) ? 4 : nodes;
  332. }
  333. /*
  334. * Decide the number of cores in a core complex with the given nr_cores using
  335. * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
  336. * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
  337. * L3 cache is shared across all cores in a core complex. So, this will also
  338. * tell us how many cores are sharing the L3 cache.
  339. */
  340. static int cores_in_core_complex(int nr_cores)
  341. {
  342. int nodes;
  343. /* Check if we can fit all the cores in one core complex */
  344. if (nr_cores <= MAX_CORES_IN_CCX) {
  345. return nr_cores;
  346. }
  347. /* Get the number of nodes required to build this config */
  348. nodes = nodes_in_socket(nr_cores);
  349. /*
  350. * Divide the cores accros all the core complexes
  351. * Return rounded up value
  352. */
  353. return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
  354. }
/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;

    /* Cache geometry must be internally consistent */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    /* EAX: type (4:0), level (7:5), self-initializing (8),
     * logical CPUs sharing this cache - 1 (25:14) */
    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        /* All threads of all cores in one core complex share the L3 */
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        /* L1/L2 are shared only by the threads of one core */
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    /* EBX: line size - 1 (11:0), partitions - 1 (21:12), ways - 1 (31:22) */
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    /* ECX: number of sets - 1 */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    /* EDX: WBINVD sharing (bit 0), inclusiveness (1), complex indexing (2) */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
/* Data structure to hold the configuration info for a given core index.
 * Filled in by build_core_topology(). */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
  400. /*
  401. * Build the configuration closely match the EPYC hardware. Using the EPYC
  402. * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
  403. * right now. This could change in future.
  404. * nr_cores : Total number of cores in the config
  405. * core_id : Core index of the current CPU
  406. * topo : Data structure to hold all the config info for this core index
  407. */
  408. static void build_core_topology(int nr_cores, int core_id,
  409. struct core_topology *topo)
  410. {
  411. int nodes, cores_in_ccx;
  412. /* First get the number of nodes required */
  413. nodes = nodes_in_socket(nr_cores);
  414. cores_in_ccx = cores_in_core_complex(nr_cores);
  415. topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
  416. topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
  417. topo->core_id = core_id % cores_in_ccx;
  418. topo->num_nodes = nodes;
  419. }
/* Encode CPU topology info for CPUID[8000001E] (not cache info) */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    /* EAX: extended APIC ID */
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        /* SMT enabled: pack node/ccx/core into the tighter SMT layout */
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *          2  Socket id
     *        1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can
         * have is 32 (255/8) with 8 cores per node and 255 max cores. We only
         * need 5 bits for nodes. Find the left most set bit to represent the
         * total number of nodes. find_last_bit returns last set bit(0 based).
         * Left shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,           /* 64 B line * 8 ways * 64 sets = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,          /* 64 B line * 2 ways * 512 sets = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,           /* 64 B line * 8 ways * 64 sets = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,          /* 64 B line * 2 ways * 512 sets = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,         /* 64 B line * 16 ways * 4096 sets = 4 MiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
/* Only the fields needed by the leaf-2 descriptor lookup are filled in */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,          /* 64 B line * 16 ways * 512 sets = 512 KiB */
    .partitions = 1,
};
/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,        /* 64 B line * 16 ways * 16384 sets = 16 MiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
/* TLB definitions: */
/*
 * Hardcoded TLB geometry: an associativity of 1 is direct-mapped and
 * 0 means that TLB level is reported as disabled.
 * NOTE(review): presumably consumed by the CPUID[0x80000005]/[0x80000006]
 * encoding elsewhere in this file — confirm against the users.
 */
#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255
#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512
#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
  600. /* CPUID Leaf 0x14 constants: */
  601. #define INTEL_PT_MAX_SUBLEAF 0x1
  602. /*
  603. * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
  604. * MSR can be accessed;
  605. * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
  606. * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
  607. * of Intel PT MSRs across warm reset;
  608. * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
  609. */
  610. #define INTEL_PT_MINIMAL_EBX 0xf
  611. /*
  612. * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
  613. * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
  614. * accessed;
  615. * bit[01]: ToPA tables can hold any number of output entries, up to the
  616. * maximum allowed by the MaskOrTableOffset field of
  617. * IA32_RTIT_OUTPUT_MASK_PTRS;
  618. * bit[02]: Support Single-Range Output scheme;
  619. */
  620. #define INTEL_PT_MINIMAL_ECX 0x7
  621. /* generated packets which contain IP payloads have LIP values */
  622. #define INTEL_PT_IP_LIP (1 << 31)
  623. #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
  624. #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
  625. #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
  626. #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
  627. #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
  628. static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
  629. uint32_t vendor2, uint32_t vendor3)
  630. {
  631. int i;
  632. for (i = 0; i < 4; i++) {
  633. dst[i] = vendor1 >> (8 * i);
  634. dst[i + 4] = vendor2 >> (8 * i);
  635. dst[i + 8] = vendor3 >> (8 * i);
  636. }
  637. dst[CPUID_VENDOR_SZ] = '\0';
  638. }
/*
 * Feature-flag masks for the classic CPU models, plus the TCG_* masks
 * listing which CPUID bits the TCG emulator can actually provide.
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit TCG guests get SYSCALL and Long Mode; 32-bit builds get neither */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Where the bits of a feature word come from */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,    /* word is read from a CPUID leaf register */
   MSR_FEATURE_WORD,      /* word is read from a feature-enumeration MSR */
} FeatureWordType;
/* Metadata describing one feature word: flag names, where the word is
 * queried from, and which flags TCG/migration can handle. */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /* CPUID leaf/flag that enumerates this MSR */
                FeatureWord cpuid_class;
                uint32_t cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags;   /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
/*
 * Master table of all feature words QEMU knows about, indexed by
 * FeatureWord.  Each entry names the individual bits (for -cpu parsing and
 * error messages) and records where the word is read from.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    /* CPUID[1].EDX: base feature flags */
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    /* CPUID[1].ECX: extended feature flags */
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
    /* CPUID[8000_0001].ECX: AMD extended feature flags */
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    /* CPUID[C000_0001].EDX: VIA/Centaur PadLock features */
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    /* KVM paravirtual feature bits.
     * NOTE(review): bits 0 and 3 both carry the name "kvmclock" —
     * presumably the two clocksource CPUID bits are meant to share one
     * user-visible property; confirm against KVM_FEATURE_CLOCKSOURCE{,2}. */
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* KVM hint bits (KVM_CPUID_FEATURES in EDX) */
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    /*
     * .feat_names are commented out for Hyper-V enlightenments because we
     * don't want to have two different ways for enabling them on QEMU command
     * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
     * enabling several feature bits simultaneously, exposing these bits
     * individually may just confuse guests.
     */
    [FEAT_HYPERV_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
    },
    [FEAT_HYPERV_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
    },
    [FEAT_HYPERV_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
    },
    [FEAT_HV_RECOMM_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_recommend_pv_as_switch */,
            NULL /* hv_recommend_pv_tlbflush_local */,
            NULL /* hv_recommend_pv_tlbflush_remote */,
            NULL /* hv_recommend_msr_apic_access */,
            NULL /* hv_recommend_msr_reset */,
            NULL /* hv_recommend_relaxed_timing */,
            NULL /* hv_recommend_dma_remapping */,
            NULL /* hv_recommend_int_remapping */,
            NULL /* hv_recommend_x2apic_msrs */,
            NULL /* hv_recommend_autoeoi_deprecation */,
            NULL /* hv_recommend_pv_ipi */,
            NULL /* hv_recommend_ex_hypercalls */,
            NULL /* hv_hypervisor_is_nested */,
            NULL /* hv_recommend_int_mbec */,
            NULL /* hv_recommend_evmcs */,
            NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
    },
    /* Hyper-V nested virtualization features: no per-bit names exposed */
    [FEAT_HV_NESTED_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
    },
    /* CPUID[8000_000A].EDX: SVM (AMD virtualization) sub-features */
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
    /* CPUID[EAX=7,ECX=0].EBX: structured extended features */
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    /* CPUID[EAX=7,ECX=0].ECX */
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, "movdiri",
            "movdir64b", NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    /* CPUID[EAX=7,ECX=0].EDX: includes speculation-control bits */
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", "stibp",
            NULL, "arch-capabilities", NULL, "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
        .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
    },
    /* CPUID[8000_0007].EDX: advanced power management */
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    /* CPUID[8000_0008].EBX: AMD speculation-control bits */
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    /* CPUID[EAX=0xd,ECX=1].EAX: XSAVE extended instruction forms */
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    /* CPUID[6].EAX: thermal and power management */
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* CPUID[EAX=0xd,ECX=0].EAX/EDX: supported XSAVE component bitmap,
     * low and high halves; bits have no user-visible names. */
    [FEAT_XSAVE_COMP_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /* Below are MSR exposed features */
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
            .cpuid_dep = {
                /* MSR is only readable when CPUID.7.0:EDX advertises it */
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_ARCH_CAPABILITIES
            }
        },
    },
};
/* Mapping of a 32-bit register index (R_* constant) to its display name
 * and the corresponding QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Helper so each table entry is generated from the bare register name. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
/* Describes one XSAVE state component: the CPUID feature that enables it,
 * and its offset/size within the (standard-format) XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* feature word + bit(s) gating this component */
    uint32_t offset, size;    /* location within X86XSaveArea, in bytes */
} ExtSaveArea;
/* Per-component XSAVE layout table, indexed by XSTATE_*_BIT. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
  1197. static uint32_t xsave_area_size(uint64_t mask)
  1198. {
  1199. int i;
  1200. uint64_t ret = 0;
  1201. for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
  1202. const ExtSaveArea *esa = &x86_ext_save_areas[i];
  1203. if ((mask >> i) & 1) {
  1204. ret = MAX(ret, esa->offset + esa->size);
  1205. }
  1206. }
  1207. return ret;
  1208. }
  1209. static inline bool accel_uses_host_cpuid(void)
  1210. {
  1211. return kvm_enabled() || hvf_enabled();
  1212. }
  1213. static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
  1214. {
  1215. return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
  1216. cpu->env.features[FEAT_XSAVE_COMP_LO];
  1217. }
  1218. const char *get_register_name_32(unsigned int reg)
  1219. {
  1220. if (reg >= CPU_NB_REGS32) {
  1221. return NULL;
  1222. }
  1223. return x86_reg_info_32[reg].name;
  1224. }
  1225. /*
  1226. * Returns the set of feature flags that are supported and migratable by
  1227. * QEMU, for a given FeatureWord.
  1228. */
  1229. static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
  1230. {
  1231. FeatureWordInfo *wi = &feature_word_info[w];
  1232. uint32_t r = 0;
  1233. int i;
  1234. for (i = 0; i < 32; i++) {
  1235. uint32_t f = 1U << i;
  1236. /* If the feature name is known, it is implicitly considered migratable,
  1237. * unless it is explicitly set in unmigratable_flags */
  1238. if ((wi->migratable_flags & f) ||
  1239. (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
  1240. r |= f;
  1241. }
  1242. }
  1243. return r;
  1244. }
/*
 * Execute the CPUID instruction on the host with EAX=@function and
 * ECX=@count, storing the resulting registers through any non-NULL
 * output pointers.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa preserve all GPRs around CPUID, so the compiler-reserved
     * registers (e.g. EBX under PIC) are not clobbered; results are stored
     * into vec[] through ESI while the registers are still live. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
/*
 * Query the host CPU's vendor string (CPUID leaf 0) and family/model/
 * stepping (CPUID leaf 1).  @vendor must hold at least
 * CPUID_VENDOR_SZ + 1 bytes; the other outputs may be NULL.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family + extended family; NOTE(review): the SDM defines the
         * extended fields as meaningful only when base family is 0xF —
         * hardware reports 0 there otherwise, so the unconditional add is
         * believed safe, but confirm against the vendor manuals. */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model | (extended model << 4) */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
  1292. /* CPU class name definitions: */
  1293. /* Return type name for a given CPU model name
  1294. * Caller is responsible for freeing the returned string.
  1295. */
static char *x86_cpu_type_name(const char *model_name)
{
    /* X86_CPU_TYPE_NAME("%s") expands to a literal format string, keeping
     * compiler printf-format checking intact. */
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
  1300. static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
  1301. {
  1302. ObjectClass *oc;
  1303. char *typename = x86_cpu_type_name(cpu_model);
  1304. oc = object_class_by_name(typename);
  1305. g_free(typename);
  1306. return oc;
  1307. }
  1308. static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
  1309. {
  1310. const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
  1311. assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
  1312. return g_strndup(class_name,
  1313. strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
  1314. }
/* Static description of one built-in CPU model (builtin_x86_defs entry). */
struct X86CPUDefinition {
    const char *name;        /* model name used on the command line */
    uint32_t level;          /* maximum basic CPUID leaf */
    uint32_t xlevel;         /* maximum extended CPUID leaf */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;   /* initial feature-word values */
    const char *model_id;        /* brand string reported to the guest */
    CPUCaches *cache_info;       /* optional cache topology override */
};
/* Cache topology reported for the EPYC CPU model. */
static CPUCaches epyc_cache_info = {
    /* 32 KiB, 8-way L1 data cache */
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 64 KiB, 4-way L1 instruction cache */
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 512 KiB, 8-way unified L2 cache */
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    /* 8 MiB, 16-way unified L3 cache */
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
  1377. static X86CPUDefinition builtin_x86_defs[] = {
  1378. {
  1379. .name = "qemu64",
  1380. .level = 0xd,
  1381. .vendor = CPUID_VENDOR_AMD,
  1382. .family = 6,
  1383. .model = 6,
  1384. .stepping = 3,
  1385. .features[FEAT_1_EDX] =
  1386. PPRO_FEATURES |
  1387. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  1388. CPUID_PSE36,
  1389. .features[FEAT_1_ECX] =
  1390. CPUID_EXT_SSE3 | CPUID_EXT_CX16,
  1391. .features[FEAT_8000_0001_EDX] =
  1392. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1393. .features[FEAT_8000_0001_ECX] =
  1394. CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
  1395. .xlevel = 0x8000000A,
  1396. .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
  1397. },
  1398. {
  1399. .name = "phenom",
  1400. .level = 5,
  1401. .vendor = CPUID_VENDOR_AMD,
  1402. .family = 16,
  1403. .model = 2,
  1404. .stepping = 3,
  1405. /* Missing: CPUID_HT */
  1406. .features[FEAT_1_EDX] =
  1407. PPRO_FEATURES |
  1408. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  1409. CPUID_PSE36 | CPUID_VME,
  1410. .features[FEAT_1_ECX] =
  1411. CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
  1412. CPUID_EXT_POPCNT,
  1413. .features[FEAT_8000_0001_EDX] =
  1414. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
  1415. CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
  1416. CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
  1417. /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
  1418. CPUID_EXT3_CR8LEG,
  1419. CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
  1420. CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
  1421. .features[FEAT_8000_0001_ECX] =
  1422. CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
  1423. CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
  1424. /* Missing: CPUID_SVM_LBRV */
  1425. .features[FEAT_SVM] =
  1426. CPUID_SVM_NPT,
  1427. .xlevel = 0x8000001A,
  1428. .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
  1429. },
  1430. {
  1431. .name = "core2duo",
  1432. .level = 10,
  1433. .vendor = CPUID_VENDOR_INTEL,
  1434. .family = 6,
  1435. .model = 15,
  1436. .stepping = 11,
  1437. /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
  1438. .features[FEAT_1_EDX] =
  1439. PPRO_FEATURES |
  1440. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  1441. CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
  1442. /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
  1443. * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
  1444. .features[FEAT_1_ECX] =
  1445. CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
  1446. CPUID_EXT_CX16,
  1447. .features[FEAT_8000_0001_EDX] =
  1448. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1449. .features[FEAT_8000_0001_ECX] =
  1450. CPUID_EXT3_LAHF_LM,
  1451. .xlevel = 0x80000008,
  1452. .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
  1453. },
  1454. {
  1455. .name = "kvm64",
  1456. .level = 0xd,
  1457. .vendor = CPUID_VENDOR_INTEL,
  1458. .family = 15,
  1459. .model = 6,
  1460. .stepping = 1,
  1461. /* Missing: CPUID_HT */
  1462. .features[FEAT_1_EDX] =
  1463. PPRO_FEATURES | CPUID_VME |
  1464. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  1465. CPUID_PSE36,
  1466. /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
  1467. .features[FEAT_1_ECX] =
  1468. CPUID_EXT_SSE3 | CPUID_EXT_CX16,
  1469. /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
  1470. .features[FEAT_8000_0001_EDX] =
  1471. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1472. /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
  1473. CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
  1474. CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
  1475. CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
  1476. .features[FEAT_8000_0001_ECX] =
  1477. 0,
  1478. .xlevel = 0x80000008,
  1479. .model_id = "Common KVM processor"
  1480. },
  1481. {
  1482. .name = "qemu32",
  1483. .level = 4,
  1484. .vendor = CPUID_VENDOR_INTEL,
  1485. .family = 6,
  1486. .model = 6,
  1487. .stepping = 3,
  1488. .features[FEAT_1_EDX] =
  1489. PPRO_FEATURES,
  1490. .features[FEAT_1_ECX] =
  1491. CPUID_EXT_SSE3,
  1492. .xlevel = 0x80000004,
  1493. .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
  1494. },
  1495. {
  1496. .name = "kvm32",
  1497. .level = 5,
  1498. .vendor = CPUID_VENDOR_INTEL,
  1499. .family = 15,
  1500. .model = 6,
  1501. .stepping = 1,
  1502. .features[FEAT_1_EDX] =
  1503. PPRO_FEATURES | CPUID_VME |
  1504. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
  1505. .features[FEAT_1_ECX] =
  1506. CPUID_EXT_SSE3,
  1507. .features[FEAT_8000_0001_ECX] =
  1508. 0,
  1509. .xlevel = 0x80000008,
  1510. .model_id = "Common 32-bit KVM processor"
  1511. },
  1512. {
  1513. .name = "coreduo",
  1514. .level = 10,
  1515. .vendor = CPUID_VENDOR_INTEL,
  1516. .family = 6,
  1517. .model = 14,
  1518. .stepping = 8,
  1519. /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
  1520. .features[FEAT_1_EDX] =
  1521. PPRO_FEATURES | CPUID_VME |
  1522. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
  1523. CPUID_SS,
  1524. /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
  1525. * CPUID_EXT_PDCM, CPUID_EXT_VMX */
  1526. .features[FEAT_1_ECX] =
  1527. CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
  1528. .features[FEAT_8000_0001_EDX] =
  1529. CPUID_EXT2_NX,
  1530. .xlevel = 0x80000008,
  1531. .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
  1532. },
  1533. {
  1534. .name = "486",
  1535. .level = 1,
  1536. .vendor = CPUID_VENDOR_INTEL,
  1537. .family = 4,
  1538. .model = 8,
  1539. .stepping = 0,
  1540. .features[FEAT_1_EDX] =
  1541. I486_FEATURES,
  1542. .xlevel = 0,
  1543. .model_id = "",
  1544. },
  1545. {
  1546. .name = "pentium",
  1547. .level = 1,
  1548. .vendor = CPUID_VENDOR_INTEL,
  1549. .family = 5,
  1550. .model = 4,
  1551. .stepping = 3,
  1552. .features[FEAT_1_EDX] =
  1553. PENTIUM_FEATURES,
  1554. .xlevel = 0,
  1555. .model_id = "",
  1556. },
  1557. {
  1558. .name = "pentium2",
  1559. .level = 2,
  1560. .vendor = CPUID_VENDOR_INTEL,
  1561. .family = 6,
  1562. .model = 5,
  1563. .stepping = 2,
  1564. .features[FEAT_1_EDX] =
  1565. PENTIUM2_FEATURES,
  1566. .xlevel = 0,
  1567. .model_id = "",
  1568. },
  1569. {
  1570. .name = "pentium3",
  1571. .level = 3,
  1572. .vendor = CPUID_VENDOR_INTEL,
  1573. .family = 6,
  1574. .model = 7,
  1575. .stepping = 3,
  1576. .features[FEAT_1_EDX] =
  1577. PENTIUM3_FEATURES,
  1578. .xlevel = 0,
  1579. .model_id = "",
  1580. },
  1581. {
  1582. .name = "athlon",
  1583. .level = 2,
  1584. .vendor = CPUID_VENDOR_AMD,
  1585. .family = 6,
  1586. .model = 2,
  1587. .stepping = 3,
  1588. .features[FEAT_1_EDX] =
  1589. PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
  1590. CPUID_MCA,
  1591. .features[FEAT_8000_0001_EDX] =
  1592. CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
  1593. .xlevel = 0x80000008,
  1594. .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
  1595. },
  1596. {
  1597. .name = "n270",
  1598. .level = 10,
  1599. .vendor = CPUID_VENDOR_INTEL,
  1600. .family = 6,
  1601. .model = 28,
  1602. .stepping = 2,
  1603. /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
  1604. .features[FEAT_1_EDX] =
  1605. PPRO_FEATURES |
  1606. CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
  1607. CPUID_ACPI | CPUID_SS,
  1608. /* Some CPUs got no CPUID_SEP */
  1609. /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
  1610. * CPUID_EXT_XTPR */
  1611. .features[FEAT_1_ECX] =
  1612. CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
  1613. CPUID_EXT_MOVBE,
  1614. .features[FEAT_8000_0001_EDX] =
  1615. CPUID_EXT2_NX,
  1616. .features[FEAT_8000_0001_ECX] =
  1617. CPUID_EXT3_LAHF_LM,
  1618. .xlevel = 0x80000008,
  1619. .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
  1620. },
  1621. {
  1622. .name = "Conroe",
  1623. .level = 10,
  1624. .vendor = CPUID_VENDOR_INTEL,
  1625. .family = 6,
  1626. .model = 15,
  1627. .stepping = 3,
  1628. .features[FEAT_1_EDX] =
  1629. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1630. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1631. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1632. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1633. CPUID_DE | CPUID_FP87,
  1634. .features[FEAT_1_ECX] =
  1635. CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
  1636. .features[FEAT_8000_0001_EDX] =
  1637. CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  1638. .features[FEAT_8000_0001_ECX] =
  1639. CPUID_EXT3_LAHF_LM,
  1640. .xlevel = 0x80000008,
  1641. .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
  1642. },
  1643. {
  1644. .name = "Penryn",
  1645. .level = 10,
  1646. .vendor = CPUID_VENDOR_INTEL,
  1647. .family = 6,
  1648. .model = 23,
  1649. .stepping = 3,
  1650. .features[FEAT_1_EDX] =
  1651. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1652. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1653. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1654. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1655. CPUID_DE | CPUID_FP87,
  1656. .features[FEAT_1_ECX] =
  1657. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  1658. CPUID_EXT_SSE3,
  1659. .features[FEAT_8000_0001_EDX] =
  1660. CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  1661. .features[FEAT_8000_0001_ECX] =
  1662. CPUID_EXT3_LAHF_LM,
  1663. .xlevel = 0x80000008,
  1664. .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
  1665. },
  /* Nehalem (family 6, model 26): Penryn feature set plus SSE4.2 and
   * POPCNT. */
  1666. {
  1667. .name = "Nehalem",
  1668. .level = 11,
  1669. .vendor = CPUID_VENDOR_INTEL,
  1670. .family = 6,
  1671. .model = 26,
  1672. .stepping = 3,
  1673. .features[FEAT_1_EDX] =
  1674. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1675. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1676. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1677. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1678. CPUID_DE | CPUID_FP87,
  1679. .features[FEAT_1_ECX] =
  1680. CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  1681. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
  1682. .features[FEAT_8000_0001_EDX] =
  1683. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1684. .features[FEAT_8000_0001_ECX] =
  1685. CPUID_EXT3_LAHF_LM,
  1686. .xlevel = 0x80000008,
  1687. .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
  1688. },
  /* Identical to "Nehalem" except for the added SPEC_CTRL (IBRS) bit in
   * CPUID leaf 7 EDX. */
  1689. {
  1690. .name = "Nehalem-IBRS",
  1691. .level = 11,
  1692. .vendor = CPUID_VENDOR_INTEL,
  1693. .family = 6,
  1694. .model = 26,
  1695. .stepping = 3,
  1696. .features[FEAT_1_EDX] =
  1697. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1698. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1699. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1700. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1701. CPUID_DE | CPUID_FP87,
  1702. .features[FEAT_1_ECX] =
  1703. CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  1704. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
  1705. .features[FEAT_7_0_EDX] =
  1706. CPUID_7_0_EDX_SPEC_CTRL,
  1707. .features[FEAT_8000_0001_EDX] =
  1708. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1709. .features[FEAT_8000_0001_ECX] =
  1710. CPUID_EXT3_LAHF_LM,
  1711. .xlevel = 0x80000008,
  1712. .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
  1713. },
  1714. {
  1715. .name = "Westmere",
  1716. .level = 11,
  1717. .vendor = CPUID_VENDOR_INTEL,
  1718. .family = 6,
  1719. .model = 44,
  1720. .stepping = 1,
  1721. .features[FEAT_1_EDX] =
  1722. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1723. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1724. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1725. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1726. CPUID_DE | CPUID_FP87,
  1727. .features[FEAT_1_ECX] =
  1728. CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
  1729. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  1730. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
  1731. .features[FEAT_8000_0001_EDX] =
  1732. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1733. .features[FEAT_8000_0001_ECX] =
  1734. CPUID_EXT3_LAHF_LM,
  1735. .features[FEAT_6_EAX] =
  1736. CPUID_6_EAX_ARAT,
  1737. .xlevel = 0x80000008,
  1738. .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
  1739. },
  1740. {
  1741. .name = "Westmere-IBRS",
  1742. .level = 11,
  1743. .vendor = CPUID_VENDOR_INTEL,
  1744. .family = 6,
  1745. .model = 44,
  1746. .stepping = 1,
  1747. .features[FEAT_1_EDX] =
  1748. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1749. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1750. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1751. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1752. CPUID_DE | CPUID_FP87,
  1753. .features[FEAT_1_ECX] =
  1754. CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
  1755. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  1756. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
  1757. .features[FEAT_8000_0001_EDX] =
  1758. CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  1759. .features[FEAT_8000_0001_ECX] =
  1760. CPUID_EXT3_LAHF_LM,
  1761. .features[FEAT_7_0_EDX] =
  1762. CPUID_7_0_EDX_SPEC_CTRL,
  1763. .features[FEAT_6_EAX] =
  1764. CPUID_6_EAX_ARAT,
  1765. .xlevel = 0x80000008,
  1766. .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
  1767. },
  /* Sandy Bridge (family 6, model 42): Westmere feature set plus AVX,
   * XSAVE/XSAVEOPT, x2APIC, TSC-deadline timer, and RDTSCP.  level 0xd
   * exposes the XSAVE state-enumeration leaf. */
  1768. {
  1769. .name = "SandyBridge",
  1770. .level = 0xd,
  1771. .vendor = CPUID_VENDOR_INTEL,
  1772. .family = 6,
  1773. .model = 42,
  1774. .stepping = 1,
  1775. .features[FEAT_1_EDX] =
  1776. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1777. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1778. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1779. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1780. CPUID_DE | CPUID_FP87,
  1781. .features[FEAT_1_ECX] =
  1782. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1783. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
  1784. CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  1785. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
  1786. CPUID_EXT_SSE3,
  1787. .features[FEAT_8000_0001_EDX] =
  1788. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1789. CPUID_EXT2_SYSCALL,
  1790. .features[FEAT_8000_0001_ECX] =
  1791. CPUID_EXT3_LAHF_LM,
  1792. .features[FEAT_XSAVE] =
  1793. CPUID_XSAVE_XSAVEOPT,
  1794. .features[FEAT_6_EAX] =
  1795. CPUID_6_EAX_ARAT,
  1796. .xlevel = 0x80000008,
  1797. .model_id = "Intel Xeon E312xx (Sandy Bridge)",
  1798. },
  /* Identical to "SandyBridge" except for the added SPEC_CTRL (IBRS) bit
   * in CPUID leaf 7 EDX. */
  1799. {
  1800. .name = "SandyBridge-IBRS",
  1801. .level = 0xd,
  1802. .vendor = CPUID_VENDOR_INTEL,
  1803. .family = 6,
  1804. .model = 42,
  1805. .stepping = 1,
  1806. .features[FEAT_1_EDX] =
  1807. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1808. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1809. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1810. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1811. CPUID_DE | CPUID_FP87,
  1812. .features[FEAT_1_ECX] =
  1813. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1814. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
  1815. CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  1816. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
  1817. CPUID_EXT_SSE3,
  1818. .features[FEAT_8000_0001_EDX] =
  1819. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1820. CPUID_EXT2_SYSCALL,
  1821. .features[FEAT_8000_0001_ECX] =
  1822. CPUID_EXT3_LAHF_LM,
  1823. .features[FEAT_7_0_EDX] =
  1824. CPUID_7_0_EDX_SPEC_CTRL,
  1825. .features[FEAT_XSAVE] =
  1826. CPUID_XSAVE_XSAVEOPT,
  1827. .features[FEAT_6_EAX] =
  1828. CPUID_6_EAX_ARAT,
  1829. .xlevel = 0x80000008,
  1830. .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
  1831. },
  /* Ivy Bridge (family 6, model 58): Sandy Bridge feature set plus F16C,
   * RDRAND, and the first leaf-7 EBX bits (FSGSBASE, SMEP, ERMS). */
  1832. {
  1833. .name = "IvyBridge",
  1834. .level = 0xd,
  1835. .vendor = CPUID_VENDOR_INTEL,
  1836. .family = 6,
  1837. .model = 58,
  1838. .stepping = 9,
  1839. .features[FEAT_1_EDX] =
  1840. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1841. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1842. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1843. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1844. CPUID_DE | CPUID_FP87,
  1845. .features[FEAT_1_ECX] =
  1846. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1847. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
  1848. CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  1849. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
  1850. CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  1851. .features[FEAT_7_0_EBX] =
  1852. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
  1853. CPUID_7_0_EBX_ERMS,
  1854. .features[FEAT_8000_0001_EDX] =
  1855. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1856. CPUID_EXT2_SYSCALL,
  1857. .features[FEAT_8000_0001_ECX] =
  1858. CPUID_EXT3_LAHF_LM,
  1859. .features[FEAT_XSAVE] =
  1860. CPUID_XSAVE_XSAVEOPT,
  1861. .features[FEAT_6_EAX] =
  1862. CPUID_6_EAX_ARAT,
  1863. .xlevel = 0x80000008,
  1864. .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
  1865. },
  /* Identical to "IvyBridge" except for the added SPEC_CTRL (IBRS) bit in
   * CPUID leaf 7 EDX. */
  1866. {
  1867. .name = "IvyBridge-IBRS",
  1868. .level = 0xd,
  1869. .vendor = CPUID_VENDOR_INTEL,
  1870. .family = 6,
  1871. .model = 58,
  1872. .stepping = 9,
  1873. .features[FEAT_1_EDX] =
  1874. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1875. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1876. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1877. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1878. CPUID_DE | CPUID_FP87,
  1879. .features[FEAT_1_ECX] =
  1880. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1881. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
  1882. CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  1883. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
  1884. CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  1885. .features[FEAT_7_0_EBX] =
  1886. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
  1887. CPUID_7_0_EBX_ERMS,
  1888. .features[FEAT_8000_0001_EDX] =
  1889. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1890. CPUID_EXT2_SYSCALL,
  1891. .features[FEAT_8000_0001_ECX] =
  1892. CPUID_EXT3_LAHF_LM,
  1893. .features[FEAT_7_0_EDX] =
  1894. CPUID_7_0_EDX_SPEC_CTRL,
  1895. .features[FEAT_XSAVE] =
  1896. CPUID_XSAVE_XSAVEOPT,
  1897. .features[FEAT_6_EAX] =
  1898. CPUID_6_EAX_ARAT,
  1899. .xlevel = 0x80000008,
  1900. .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
  1901. },
  /* Haswell without TSX (family 6, model 60, stepping 1): same as
   * "Haswell" below but with HLE and RTM left out of FEAT_7_0_EBX, for
   * hosts whose microcode disables TSX. */
  1902. {
  1903. .name = "Haswell-noTSX",
  1904. .level = 0xd,
  1905. .vendor = CPUID_VENDOR_INTEL,
  1906. .family = 6,
  1907. .model = 60,
  1908. .stepping = 1,
  1909. .features[FEAT_1_EDX] =
  1910. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1911. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1912. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1913. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1914. CPUID_DE | CPUID_FP87,
  1915. .features[FEAT_1_ECX] =
  1916. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1917. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  1918. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  1919. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  1920. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  1921. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  1922. .features[FEAT_8000_0001_EDX] =
  1923. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1924. CPUID_EXT2_SYSCALL,
  1925. .features[FEAT_8000_0001_ECX] =
  1926. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
  1927. .features[FEAT_7_0_EBX] =
  1928. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  1929. CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  1930. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
  1931. .features[FEAT_XSAVE] =
  1932. CPUID_XSAVE_XSAVEOPT,
  1933. .features[FEAT_6_EAX] =
  1934. CPUID_6_EAX_ARAT,
  1935. .xlevel = 0x80000008,
  1936. .model_id = "Intel Core Processor (Haswell, no TSX)",
  1937. },
  /* Identical to "Haswell-noTSX" except for the added SPEC_CTRL (IBRS)
   * bit in CPUID leaf 7 EDX. */
  1938. {
  1939. .name = "Haswell-noTSX-IBRS",
  1940. .level = 0xd,
  1941. .vendor = CPUID_VENDOR_INTEL,
  1942. .family = 6,
  1943. .model = 60,
  1944. .stepping = 1,
  1945. .features[FEAT_1_EDX] =
  1946. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1947. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1948. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1949. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1950. CPUID_DE | CPUID_FP87,
  1951. .features[FEAT_1_ECX] =
  1952. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1953. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  1954. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  1955. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  1956. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  1957. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  1958. .features[FEAT_8000_0001_EDX] =
  1959. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1960. CPUID_EXT2_SYSCALL,
  1961. .features[FEAT_8000_0001_ECX] =
  1962. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
  1963. .features[FEAT_7_0_EDX] =
  1964. CPUID_7_0_EDX_SPEC_CTRL,
  1965. .features[FEAT_7_0_EBX] =
  1966. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  1967. CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  1968. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
  1969. .features[FEAT_XSAVE] =
  1970. CPUID_XSAVE_XSAVEOPT,
  1971. .features[FEAT_6_EAX] =
  1972. CPUID_6_EAX_ARAT,
  1973. .xlevel = 0x80000008,
  1974. .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
  1975. },
  /* Haswell (family 6, model 60, stepping 4): Ivy Bridge feature set plus
   * AVX2, FMA, BMI1/BMI2, MOVBE, PCID/INVPCID, ABM, and TSX (HLE/RTM). */
  1976. {
  1977. .name = "Haswell",
  1978. .level = 0xd,
  1979. .vendor = CPUID_VENDOR_INTEL,
  1980. .family = 6,
  1981. .model = 60,
  1982. .stepping = 4,
  1983. .features[FEAT_1_EDX] =
  1984. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  1985. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  1986. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  1987. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  1988. CPUID_DE | CPUID_FP87,
  1989. .features[FEAT_1_ECX] =
  1990. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  1991. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  1992. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  1993. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  1994. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  1995. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  1996. .features[FEAT_8000_0001_EDX] =
  1997. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  1998. CPUID_EXT2_SYSCALL,
  1999. .features[FEAT_8000_0001_ECX] =
  2000. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
  2001. .features[FEAT_7_0_EBX] =
  2002. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2003. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2004. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2005. CPUID_7_0_EBX_RTM,
  2006. .features[FEAT_XSAVE] =
  2007. CPUID_XSAVE_XSAVEOPT,
  2008. .features[FEAT_6_EAX] =
  2009. CPUID_6_EAX_ARAT,
  2010. .xlevel = 0x80000008,
  2011. .model_id = "Intel Core Processor (Haswell)",
  2012. },
  /* Identical to "Haswell" except for the added SPEC_CTRL (IBRS) bit in
   * CPUID leaf 7 EDX. */
  2013. {
  2014. .name = "Haswell-IBRS",
  2015. .level = 0xd,
  2016. .vendor = CPUID_VENDOR_INTEL,
  2017. .family = 6,
  2018. .model = 60,
  2019. .stepping = 4,
  2020. .features[FEAT_1_EDX] =
  2021. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2022. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2023. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2024. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2025. CPUID_DE | CPUID_FP87,
  2026. .features[FEAT_1_ECX] =
  2027. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2028. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2029. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2030. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2031. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2032. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2033. .features[FEAT_8000_0001_EDX] =
  2034. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2035. CPUID_EXT2_SYSCALL,
  2036. .features[FEAT_8000_0001_ECX] =
  2037. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
  2038. .features[FEAT_7_0_EDX] =
  2039. CPUID_7_0_EDX_SPEC_CTRL,
  2040. .features[FEAT_7_0_EBX] =
  2041. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2042. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2043. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2044. CPUID_7_0_EBX_RTM,
  2045. .features[FEAT_XSAVE] =
  2046. CPUID_XSAVE_XSAVEOPT,
  2047. .features[FEAT_6_EAX] =
  2048. CPUID_6_EAX_ARAT,
  2049. .xlevel = 0x80000008,
  2050. .model_id = "Intel Core Processor (Haswell, IBRS)",
  2051. },
  /* Broadwell without TSX (family 6, model 61): same as "Broadwell" below
   * but with HLE and RTM left out of FEAT_7_0_EBX. */
  2052. {
  2053. .name = "Broadwell-noTSX",
  2054. .level = 0xd,
  2055. .vendor = CPUID_VENDOR_INTEL,
  2056. .family = 6,
  2057. .model = 61,
  2058. .stepping = 2,
  2059. .features[FEAT_1_EDX] =
  2060. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2061. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2062. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2063. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2064. CPUID_DE | CPUID_FP87,
  2065. .features[FEAT_1_ECX] =
  2066. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2067. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2068. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2069. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2070. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2071. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2072. .features[FEAT_8000_0001_EDX] =
  2073. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2074. CPUID_EXT2_SYSCALL,
  2075. .features[FEAT_8000_0001_ECX] =
  2076. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2077. .features[FEAT_7_0_EBX] =
  2078. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2079. CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2080. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2081. CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2082. CPUID_7_0_EBX_SMAP,
  2083. .features[FEAT_XSAVE] =
  2084. CPUID_XSAVE_XSAVEOPT,
  2085. .features[FEAT_6_EAX] =
  2086. CPUID_6_EAX_ARAT,
  2087. .xlevel = 0x80000008,
  2088. .model_id = "Intel Core Processor (Broadwell, no TSX)",
  2089. },
  /* Identical to "Broadwell-noTSX" except for the added SPEC_CTRL (IBRS)
   * bit in CPUID leaf 7 EDX. */
  2090. {
  2091. .name = "Broadwell-noTSX-IBRS",
  2092. .level = 0xd,
  2093. .vendor = CPUID_VENDOR_INTEL,
  2094. .family = 6,
  2095. .model = 61,
  2096. .stepping = 2,
  2097. .features[FEAT_1_EDX] =
  2098. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2099. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2100. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2101. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2102. CPUID_DE | CPUID_FP87,
  2103. .features[FEAT_1_ECX] =
  2104. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2105. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2106. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2107. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2108. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2109. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2110. .features[FEAT_8000_0001_EDX] =
  2111. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2112. CPUID_EXT2_SYSCALL,
  2113. .features[FEAT_8000_0001_ECX] =
  2114. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2115. .features[FEAT_7_0_EDX] =
  2116. CPUID_7_0_EDX_SPEC_CTRL,
  2117. .features[FEAT_7_0_EBX] =
  2118. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2119. CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2120. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2121. CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2122. CPUID_7_0_EBX_SMAP,
  2123. .features[FEAT_XSAVE] =
  2124. CPUID_XSAVE_XSAVEOPT,
  2125. .features[FEAT_6_EAX] =
  2126. CPUID_6_EAX_ARAT,
  2127. .xlevel = 0x80000008,
  2128. .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
  2129. },
  /* Broadwell (family 6, model 61): Haswell feature set plus RDSEED, ADX,
   * SMAP, and 3DNOWPREFETCH. */
  2130. {
  2131. .name = "Broadwell",
  2132. .level = 0xd,
  2133. .vendor = CPUID_VENDOR_INTEL,
  2134. .family = 6,
  2135. .model = 61,
  2136. .stepping = 2,
  2137. .features[FEAT_1_EDX] =
  2138. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2139. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2140. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2141. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2142. CPUID_DE | CPUID_FP87,
  2143. .features[FEAT_1_ECX] =
  2144. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2145. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2146. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2147. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2148. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2149. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2150. .features[FEAT_8000_0001_EDX] =
  2151. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2152. CPUID_EXT2_SYSCALL,
  2153. .features[FEAT_8000_0001_ECX] =
  2154. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2155. .features[FEAT_7_0_EBX] =
  2156. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2157. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2158. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2159. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2160. CPUID_7_0_EBX_SMAP,
  2161. .features[FEAT_XSAVE] =
  2162. CPUID_XSAVE_XSAVEOPT,
  2163. .features[FEAT_6_EAX] =
  2164. CPUID_6_EAX_ARAT,
  2165. .xlevel = 0x80000008,
  2166. .model_id = "Intel Core Processor (Broadwell)",
  2167. },
  /* Identical to "Broadwell" except for the added SPEC_CTRL (IBRS) bit in
   * CPUID leaf 7 EDX. */
  2168. {
  2169. .name = "Broadwell-IBRS",
  2170. .level = 0xd,
  2171. .vendor = CPUID_VENDOR_INTEL,
  2172. .family = 6,
  2173. .model = 61,
  2174. .stepping = 2,
  2175. .features[FEAT_1_EDX] =
  2176. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2177. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2178. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2179. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2180. CPUID_DE | CPUID_FP87,
  2181. .features[FEAT_1_ECX] =
  2182. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2183. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2184. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2185. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2186. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2187. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2188. .features[FEAT_8000_0001_EDX] =
  2189. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2190. CPUID_EXT2_SYSCALL,
  2191. .features[FEAT_8000_0001_ECX] =
  2192. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2193. .features[FEAT_7_0_EDX] =
  2194. CPUID_7_0_EDX_SPEC_CTRL,
  2195. .features[FEAT_7_0_EBX] =
  2196. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2197. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2198. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2199. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2200. CPUID_7_0_EBX_SMAP,
  2201. .features[FEAT_XSAVE] =
  2202. CPUID_XSAVE_XSAVEOPT,
  2203. .features[FEAT_6_EAX] =
  2204. CPUID_6_EAX_ARAT,
  2205. .xlevel = 0x80000008,
  2206. .model_id = "Intel Core Processor (Broadwell, IBRS)",
  2207. },
  /* Skylake client (family 6, model 94): Broadwell feature set plus
   * XSAVEC and XGETBV1 in the XSAVE leaf. */
  2208. {
  2209. .name = "Skylake-Client",
  2210. .level = 0xd,
  2211. .vendor = CPUID_VENDOR_INTEL,
  2212. .family = 6,
  2213. .model = 94,
  2214. .stepping = 3,
  2215. .features[FEAT_1_EDX] =
  2216. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2217. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2218. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2219. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2220. CPUID_DE | CPUID_FP87,
  2221. .features[FEAT_1_ECX] =
  2222. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2223. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2224. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2225. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2226. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2227. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2228. .features[FEAT_8000_0001_EDX] =
  2229. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2230. CPUID_EXT2_SYSCALL,
  2231. .features[FEAT_8000_0001_ECX] =
  2232. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2233. .features[FEAT_7_0_EBX] =
  2234. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2235. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2236. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2237. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2238. CPUID_7_0_EBX_SMAP,
  2239. /* Missing: XSAVES (not supported by some Linux versions,
  2240. * including v4.1 to v4.12).
  2241. * KVM doesn't yet expose any XSAVES state save component,
  2242. * and the only one defined in Skylake (processor tracing)
  2243. * probably will block migration anyway.
  2244. */
  2245. .features[FEAT_XSAVE] =
  2246. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2247. CPUID_XSAVE_XGETBV1,
  2248. .features[FEAT_6_EAX] =
  2249. CPUID_6_EAX_ARAT,
  2250. .xlevel = 0x80000008,
  2251. .model_id = "Intel Core Processor (Skylake)",
  2252. },
  /* Identical to "Skylake-Client" except for the added SPEC_CTRL (IBRS)
   * bit in CPUID leaf 7 EDX. */
  2253. {
  2254. .name = "Skylake-Client-IBRS",
  2255. .level = 0xd,
  2256. .vendor = CPUID_VENDOR_INTEL,
  2257. .family = 6,
  2258. .model = 94,
  2259. .stepping = 3,
  2260. .features[FEAT_1_EDX] =
  2261. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2262. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2263. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2264. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2265. CPUID_DE | CPUID_FP87,
  2266. .features[FEAT_1_ECX] =
  2267. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2268. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2269. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2270. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2271. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2272. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2273. .features[FEAT_8000_0001_EDX] =
  2274. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2275. CPUID_EXT2_SYSCALL,
  2276. .features[FEAT_8000_0001_ECX] =
  2277. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2278. .features[FEAT_7_0_EDX] =
  2279. CPUID_7_0_EDX_SPEC_CTRL,
  2280. .features[FEAT_7_0_EBX] =
  2281. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2282. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2283. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2284. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2285. CPUID_7_0_EBX_SMAP,
  2286. /* Missing: XSAVES (not supported by some Linux versions,
  2287. * including v4.1 to v4.12).
  2288. * KVM doesn't yet expose any XSAVES state save component,
  2289. * and the only one defined in Skylake (processor tracing)
  2290. * probably will block migration anyway.
  2291. */
  2292. .features[FEAT_XSAVE] =
  2293. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2294. CPUID_XSAVE_XGETBV1,
  2295. .features[FEAT_6_EAX] =
  2296. CPUID_6_EAX_ARAT,
  2297. .xlevel = 0x80000008,
  2298. .model_id = "Intel Core Processor (Skylake, IBRS)",
  2299. },
  /* Skylake server (family 6, model 85, stepping 4): Skylake client
   * feature set plus AVX-512 (F/DQ/BW/CD/VL), CLWB, CLFLUSHOPT, PKU, and
   * 1 GiB pages (PDPE1GB). */
  2300. {
  2301. .name = "Skylake-Server",
  2302. .level = 0xd,
  2303. .vendor = CPUID_VENDOR_INTEL,
  2304. .family = 6,
  2305. .model = 85,
  2306. .stepping = 4,
  2307. .features[FEAT_1_EDX] =
  2308. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2309. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2310. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2311. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2312. CPUID_DE | CPUID_FP87,
  2313. .features[FEAT_1_ECX] =
  2314. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2315. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2316. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2317. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2318. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2319. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2320. .features[FEAT_8000_0001_EDX] =
  2321. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
  2322. CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2323. .features[FEAT_8000_0001_ECX] =
  2324. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2325. .features[FEAT_7_0_EBX] =
  2326. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2327. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2328. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2329. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2330. CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
  2331. CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
  2332. CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
  2333. CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
  2334. .features[FEAT_7_0_ECX] =
  2335. CPUID_7_0_ECX_PKU,
  2336. /* Missing: XSAVES (not supported by some Linux versions,
  2337. * including v4.1 to v4.12).
  2338. * KVM doesn't yet expose any XSAVES state save component,
  2339. * and the only one defined in Skylake (processor tracing)
  2340. * probably will block migration anyway.
  2341. */
  2342. .features[FEAT_XSAVE] =
  2343. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2344. CPUID_XSAVE_XGETBV1,
  2345. .features[FEAT_6_EAX] =
  2346. CPUID_6_EAX_ARAT,
  2347. .xlevel = 0x80000008,
  2348. .model_id = "Intel Xeon Processor (Skylake)",
  2349. },
  /* Skylake server with the SPEC_CTRL (IBRS) bit added in CPUID leaf 7
   * EDX.
   * NOTE(review): unlike "Skylake-Server", the FEAT_7_0_EBX list below
   * omits CPUID_7_0_EBX_CLFLUSHOPT.  It is deliberately left unchanged
   * here because adding a bit to an existing named model alters the
   * guest-visible CPUID and would break cross-version live migration --
   * confirm the omission is intentional. */
  2350. {
  2351. .name = "Skylake-Server-IBRS",
  2352. .level = 0xd,
  2353. .vendor = CPUID_VENDOR_INTEL,
  2354. .family = 6,
  2355. .model = 85,
  2356. .stepping = 4,
  2357. .features[FEAT_1_EDX] =
  2358. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2359. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2360. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2361. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2362. CPUID_DE | CPUID_FP87,
  2363. .features[FEAT_1_ECX] =
  2364. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2365. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2366. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2367. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2368. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2369. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2370. .features[FEAT_8000_0001_EDX] =
  2371. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
  2372. CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2373. .features[FEAT_8000_0001_ECX] =
  2374. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2375. .features[FEAT_7_0_EDX] =
  2376. CPUID_7_0_EDX_SPEC_CTRL,
  2377. .features[FEAT_7_0_EBX] =
  2378. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2379. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2380. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2381. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2382. CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
  2383. CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
  2384. CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
  2385. CPUID_7_0_EBX_AVX512VL,
  2386. .features[FEAT_7_0_ECX] =
  2387. CPUID_7_0_ECX_PKU,
  2388. /* Missing: XSAVES (not supported by some Linux versions,
  2389. * including v4.1 to v4.12).
  2390. * KVM doesn't yet expose any XSAVES state save component,
  2391. * and the only one defined in Skylake (processor tracing)
  2392. * probably will block migration anyway.
  2393. */
  2394. .features[FEAT_XSAVE] =
  2395. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2396. CPUID_XSAVE_XGETBV1,
  2397. .features[FEAT_6_EAX] =
  2398. CPUID_6_EAX_ARAT,
  2399. .xlevel = 0x80000008,
  2400. .model_id = "Intel Xeon Processor (Skylake, IBRS)",
  2401. },
  /* Cascade Lake Xeon (family 6, model 85, stepping 6): Skylake-Server
   * feature set plus AVX512VNNI, SPEC_CTRL and SSBD.
   * Fix: dropped CPUID_7_0_ECX_OSPKE from FEAT_7_0_ECX.  OSPKE
   * (CPUID.07H:ECX bit 4) merely reflects CR4.PKE, which the guest OS
   * sets at run time; it is not a host-provided feature and KVM never
   * reports it as supportable, so hard-coding it made every launch of
   * this model emit an "unavailable feature" warning.  (Matches the
   * upstream QEMU change removing OSPKE from CPU model definitions.) */
  2402. {
  2403. .name = "Cascadelake-Server",
  2404. .level = 0xd,
  2405. .vendor = CPUID_VENDOR_INTEL,
  2406. .family = 6,
  2407. .model = 85,
  2408. .stepping = 6,
  2409. .features[FEAT_1_EDX] =
  2410. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2411. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2412. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2413. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2414. CPUID_DE | CPUID_FP87,
  2415. .features[FEAT_1_ECX] =
  2416. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2417. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2418. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2419. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2420. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2421. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2422. .features[FEAT_8000_0001_EDX] =
  2423. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
  2424. CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2425. .features[FEAT_8000_0001_ECX] =
  2426. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2427. .features[FEAT_7_0_EBX] =
  2428. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2429. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2430. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2431. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2432. CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
  2433. CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
  2434. CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
  2435. CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
  2436. .features[FEAT_7_0_ECX] =
  2437. CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512VNNI,
  2438. .features[FEAT_7_0_EDX] =
  2439. CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
  2440. /* Missing: XSAVES (not supported by some Linux versions,
  2441. * including v4.1 to v4.12).
  2442. * KVM doesn't yet expose any XSAVES state save component,
  2443. * and the only one defined in Skylake (processor tracing)
  2444. * probably will block migration anyway.
  2445. */
  2446. .features[FEAT_XSAVE] =
  2447. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2448. CPUID_XSAVE_XGETBV1,
  2449. .features[FEAT_6_EAX] =
  2450. CPUID_6_EAX_ARAT,
  2451. .xlevel = 0x80000008,
  2452. .model_id = "Intel Xeon Processor (Cascadelake)",
  2453. },
  2455. {
  2456. .name = "Icelake-Client",
  2457. .level = 0xd,
  2458. .vendor = CPUID_VENDOR_INTEL,
  2459. .family = 6,
  2460. .model = 126,
  2461. .stepping = 0,
  2462. .features[FEAT_1_EDX] =
  2463. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2464. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2465. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2466. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2467. CPUID_DE | CPUID_FP87,
  2468. .features[FEAT_1_ECX] =
  2469. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2470. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2471. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2472. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2473. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2474. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2475. .features[FEAT_8000_0001_EDX] =
  2476. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
  2477. CPUID_EXT2_SYSCALL,
  2478. .features[FEAT_8000_0001_ECX] =
  2479. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2480. .features[FEAT_8000_0008_EBX] =
  2481. CPUID_8000_0008_EBX_WBNOINVD,
  2482. .features[FEAT_7_0_EBX] =
  2483. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2484. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2485. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2486. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2487. CPUID_7_0_EBX_SMAP,
  2488. .features[FEAT_7_0_ECX] =
  2489. CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
  2490. CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
  2491. CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
  2492. CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
  2493. CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
  2494. .features[FEAT_7_0_EDX] =
  2495. CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
  2496. /* Missing: XSAVES (not supported by some Linux versions,
  2497. * including v4.1 to v4.12).
  2498. * KVM doesn't yet expose any XSAVES state save component,
  2499. * and the only one defined in Skylake (processor tracing)
  2500. * probably will block migration anyway.
  2501. */
  2502. .features[FEAT_XSAVE] =
  2503. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2504. CPUID_XSAVE_XGETBV1,
  2505. .features[FEAT_6_EAX] =
  2506. CPUID_6_EAX_ARAT,
  2507. .xlevel = 0x80000008,
  2508. .model_id = "Intel Core Processor (Icelake)",
  2509. },
  2510. {
  2511. .name = "Icelake-Server",
  2512. .level = 0xd,
  2513. .vendor = CPUID_VENDOR_INTEL,
  2514. .family = 6,
  2515. .model = 134,
  2516. .stepping = 0,
  2517. .features[FEAT_1_EDX] =
  2518. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2519. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2520. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2521. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2522. CPUID_DE | CPUID_FP87,
  2523. .features[FEAT_1_ECX] =
  2524. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2525. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2526. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2527. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2528. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2529. CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2530. .features[FEAT_8000_0001_EDX] =
  2531. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
  2532. CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2533. .features[FEAT_8000_0001_ECX] =
  2534. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2535. .features[FEAT_8000_0008_EBX] =
  2536. CPUID_8000_0008_EBX_WBNOINVD,
  2537. .features[FEAT_7_0_EBX] =
  2538. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
  2539. CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
  2540. CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
  2541. CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
  2542. CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
  2543. CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
  2544. CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
  2545. CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
  2546. .features[FEAT_7_0_ECX] =
  2547. CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
  2548. CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
  2549. CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
  2550. CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
  2551. CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
  2552. .features[FEAT_7_0_EDX] =
  2553. CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
  2554. /* Missing: XSAVES (not supported by some Linux versions,
  2555. * including v4.1 to v4.12).
  2556. * KVM doesn't yet expose any XSAVES state save component,
  2557. * and the only one defined in Skylake (processor tracing)
  2558. * probably will block migration anyway.
  2559. */
  2560. .features[FEAT_XSAVE] =
  2561. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2562. CPUID_XSAVE_XGETBV1,
  2563. .features[FEAT_6_EAX] =
  2564. CPUID_6_EAX_ARAT,
  2565. .xlevel = 0x80000008,
  2566. .model_id = "Intel Xeon Processor (Icelake)",
  2567. },
  2568. {
  2569. .name = "KnightsMill",
  2570. .level = 0xd,
  2571. .vendor = CPUID_VENDOR_INTEL,
  2572. .family = 6,
  2573. .model = 133,
  2574. .stepping = 0,
  2575. .features[FEAT_1_EDX] =
  2576. CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
  2577. CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
  2578. CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
  2579. CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
  2580. CPUID_PSE | CPUID_DE | CPUID_FP87,
  2581. .features[FEAT_1_ECX] =
  2582. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2583. CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
  2584. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
  2585. CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
  2586. CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
  2587. CPUID_EXT_F16C | CPUID_EXT_RDRAND,
  2588. .features[FEAT_8000_0001_EDX] =
  2589. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
  2590. CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2591. .features[FEAT_8000_0001_ECX] =
  2592. CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
  2593. .features[FEAT_7_0_EBX] =
  2594. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
  2595. CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
  2596. CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
  2597. CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
  2598. CPUID_7_0_EBX_AVX512ER,
  2599. .features[FEAT_7_0_ECX] =
  2600. CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
  2601. .features[FEAT_7_0_EDX] =
  2602. CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
  2603. .features[FEAT_XSAVE] =
  2604. CPUID_XSAVE_XSAVEOPT,
  2605. .features[FEAT_6_EAX] =
  2606. CPUID_6_EAX_ARAT,
  2607. .xlevel = 0x80000008,
  2608. .model_id = "Intel Xeon Phi Processor (Knights Mill)",
  2609. },
  2610. {
  2611. .name = "Opteron_G1",
  2612. .level = 5,
  2613. .vendor = CPUID_VENDOR_AMD,
  2614. .family = 15,
  2615. .model = 6,
  2616. .stepping = 1,
  2617. .features[FEAT_1_EDX] =
  2618. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2619. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2620. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2621. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2622. CPUID_DE | CPUID_FP87,
  2623. .features[FEAT_1_ECX] =
  2624. CPUID_EXT_SSE3,
  2625. .features[FEAT_8000_0001_EDX] =
  2626. CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2627. .xlevel = 0x80000008,
  2628. .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
  2629. },
  2630. {
  2631. .name = "Opteron_G2",
  2632. .level = 5,
  2633. .vendor = CPUID_VENDOR_AMD,
  2634. .family = 15,
  2635. .model = 6,
  2636. .stepping = 1,
  2637. .features[FEAT_1_EDX] =
  2638. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2639. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2640. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2641. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2642. CPUID_DE | CPUID_FP87,
  2643. .features[FEAT_1_ECX] =
  2644. CPUID_EXT_CX16 | CPUID_EXT_SSE3,
  2645. .features[FEAT_8000_0001_EDX] =
  2646. CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
  2647. .features[FEAT_8000_0001_ECX] =
  2648. CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
  2649. .xlevel = 0x80000008,
  2650. .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
  2651. },
  2652. {
  2653. .name = "Opteron_G3",
  2654. .level = 5,
  2655. .vendor = CPUID_VENDOR_AMD,
  2656. .family = 16,
  2657. .model = 2,
  2658. .stepping = 3,
  2659. .features[FEAT_1_EDX] =
  2660. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2661. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2662. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2663. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2664. CPUID_DE | CPUID_FP87,
  2665. .features[FEAT_1_ECX] =
  2666. CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
  2667. CPUID_EXT_SSE3,
  2668. .features[FEAT_8000_0001_EDX] =
  2669. CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
  2670. CPUID_EXT2_RDTSCP,
  2671. .features[FEAT_8000_0001_ECX] =
  2672. CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
  2673. CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
  2674. .xlevel = 0x80000008,
  2675. .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
  2676. },
  2677. {
  2678. .name = "Opteron_G4",
  2679. .level = 0xd,
  2680. .vendor = CPUID_VENDOR_AMD,
  2681. .family = 21,
  2682. .model = 1,
  2683. .stepping = 2,
  2684. .features[FEAT_1_EDX] =
  2685. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2686. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2687. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2688. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2689. CPUID_DE | CPUID_FP87,
  2690. .features[FEAT_1_ECX] =
  2691. CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
  2692. CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  2693. CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
  2694. CPUID_EXT_SSE3,
  2695. .features[FEAT_8000_0001_EDX] =
  2696. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
  2697. CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
  2698. .features[FEAT_8000_0001_ECX] =
  2699. CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
  2700. CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
  2701. CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
  2702. CPUID_EXT3_LAHF_LM,
  2703. .features[FEAT_SVM] =
  2704. CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
  2705. /* no xsaveopt! */
  2706. .xlevel = 0x8000001A,
  2707. .model_id = "AMD Opteron 62xx class CPU",
  2708. },
  2709. {
  2710. .name = "Opteron_G5",
  2711. .level = 0xd,
  2712. .vendor = CPUID_VENDOR_AMD,
  2713. .family = 21,
  2714. .model = 2,
  2715. .stepping = 0,
  2716. .features[FEAT_1_EDX] =
  2717. CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
  2718. CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
  2719. CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
  2720. CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
  2721. CPUID_DE | CPUID_FP87,
  2722. .features[FEAT_1_ECX] =
  2723. CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
  2724. CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
  2725. CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
  2726. CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
  2727. .features[FEAT_8000_0001_EDX] =
  2728. CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
  2729. CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
  2730. .features[FEAT_8000_0001_ECX] =
  2731. CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
  2732. CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
  2733. CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
  2734. CPUID_EXT3_LAHF_LM,
  2735. .features[FEAT_SVM] =
  2736. CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
  2737. /* no xsaveopt! */
  2738. .xlevel = 0x8000001A,
  2739. .model_id = "AMD Opteron 63xx class CPU",
  2740. },
  2741. {
  2742. .name = "EPYC",
  2743. .level = 0xd,
  2744. .vendor = CPUID_VENDOR_AMD,
  2745. .family = 23,
  2746. .model = 1,
  2747. .stepping = 2,
  2748. .features[FEAT_1_EDX] =
  2749. CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
  2750. CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
  2751. CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
  2752. CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
  2753. CPUID_VME | CPUID_FP87,
  2754. .features[FEAT_1_ECX] =
  2755. CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
  2756. CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
  2757. CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  2758. CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
  2759. CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
  2760. .features[FEAT_8000_0001_EDX] =
  2761. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
  2762. CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
  2763. CPUID_EXT2_SYSCALL,
  2764. .features[FEAT_8000_0001_ECX] =
  2765. CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
  2766. CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
  2767. CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
  2768. CPUID_EXT3_TOPOEXT,
  2769. .features[FEAT_7_0_EBX] =
  2770. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
  2771. CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
  2772. CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
  2773. CPUID_7_0_EBX_SHA_NI,
  2774. /* Missing: XSAVES (not supported by some Linux versions,
  2775. * including v4.1 to v4.12).
  2776. * KVM doesn't yet expose any XSAVES state save component.
  2777. */
  2778. .features[FEAT_XSAVE] =
  2779. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2780. CPUID_XSAVE_XGETBV1,
  2781. .features[FEAT_6_EAX] =
  2782. CPUID_6_EAX_ARAT,
  2783. .features[FEAT_SVM] =
  2784. CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
  2785. .xlevel = 0x8000001E,
  2786. .model_id = "AMD EPYC Processor",
  2787. .cache_info = &epyc_cache_info,
  2788. },
  2789. {
  2790. .name = "EPYC-IBPB",
  2791. .level = 0xd,
  2792. .vendor = CPUID_VENDOR_AMD,
  2793. .family = 23,
  2794. .model = 1,
  2795. .stepping = 2,
  2796. .features[FEAT_1_EDX] =
  2797. CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
  2798. CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
  2799. CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
  2800. CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
  2801. CPUID_VME | CPUID_FP87,
  2802. .features[FEAT_1_ECX] =
  2803. CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
  2804. CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
  2805. CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
  2806. CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
  2807. CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
  2808. .features[FEAT_8000_0001_EDX] =
  2809. CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
  2810. CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
  2811. CPUID_EXT2_SYSCALL,
  2812. .features[FEAT_8000_0001_ECX] =
  2813. CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
  2814. CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
  2815. CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
  2816. CPUID_EXT3_TOPOEXT,
  2817. .features[FEAT_8000_0008_EBX] =
  2818. CPUID_8000_0008_EBX_IBPB,
  2819. .features[FEAT_7_0_EBX] =
  2820. CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
  2821. CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
  2822. CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
  2823. CPUID_7_0_EBX_SHA_NI,
  2824. /* Missing: XSAVES (not supported by some Linux versions,
  2825. * including v4.1 to v4.12).
  2826. * KVM doesn't yet expose any XSAVES state save component.
  2827. */
  2828. .features[FEAT_XSAVE] =
  2829. CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
  2830. CPUID_XSAVE_XGETBV1,
  2831. .features[FEAT_6_EAX] =
  2832. CPUID_6_EAX_ARAT,
  2833. .features[FEAT_SVM] =
  2834. CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
  2835. .xlevel = 0x8000001E,
  2836. .model_id = "AMD EPYC Processor (with IBPB)",
  2837. .cache_info = &epyc_cache_info,
  2838. },
  2839. };
/* A (property name, property value) string pair, used by the
 * accelerator-default tables below to override CPU model properties. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NULL-terminated table.  Entries can be overridden at runtime through
 * x86_cpu_change_kvm_default(), which asserts that the property being
 * changed is already listed here.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props above.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
  2865. void x86_cpu_change_kvm_default(const char *prop, const char *value)
  2866. {
  2867. PropValue *pv;
  2868. for (pv = kvm_default_props; pv->prop; pv++) {
  2869. if (!strcmp(pv->prop, prop)) {
  2870. pv->value = value;
  2871. break;
  2872. }
  2873. }
  2874. /* It is valid to call this function only for properties that
  2875. * are already present in the kvm_default_props table.
  2876. */
  2877. assert(pv->prop);
  2878. }
  2879. static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
  2880. bool migratable_only);
/* Return true if KVM reports support for LMCE (MCG_CAP bit MCG_LMCE_P).
 * Without CONFIG_KVM, mce_cap stays 0 and the function always returns
 * false. */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    /* Query the supported MCE capability mask; treat ioctl failure as
     * "not supported". */
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
  2891. #define CPUID_MODEL_ID_SZ 48
  2892. /**
  2893. * cpu_x86_fill_model_id:
  2894. * Get CPUID model ID string from host CPU.
  2895. *
  2896. * @str should have at least CPUID_MODEL_ID_SZ bytes
  2897. *
  2898. * The function does NOT add a null terminator to the string
  2899. * automatically.
  2900. */
  2901. static int cpu_x86_fill_model_id(char *str)
  2902. {
  2903. uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
  2904. int i;
  2905. for (i = 0; i < 3; i++) {
  2906. host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
  2907. memcpy(str + i * 16 + 0, &eax, 4);
  2908. memcpy(str + i * 16 + 4, &ebx, 4);
  2909. memcpy(str + i * 16 + 8, &ecx, 4);
  2910. memcpy(str + i * 16 + 12, &edx, 4);
  2911. }
  2912. return 0;
  2913. }
/* Extra QOM properties of the "max" CPU model. */
static Property max_x86_cpu_properties[] = {
    /* When true, only migration-safe features are enabled. */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* NOTE(review): presumably passes the host cache topology through to
     * the guest when enabled — confirm against users of
     * cache_info_passthrough. */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
  2919. static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
  2920. {
  2921. DeviceClass *dc = DEVICE_CLASS(oc);
  2922. X86CPUClass *xcc = X86_CPU_CLASS(oc);
  2923. xcc->ordering = 9;
  2924. xcc->model_description =
  2925. "Enables all features supported by the accelerator in the current host";
  2926. dc->props = max_x86_cpu_properties;
  2927. }
  2928. static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
/* Instance initializer for the "max" CPU model.  Under KVM/HVF it mirrors
 * the host CPU's identity; under TCG it uses a generic QEMU identity. */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        /* Hardware accelerator (KVM or HVF, by the calls below): copy
         * vendor/family/model/stepping and the model-id string from the
         * host CPU. */
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* NOTE(review): host_cpudef.vendor is filled from CPUID leaf 0
         * but host_cpudef is not used afterwards in this function. */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            /* Minimum CPUID levels reported as supported by KVM for the
             * basic, extended and Centaur leaf ranges. */
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* Same queries via HVF (the only other host-CPUID accel). */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: generic QEMU identity instead of the host's. */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM registration of the "max" CPU model ("all features the accelerator
 * supports on the current host"). */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class initializer for the "host" CPU model; only built when a hardware
 * accelerator (KVM or HVF) is configured. */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "host" cannot work without host CPUID access. */
    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

/* "host" derives from "max", inheriting its instance_init. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif
  3011. static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
  3012. {
  3013. assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
  3014. switch (f->type) {
  3015. case CPUID_FEATURE_WORD:
  3016. {
  3017. const char *reg = get_register_name_32(f->cpuid.reg);
  3018. assert(reg);
  3019. return g_strdup_printf("CPUID.%02XH:%s",
  3020. f->cpuid.eax, reg);
  3021. }
  3022. case MSR_FEATURE_WORD:
  3023. return g_strdup_printf("MSR(%02XH)",
  3024. f->msr.index);
  3025. }
  3026. return NULL;
  3027. }
  3028. static void report_unavailable_features(FeatureWord w, uint32_t mask)
  3029. {
  3030. FeatureWordInfo *f = &feature_word_info[w];
  3031. int i;
  3032. char *feat_word_str;
  3033. for (i = 0; i < 32; ++i) {
  3034. if ((1UL << i) & mask) {
  3035. feat_word_str = feature_word_description(f, i);
  3036. warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
  3037. accel_uses_host_cpuid() ? "host" : "TCG",
  3038. feat_word_str,
  3039. f->feat_names[i] ? "." : "",
  3040. f->feat_names[i] ? f->feat_names[i] : "", i);
  3041. g_free(feat_word_str);
  3042. }
  3043. }
  3044. }
  3045. static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
  3046. const char *name, void *opaque,
  3047. Error **errp)
  3048. {
  3049. X86CPU *cpu = X86_CPU(obj);
  3050. CPUX86State *env = &cpu->env;
  3051. int64_t value;
  3052. value = (env->cpuid_version >> 8) & 0xf;
  3053. if (value == 0xf) {
  3054. value += (env->cpuid_version >> 20) & 0xff;
  3055. }
  3056. visit_type_int(v, name, &value, errp);
  3057. }
  3058. static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
  3059. const char *name, void *opaque,
  3060. Error **errp)
  3061. {
  3062. X86CPU *cpu = X86_CPU(obj);
  3063. CPUX86State *env = &cpu->env;
  3064. const int64_t min = 0;
  3065. const int64_t max = 0xff + 0xf;
  3066. Error *local_err = NULL;
  3067. int64_t value;
  3068. visit_type_int(v, name, &value, &local_err);
  3069. if (local_err) {
  3070. error_propagate(errp, local_err);
  3071. return;
  3072. }
  3073. if (value < min || value > max) {
  3074. error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
  3075. name ? name : "null", value, min, max);
  3076. return;
  3077. }
  3078. env->cpuid_version &= ~0xff00f00;
  3079. if (value > 0x0f) {
  3080. env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
  3081. } else {
  3082. env->cpuid_version |= value << 8;
  3083. }
  3084. }
  3085. static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
  3086. const char *name, void *opaque,
  3087. Error **errp)
  3088. {
  3089. X86CPU *cpu = X86_CPU(obj);
  3090. CPUX86State *env = &cpu->env;
  3091. int64_t value;
  3092. value = (env->cpuid_version >> 4) & 0xf;
  3093. value |= ((env->cpuid_version >> 16) & 0xf) << 4;
  3094. visit_type_int(v, name, &value, errp);
  3095. }
  3096. static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
  3097. const char *name, void *opaque,
  3098. Error **errp)
  3099. {
  3100. X86CPU *cpu = X86_CPU(obj);
  3101. CPUX86State *env = &cpu->env;
  3102. const int64_t min = 0;
  3103. const int64_t max = 0xff;
  3104. Error *local_err = NULL;
  3105. int64_t value;
  3106. visit_type_int(v, name, &value, &local_err);
  3107. if (local_err) {
  3108. error_propagate(errp, local_err);
  3109. return;
  3110. }
  3111. if (value < min || value > max) {
  3112. error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
  3113. name ? name : "null", value, min, max);
  3114. return;
  3115. }
  3116. env->cpuid_version &= ~0xf00f0;
  3117. env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
  3118. }
  3119. static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
  3120. const char *name, void *opaque,
  3121. Error **errp)
  3122. {
  3123. X86CPU *cpu = X86_CPU(obj);
  3124. CPUX86State *env = &cpu->env;
  3125. int64_t value;
  3126. value = env->cpuid_version & 0xf;
  3127. visit_type_int(v, name, &value, errp);
  3128. }
  3129. static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
  3130. const char *name, void *opaque,
  3131. Error **errp)
  3132. {
  3133. X86CPU *cpu = X86_CPU(obj);
  3134. CPUX86State *env = &cpu->env;
  3135. const int64_t min = 0;
  3136. const int64_t max = 0xf;
  3137. Error *local_err = NULL;
  3138. int64_t value;
  3139. visit_type_int(v, name, &value, &local_err);
  3140. if (local_err) {
  3141. error_propagate(errp, local_err);
  3142. return;
  3143. }
  3144. if (value < min || value > max) {
  3145. error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
  3146. name ? name : "null", value, min, max);
  3147. return;
  3148. }
  3149. env->cpuid_version &= ~0xf;
  3150. env->cpuid_version |= value & 0xf;
  3151. }
  3152. static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
  3153. {
  3154. X86CPU *cpu = X86_CPU(obj);
  3155. CPUX86State *env = &cpu->env;
  3156. char *value;
  3157. value = g_malloc(CPUID_VENDOR_SZ + 1);
  3158. x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
  3159. env->cpuid_vendor3);
  3160. return value;
  3161. }
  3162. static void x86_cpuid_set_vendor(Object *obj, const char *value,
  3163. Error **errp)
  3164. {
  3165. X86CPU *cpu = X86_CPU(obj);
  3166. CPUX86State *env = &cpu->env;
  3167. int i;
  3168. if (strlen(value) != CPUID_VENDOR_SZ) {
  3169. error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
  3170. return;
  3171. }
  3172. env->cpuid_vendor1 = 0;
  3173. env->cpuid_vendor2 = 0;
  3174. env->cpuid_vendor3 = 0;
  3175. for (i = 0; i < 4; i++) {
  3176. env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
  3177. env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
  3178. env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
  3179. }
  3180. }
  3181. static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
  3182. {
  3183. X86CPU *cpu = X86_CPU(obj);
  3184. CPUX86State *env = &cpu->env;
  3185. char *value;
  3186. int i;
  3187. value = g_malloc(48 + 1);
  3188. for (i = 0; i < 48; i++) {
  3189. value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
  3190. }
  3191. value[48] = '\0';
  3192. return value;
  3193. }
  3194. static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
  3195. Error **errp)
  3196. {
  3197. X86CPU *cpu = X86_CPU(obj);
  3198. CPUX86State *env = &cpu->env;
  3199. int c, len, i;
  3200. if (model_id == NULL) {
  3201. model_id = "";
  3202. }
  3203. len = strlen(model_id);
  3204. memset(env->cpuid_model, 0, 48);
  3205. for (i = 0; i < 48; i++) {
  3206. if (i >= len) {
  3207. c = '\0';
  3208. } else {
  3209. c = (uint8_t)model_id[i];
  3210. }
  3211. env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
  3212. }
  3213. }
  3214. static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
  3215. void *opaque, Error **errp)
  3216. {
  3217. X86CPU *cpu = X86_CPU(obj);
  3218. int64_t value;
  3219. value = cpu->env.tsc_khz * 1000;
  3220. visit_type_int(v, name, &value, errp);
  3221. }
  3222. static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
  3223. void *opaque, Error **errp)
  3224. {
  3225. X86CPU *cpu = X86_CPU(obj);
  3226. const int64_t min = 0;
  3227. const int64_t max = INT64_MAX;
  3228. Error *local_err = NULL;
  3229. int64_t value;
  3230. visit_type_int(v, name, &value, &local_err);
  3231. if (local_err) {
  3232. error_propagate(errp, local_err);
  3233. return;
  3234. }
  3235. if (value < min || value > max) {
  3236. error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
  3237. name ? name : "null", value, min, max);
  3238. return;
  3239. }
  3240. cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
  3241. }
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at an array of FEATURE_WORDS uint32_t feature words
     * (the property registration decides whether it is the enabled or the
     * filtered set).  Build an X86CPUFeatureWordInfoList describing each
     * CPUID-backed word and hand it to the visitor.  All list storage is
     * on the stack; the visitor copies what it needs, so nothing is freed
     * here.
     */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];
        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
  3274. static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
  3275. void *opaque, Error **errp)
  3276. {
  3277. X86CPU *cpu = X86_CPU(obj);
  3278. int64_t value = cpu->hyperv_spinlock_attempts;
  3279. visit_type_int(v, name, &value, errp);
  3280. }
  3281. static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
  3282. void *opaque, Error **errp)
  3283. {
  3284. const int64_t min = 0xFFF;
  3285. const int64_t max = UINT_MAX;
  3286. X86CPU *cpu = X86_CPU(obj);
  3287. Error *err = NULL;
  3288. int64_t value;
  3289. visit_type_int(v, name, &value, &err);
  3290. if (err) {
  3291. error_propagate(errp, err);
  3292. return;
  3293. }
  3294. if (value < min || value > max) {
  3295. error_setg(errp, "Property %s.%s doesn't take value %" PRId64
  3296. " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
  3297. object_get_typename(obj), name ? name : "null",
  3298. value, min, max);
  3299. return;
  3300. }
  3301. cpu->hyperv_spinlock_attempts = value;
  3302. }
/* Property type backing the "hv-spinlocks" qdev property; wires up the
 * getter/setter pair defined above.
 */
static const PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
  3308. /* Convert all '_' in a feature string option name to '-', to make feature
  3309. * name conform to QOM property naming rule, which uses '-' instead of '_'.
  3310. */
  3311. static inline void feat2prop(char *s)
  3312. {
  3313. while ((s = strchr(s, '_'))) {
  3314. *s = '-';
  3315. }
  3316. }
  3317. /* Return the feature property name for a feature flag bit */
  3318. static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
  3319. {
  3320. /* XSAVE components are automatically enabled by other features,
  3321. * so return the original feature name instead
  3322. */
  3323. if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
  3324. int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
  3325. if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
  3326. x86_ext_save_areas[comp].bits) {
  3327. w = x86_ext_save_areas[comp].feature;
  3328. bitnr = ctz32(x86_ext_save_areas[comp].bits);
  3329. }
  3330. }
  3331. assert(bitnr < 32);
  3332. assert(w < FEATURE_WORDS);
  3333. return feature_word_info[w].feat_names[bitnr];
  3334. }
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper around g_strcmp0() for g_list_find_custom() */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    /* Registers one qdev global property per "feat=val" entry (so the
     * settings apply to every CPU of @typename) and records bare
     * "+feat"/"-feat" entries in plus_features/minus_features for the
     * legacy-semantics pass.  NOTE: @features is modified in place by
     * strtok() and *eq++ = 0.
     */
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first call does anything: the globals registered here are
     * process-wide, so parsing a second feature string is a no-op.
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears both as +/-feat and as
         * feat=val; the +/- form wins (see comment above plus_features).
         */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts size suffixes (e.g. 2G) and is
         * converted to a plain number for the "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* NOTE(review): prop->driver keeps a reference to @typename;
         * presumably the caller passes a string that outlives the global
         * property -- confirm at call sites.
         */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
  3423. static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
  3424. static int x86_cpu_filter_features(X86CPU *cpu);
  3425. /* Check for missing features that may prevent the CPU class from
  3426. * running using the current machine and accelerator.
  3427. */
  3428. static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
  3429. strList **missing_feats)
  3430. {
  3431. X86CPU *xc;
  3432. FeatureWord w;
  3433. Error *err = NULL;
  3434. strList **next = missing_feats;
  3435. if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
  3436. strList *new = g_new0(strList, 1);
  3437. new->value = g_strdup("kvm");
  3438. *missing_feats = new;
  3439. return;
  3440. }
  3441. xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
  3442. x86_cpu_expand_features(xc, &err);
  3443. if (err) {
  3444. /* Errors at x86_cpu_expand_features should never happen,
  3445. * but in case it does, just report the model as not
  3446. * runnable at all using the "type" property.
  3447. */
  3448. strList *new = g_new0(strList, 1);
  3449. new->value = g_strdup("type");
  3450. *next = new;
  3451. next = &new->next;
  3452. }
  3453. x86_cpu_filter_features(xc);
  3454. for (w = 0; w < FEATURE_WORDS; w++) {
  3455. uint32_t filtered = xc->filtered_features[w];
  3456. int i;
  3457. for (i = 0; i < 32; i++) {
  3458. if (filtered & (1UL << i)) {
  3459. strList *new = g_new0(strList, 1);
  3460. new->value = g_strdup(x86_cpu_feature_name(w, i));
  3461. *next = new;
  3462. next = &new->next;
  3463. }
  3464. }
  3465. }
  3466. object_unref(OBJECT(xc));
  3467. }
  3468. /* Print all cpuid feature names in featureset
  3469. */
  3470. static void listflags(FILE *f, fprintf_function print, GList *features)
  3471. {
  3472. size_t len = 0;
  3473. GList *tmp;
  3474. for (tmp = features; tmp; tmp = tmp->next) {
  3475. const char *name = tmp->data;
  3476. if ((len + strlen(name) + 1) >= 75) {
  3477. print(f, "\n");
  3478. len = 0;
  3479. }
  3480. print(f, "%s%s", len == 0 ? " " : " ", name);
  3481. len += strlen(name) + 1;
  3482. }
  3483. print(f, "\n");
  3484. }
  3485. /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
  3486. static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
  3487. {
  3488. ObjectClass *class_a = (ObjectClass *)a;
  3489. ObjectClass *class_b = (ObjectClass *)b;
  3490. X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
  3491. X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
  3492. char *name_a, *name_b;
  3493. int ret;
  3494. if (cc_a->ordering != cc_b->ordering) {
  3495. ret = cc_a->ordering - cc_b->ordering;
  3496. } else {
  3497. name_a = x86_cpu_class_get_model_name(cc_a);
  3498. name_b = x86_cpu_class_get_model_name(cc_b);
  3499. ret = strcmp(name_a, name_b);
  3500. g_free(name_a);
  3501. g_free(name_b);
  3502. }
  3503. return ret;
  3504. }
  3505. static GSList *get_sorted_cpu_model_list(void)
  3506. {
  3507. GSList *list = object_class_get_list(TYPE_X86_CPU, false);
  3508. list = g_slist_sort(list, x86_cpu_list_compare);
  3509. return list;
  3510. }
  3511. static void x86_cpu_list_entry(gpointer data, gpointer user_data)
  3512. {
  3513. ObjectClass *oc = data;
  3514. X86CPUClass *cc = X86_CPU_CLASS(oc);
  3515. CPUListState *s = user_data;
  3516. char *name = x86_cpu_class_get_model_name(cc);
  3517. const char *desc = cc->model_description;
  3518. if (!desc && cc->cpu_def) {
  3519. desc = cc->cpu_def->model_id;
  3520. }
  3521. (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
  3522. name, desc);
  3523. g_free(name);
  3524. }
  3525. /* list available CPU models and flags */
  3526. void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
  3527. {
  3528. int i, j;
  3529. CPUListState s = {
  3530. .file = f,
  3531. .cpu_fprintf = cpu_fprintf,
  3532. };
  3533. GSList *list;
  3534. GList *names = NULL;
  3535. (*cpu_fprintf)(f, "Available CPUs:\n");
  3536. list = get_sorted_cpu_model_list();
  3537. g_slist_foreach(list, x86_cpu_list_entry, &s);
  3538. g_slist_free(list);
  3539. names = NULL;
  3540. for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
  3541. FeatureWordInfo *fw = &feature_word_info[i];
  3542. for (j = 0; j < 32; j++) {
  3543. if (fw->feat_names[j]) {
  3544. names = g_list_append(names, (gpointer)fw->feat_names[j]);
  3545. }
  3546. }
  3547. }
  3548. names = g_list_sort(names, (GCompareFunc)strcmp);
  3549. (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
  3550. listflags(f, cpu_fprintf, names);
  3551. (*cpu_fprintf)(f, "\n");
  3552. g_list_free(names);
  3553. }
  3554. static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
  3555. {
  3556. ObjectClass *oc = data;
  3557. X86CPUClass *cc = X86_CPU_CLASS(oc);
  3558. CpuDefinitionInfoList **cpu_list = user_data;
  3559. CpuDefinitionInfoList *entry;
  3560. CpuDefinitionInfo *info;
  3561. info = g_malloc0(sizeof(*info));
  3562. info->name = x86_cpu_class_get_model_name(cc);
  3563. x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
  3564. info->has_unavailable_features = true;
  3565. info->q_typename = g_strdup(object_class_get_name(oc));
  3566. info->migration_safe = cc->migration_safe;
  3567. info->has_migration_safe = true;
  3568. info->q_static = cc->static_model;
  3569. entry = g_malloc0(sizeof(*entry));
  3570. entry->value = info;
  3571. entry->next = *cpu_list;
  3572. *cpu_list = entry;
  3573. }
  3574. CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
  3575. {
  3576. CpuDefinitionInfoList *cpu_list = NULL;
  3577. GSList *list = get_sorted_cpu_model_list();
  3578. g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
  3579. g_slist_free(list);
  3580. return cpu_list;
  3581. }
/* Return the set of feature bits in word @w that the current accelerator
 * supports.  With @migratable_only, the result is further restricted to
 * migration-safe flags.  With no recognized accelerator, every bit is
 * reported as supported (~0).
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        /* KVM can report both CPUID- and MSR-based feature words. */
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF only knows about CPUID-based words; MSR words report 0. */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
  3616. static void x86_cpu_report_filtered_features(X86CPU *cpu)
  3617. {
  3618. FeatureWord w;
  3619. for (w = 0; w < FEATURE_WORDS; w++) {
  3620. report_unavailable_features(w, cpu->filtered_features[w]);
  3621. }
  3622. }
  3623. static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
  3624. {
  3625. PropValue *pv;
  3626. for (pv = props; pv->prop; pv++) {
  3627. if (!pv->value) {
  3628. continue;
  3629. }
  3630. object_property_parse(OBJECT(cpu), pv->value, pv->prop,
  3631. &error_abort);
  3632. }
  3633. }
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Copy the model's default feature words; properties applied later
     * (globals, +/-feat) can still override individual bits.
     */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, x2apic cannot be offered. */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Guests always see the hypervisor bit when running under QEMU. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t  ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
  3685. #ifndef CONFIG_USER_ONLY
  3686. /* Return a QDict containing keys for all properties that can be included
  3687. * in static expansion of CPU models. All properties set by x86_cpu_load_def()
  3688. * must be included in the dictionary.
  3689. */
  3690. static QDict *x86_cpu_static_props(void)
  3691. {
  3692. FeatureWord w;
  3693. int i;
  3694. static const char *props[] = {
  3695. "min-level",
  3696. "min-xlevel",
  3697. "family",
  3698. "model",
  3699. "stepping",
  3700. "model-id",
  3701. "vendor",
  3702. "lmce",
  3703. NULL,
  3704. };
  3705. static QDict *d;
  3706. if (d) {
  3707. return d;
  3708. }
  3709. d = qdict_new();
  3710. for (i = 0; props[i]; i++) {
  3711. qdict_put_null(d, props[i]);
  3712. }
  3713. for (w = 0; w < FEATURE_WORDS; w++) {
  3714. FeatureWordInfo *fi = &feature_word_info[w];
  3715. int bit;
  3716. for (bit = 0; bit < 32; bit++) {
  3717. if (!fi->feat_names[bit]) {
  3718. continue;
  3719. }
  3720. qdict_put_null(d, fi->feat_names[bit]);
  3721. }
  3722. }
  3723. return d;
  3724. }
  3725. /* Add an entry to @props dict, with the value for property. */
  3726. static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
  3727. {
  3728. QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
  3729. &error_abort);
  3730. qdict_put_obj(props, prop, value);
  3731. }
  3732. /* Convert CPU model data from X86CPU object to a property dictionary
  3733. * that can recreate exactly the same CPU model.
  3734. */
  3735. static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
  3736. {
  3737. QDict *sprops = x86_cpu_static_props();
  3738. const QDictEntry *e;
  3739. for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
  3740. const char *prop = qdict_entry_key(e);
  3741. x86_cpu_expand_prop(cpu, props, prop);
  3742. }
  3743. }
  3744. /* Convert CPU model data from X86CPU object to a property dictionary
  3745. * that can recreate exactly the same CPU model, including every
  3746. * writeable QOM property.
  3747. */
  3748. static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
  3749. {
  3750. ObjectPropertyIterator iter;
  3751. ObjectProperty *prop;
  3752. object_property_iter_init(&iter, OBJECT(cpu));
  3753. while ((prop = object_property_iter_next(&iter))) {
  3754. /* skip read-only or write-only properties */
  3755. if (!prop->get || !prop->set) {
  3756. continue;
  3757. }
  3758. /* "hotplugged" is the only property that is configurable
  3759. * on the command-line but will be set differently on CPUs
  3760. * created using "-cpu ... -smp ..." and by CPUs created
  3761. * on the fly by x86_cpu_from_model() for querying. Skip it.
  3762. */
  3763. if (!strcmp(prop->name, "hotplugged")) {
  3764. continue;
  3765. }
  3766. x86_cpu_expand_prop(cpu, props, prop->name);
  3767. }
  3768. }
  3769. static void object_apply_props(Object *obj, QDict *props, Error **errp)
  3770. {
  3771. const QDictEntry *prop;
  3772. Error *err = NULL;
  3773. for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
  3774. object_property_set_qobject(obj, qdict_entry_value(prop),
  3775. qdict_entry_key(prop), &err);
  3776. if (err) {
  3777. break;
  3778. }
  3779. }
  3780. error_propagate(errp, err);
  3781. }
/* Create X86CPU object according to model+props specification
 *
 * Returns a new reference owned by the caller, or NULL on failure
 * (with @errp set).
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    /* Resolve the model name to its QOM class. */
    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        /* Apply caller-supplied property overrides before expansion. */
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* NOTE(review): xc is still NULL when the model lookup failed;
         * this relies on object_unref() tolerating NULL -- confirm.
         */
        object_unref(OBJECT(xc));
        xc = NULL;
    }

    return xc;
}
  3812. CpuModelExpansionInfo *
  3813. qmp_query_cpu_model_expansion(CpuModelExpansionType type,
  3814. CpuModelInfo *model,
  3815. Error **errp)
  3816. {
  3817. X86CPU *xc = NULL;
  3818. Error *err = NULL;
  3819. CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
  3820. QDict *props = NULL;
  3821. const char *base_name;
  3822. xc = x86_cpu_from_model(model->name,
  3823. model->has_props ?
  3824. qobject_to(QDict, model->props) :
  3825. NULL, &err);
  3826. if (err) {
  3827. goto out;
  3828. }
  3829. props = qdict_new();
  3830. ret->model = g_new0(CpuModelInfo, 1);
  3831. ret->model->props = QOBJECT(props);
  3832. ret->model->has_props = true;
  3833. switch (type) {
  3834. case CPU_MODEL_EXPANSION_TYPE_STATIC:
  3835. /* Static expansion will be based on "base" only */
  3836. base_name = "base";
  3837. x86_cpu_to_dict(xc, props);
  3838. break;
  3839. case CPU_MODEL_EXPANSION_TYPE_FULL:
  3840. /* As we don't return every single property, full expansion needs
  3841. * to keep the original model name+props, and add extra
  3842. * properties on top of that.
  3843. */
  3844. base_name = model->name;
  3845. x86_cpu_to_dict_full(xc, props);
  3846. break;
  3847. default:
  3848. error_setg(&err, "Unsupported expansion type");
  3849. goto out;
  3850. }
  3851. x86_cpu_to_dict(xc, props);
  3852. ret->model->name = g_strdup(base_name);
  3853. out:
  3854. object_unref(OBJECT(xc));
  3855. if (err) {
  3856. error_propagate(errp, err);
  3857. qapi_free_CpuModelExpansionInfo(ret);
  3858. ret = NULL;
  3859. }
  3860. return ret;
  3861. }
  3862. #endif /* !CONFIG_USER_ONLY */
  3863. static gchar *x86_gdb_arch_name(CPUState *cs)
  3864. {
  3865. #ifdef TARGET_X86_64
  3866. return g_strdup("i386:x86-64");
  3867. #else
  3868. return g_strdup("i386");
  3869. #endif
  3870. }
  3871. static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
  3872. {
  3873. X86CPUDefinition *cpudef = data;
  3874. X86CPUClass *xcc = X86_CPU_CLASS(oc);
  3875. xcc->cpu_def = cpudef;
  3876. xcc->migration_safe = true;
  3877. }
  3878. static void x86_register_cpudef_type(X86CPUDefinition *def)
  3879. {
  3880. char *typename = x86_cpu_type_name(def->name);
  3881. TypeInfo ti = {
  3882. .name = typename,
  3883. .parent = TYPE_X86_CPU,
  3884. .class_init = x86_cpu_cpudef_class_init,
  3885. .class_data = def,
  3886. };
  3887. /* AMD aliases are handled at runtime based on CPUID vendor, so
  3888. * they shouldn't be set on the CPU model table.
  3889. */
  3890. assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
  3891. /* catch mistakes instead of silently truncating model_id when too long */
  3892. assert(def->model_id && strlen(def->model_id) <= 48);
  3893. type_register(&ti);
  3894. g_free(typename);
  3895. }
#if !defined(CONFIG_USER_ONLY)
/* Clear the CPUID APIC feature bit (CPUID[1].EDX).  NOTE(review): caller
 * context is not visible here -- presumably used by machine code when no
 * APIC can be provided; confirm at call sites.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
#endif /* !CONFIG_USER_ONLY */
  3902. void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
  3903. uint32_t *eax, uint32_t *ebx,
  3904. uint32_t *ecx, uint32_t *edx)
  3905. {
  3906. X86CPU *cpu = x86_env_get_cpu(env);
  3907. CPUState *cs = CPU(cpu);
  3908. uint32_t pkg_offset;
  3909. uint32_t limit;
  3910. uint32_t signature[3];
  3911. /* Calculate & apply limits for different index ranges */
  3912. if (index >= 0xC0000000) {
  3913. limit = env->cpuid_xlevel2;
  3914. } else if (index >= 0x80000000) {
  3915. limit = env->cpuid_xlevel;
  3916. } else if (index >= 0x40000000) {
  3917. limit = 0x40000001;
  3918. } else {
  3919. limit = env->cpuid_level;
  3920. }
  3921. if (index > limit) {
  3922. /* Intel documentation states that invalid EAX input will
  3923. * return the same information as EAX=cpuid_level
  3924. * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
  3925. */
  3926. index = env->cpuid_level;
  3927. }
  3928. switch(index) {
  3929. case 0:
  3930. *eax = env->cpuid_level;
  3931. *ebx = env->cpuid_vendor1;
  3932. *edx = env->cpuid_vendor2;
  3933. *ecx = env->cpuid_vendor3;
  3934. break;
  3935. case 1:
  3936. *eax = env->cpuid_version;
  3937. *ebx = (cpu->apic_id << 24) |
  3938. 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
  3939. *ecx = env->features[FEAT_1_ECX];
  3940. if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
  3941. *ecx |= CPUID_EXT_OSXSAVE;
  3942. }
  3943. *edx = env->features[FEAT_1_EDX];
  3944. if (cs->nr_cores * cs->nr_threads > 1) {
  3945. *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
  3946. *edx |= CPUID_HT;
  3947. }
  3948. break;
  3949. case 2:
  3950. /* cache info: needed for Pentium Pro compatibility */
  3951. if (cpu->cache_info_passthrough) {
  3952. host_cpuid(index, 0, eax, ebx, ecx, edx);
  3953. break;
  3954. }
  3955. *eax = 1; /* Number of CPUID[EAX=2] calls required */
  3956. *ebx = 0;
  3957. if (!cpu->enable_l3_cache) {
  3958. *ecx = 0;
  3959. } else {
  3960. *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
  3961. }
  3962. *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
  3963. (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
  3964. (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
  3965. break;
  3966. case 4:
  3967. /* cache info: needed for Core compatibility */
  3968. if (cpu->cache_info_passthrough) {
  3969. host_cpuid(index, count, eax, ebx, ecx, edx);
  3970. /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
  3971. *eax &= ~0xFC000000;
  3972. if ((*eax & 31) && cs->nr_cores > 1) {
  3973. *eax |= (cs->nr_cores - 1) << 26;
  3974. }
  3975. } else {
  3976. *eax = 0;
  3977. switch (count) {
  3978. case 0: /* L1 dcache info */
  3979. encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
  3980. 1, cs->nr_cores,
  3981. eax, ebx, ecx, edx);
  3982. break;
  3983. case 1: /* L1 icache info */
  3984. encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
  3985. 1, cs->nr_cores,
  3986. eax, ebx, ecx, edx);
  3987. break;
  3988. case 2: /* L2 cache info */
  3989. encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
  3990. cs->nr_threads, cs->nr_cores,
  3991. eax, ebx, ecx, edx);
  3992. break;
  3993. case 3: /* L3 cache info */
  3994. pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
  3995. if (cpu->enable_l3_cache) {
  3996. encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
  3997. (1 << pkg_offset), cs->nr_cores,
  3998. eax, ebx, ecx, edx);
  3999. break;
  4000. }
  4001. /* fall through */
  4002. default: /* end of info */
  4003. *eax = *ebx = *ecx = *edx = 0;
  4004. break;
  4005. }
  4006. }
  4007. break;
  4008. case 5:
  4009. /* MONITOR/MWAIT Leaf */
  4010. *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
  4011. *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
  4012. *ecx = cpu->mwait.ecx; /* flags */
  4013. *edx = cpu->mwait.edx; /* mwait substates */
  4014. break;
  4015. case 6:
  4016. /* Thermal and Power Leaf */
  4017. *eax = env->features[FEAT_6_EAX];
  4018. *ebx = 0;
  4019. *ecx = 0;
  4020. *edx = 0;
  4021. break;
  4022. case 7:
  4023. /* Structured Extended Feature Flags Enumeration Leaf */
  4024. if (count == 0) {
  4025. *eax = 0; /* Maximum ECX value for sub-leaves */
  4026. *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
  4027. *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
  4028. if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
  4029. *ecx |= CPUID_7_0_ECX_OSPKE;
  4030. }
  4031. *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
  4032. } else {
  4033. *eax = 0;
  4034. *ebx = 0;
  4035. *ecx = 0;
  4036. *edx = 0;
  4037. }
  4038. break;
  4039. case 9:
  4040. /* Direct Cache Access Information Leaf */
  4041. *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
  4042. *ebx = 0;
  4043. *ecx = 0;
  4044. *edx = 0;
  4045. break;
  4046. case 0xA:
  4047. /* Architectural Performance Monitoring Leaf */
  4048. if (kvm_enabled() && cpu->enable_pmu) {
  4049. KVMState *s = cs->kvm_state;
  4050. *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
  4051. *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
  4052. *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
  4053. *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
  4054. } else if (hvf_enabled() && cpu->enable_pmu) {
  4055. *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
  4056. *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
  4057. *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
  4058. *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
  4059. } else {
  4060. *eax = 0;
  4061. *ebx = 0;
  4062. *ecx = 0;
  4063. *edx = 0;
  4064. }
  4065. break;
  4066. case 0xB:
  4067. /* Extended Topology Enumeration Leaf */
  4068. if (!cpu->enable_cpuid_0xb) {
  4069. *eax = *ebx = *ecx = *edx = 0;
  4070. break;
  4071. }
  4072. *ecx = count & 0xff;
  4073. *edx = cpu->apic_id;
  4074. switch (count) {
  4075. case 0:
  4076. *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
  4077. *ebx = cs->nr_threads;
  4078. *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
  4079. break;
  4080. case 1:
  4081. *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
  4082. *ebx = cs->nr_cores * cs->nr_threads;
  4083. *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
  4084. break;
  4085. default:
  4086. *eax = 0;
  4087. *ebx = 0;
  4088. *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
  4089. }
  4090. assert(!(*eax & ~0x1f));
  4091. *ebx &= 0xffff; /* The count doesn't need to be reliable. */
  4092. break;
  4093. case 0xD: {
  4094. /* Processor Extended State */
  4095. *eax = 0;
  4096. *ebx = 0;
  4097. *ecx = 0;
  4098. *edx = 0;
  4099. if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
  4100. break;
  4101. }
  4102. if (count == 0) {
  4103. *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
  4104. *eax = env->features[FEAT_XSAVE_COMP_LO];
  4105. *edx = env->features[FEAT_XSAVE_COMP_HI];
  4106. *ebx = xsave_area_size(env->xcr0);
  4107. } else if (count == 1) {
  4108. *eax = env->features[FEAT_XSAVE];
  4109. } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
  4110. if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
  4111. const ExtSaveArea *esa = &x86_ext_save_areas[count];
  4112. *eax = esa->size;
  4113. *ebx = esa->offset;
  4114. }
  4115. }
  4116. break;
  4117. }
  4118. case 0x14: {
  4119. /* Intel Processor Trace Enumeration */
  4120. *eax = 0;
  4121. *ebx = 0;
  4122. *ecx = 0;
  4123. *edx = 0;
  4124. if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
  4125. !kvm_enabled()) {
  4126. break;
  4127. }
  4128. if (count == 0) {
  4129. *eax = INTEL_PT_MAX_SUBLEAF;
  4130. *ebx = INTEL_PT_MINIMAL_EBX;
  4131. *ecx = INTEL_PT_MINIMAL_ECX;
  4132. } else if (count == 1) {
  4133. *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
  4134. *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
  4135. }
  4136. break;
  4137. }
  4138. case 0x40000000:
  4139. /*
  4140. * CPUID code in kvm_arch_init_vcpu() ignores stuff
  4141. * set here, but we restrict to TCG none the less.
  4142. */
  4143. if (tcg_enabled() && cpu->expose_tcg) {
  4144. memcpy(signature, "TCGTCGTCGTCG", 12);
  4145. *eax = 0x40000001;
  4146. *ebx = signature[0];
  4147. *ecx = signature[1];
  4148. *edx = signature[2];
  4149. } else {
  4150. *eax = 0;
  4151. *ebx = 0;
  4152. *ecx = 0;
  4153. *edx = 0;
  4154. }
  4155. break;
  4156. case 0x40000001:
  4157. *eax = 0;
  4158. *ebx = 0;
  4159. *ecx = 0;
  4160. *edx = 0;
  4161. break;
  4162. case 0x80000000:
  4163. *eax = env->cpuid_xlevel;
  4164. *ebx = env->cpuid_vendor1;
  4165. *edx = env->cpuid_vendor2;
  4166. *ecx = env->cpuid_vendor3;
  4167. break;
  4168. case 0x80000001:
  4169. *eax = env->cpuid_version;
  4170. *ebx = 0;
  4171. *ecx = env->features[FEAT_8000_0001_ECX];
  4172. *edx = env->features[FEAT_8000_0001_EDX];
  4173. /* The Linux kernel checks for the CMPLegacy bit and
  4174. * discards multiple thread information if it is set.
  4175. * So don't set it here for Intel to make Linux guests happy.
  4176. */
  4177. if (cs->nr_cores * cs->nr_threads > 1) {
  4178. if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
  4179. env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
  4180. env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
  4181. *ecx |= 1 << 1; /* CmpLegacy bit */
  4182. }
  4183. }
  4184. break;
  4185. case 0x80000002:
  4186. case 0x80000003:
  4187. case 0x80000004:
  4188. *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
  4189. *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
  4190. *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
  4191. *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
  4192. break;
  4193. case 0x80000005:
  4194. /* cache info (L1 cache) */
  4195. if (cpu->cache_info_passthrough) {
  4196. host_cpuid(index, 0, eax, ebx, ecx, edx);
  4197. break;
  4198. }
  4199. *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
  4200. (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
  4201. *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
  4202. (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
  4203. *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
  4204. *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
  4205. break;
  4206. case 0x80000006:
  4207. /* cache info (L2 cache) */
  4208. if (cpu->cache_info_passthrough) {
  4209. host_cpuid(index, 0, eax, ebx, ecx, edx);
  4210. break;
  4211. }
  4212. *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
  4213. (L2_DTLB_2M_ENTRIES << 16) | \
  4214. (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
  4215. (L2_ITLB_2M_ENTRIES);
  4216. *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
  4217. (L2_DTLB_4K_ENTRIES << 16) | \
  4218. (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
  4219. (L2_ITLB_4K_ENTRIES);
  4220. encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
  4221. cpu->enable_l3_cache ?
  4222. env->cache_info_amd.l3_cache : NULL,
  4223. ecx, edx);
  4224. break;
  4225. case 0x80000007:
  4226. *eax = 0;
  4227. *ebx = 0;
  4228. *ecx = 0;
  4229. *edx = env->features[FEAT_8000_0007_EDX];
  4230. break;
  4231. case 0x80000008:
  4232. /* virtual & phys address size in low 2 bytes. */
  4233. if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
  4234. /* 64 bit processor */
  4235. *eax = cpu->phys_bits; /* configurable physical bits */
  4236. if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
  4237. *eax |= 0x00003900; /* 57 bits virtual */
  4238. } else {
  4239. *eax |= 0x00003000; /* 48 bits virtual */
  4240. }
  4241. } else {
  4242. *eax = cpu->phys_bits;
  4243. }
  4244. *ebx = env->features[FEAT_8000_0008_EBX];
  4245. *ecx = 0;
  4246. *edx = 0;
  4247. if (cs->nr_cores * cs->nr_threads > 1) {
  4248. *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
  4249. }
  4250. break;
  4251. case 0x8000000A:
  4252. if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
  4253. *eax = 0x00000001; /* SVM Revision */
  4254. *ebx = 0x00000010; /* nr of ASIDs */
  4255. *ecx = 0;
  4256. *edx = env->features[FEAT_SVM]; /* optional features */
  4257. } else {
  4258. *eax = 0;
  4259. *ebx = 0;
  4260. *ecx = 0;
  4261. *edx = 0;
  4262. }
  4263. break;
  4264. case 0x8000001D:
  4265. *eax = 0;
  4266. switch (count) {
  4267. case 0: /* L1 dcache info */
  4268. encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
  4269. eax, ebx, ecx, edx);
  4270. break;
  4271. case 1: /* L1 icache info */
  4272. encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
  4273. eax, ebx, ecx, edx);
  4274. break;
  4275. case 2: /* L2 cache info */
  4276. encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
  4277. eax, ebx, ecx, edx);
  4278. break;
  4279. case 3: /* L3 cache info */
  4280. encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
  4281. eax, ebx, ecx, edx);
  4282. break;
  4283. default: /* end of info */
  4284. *eax = *ebx = *ecx = *edx = 0;
  4285. break;
  4286. }
  4287. break;
  4288. case 0x8000001E:
  4289. assert(cpu->core_id <= 255);
  4290. encode_topo_cpuid8000001e(cs, cpu,
  4291. eax, ebx, ecx, edx);
  4292. break;
  4293. case 0xC0000000:
  4294. *eax = env->cpuid_xlevel2;
  4295. *ebx = 0;
  4296. *ecx = 0;
  4297. *edx = 0;
  4298. break;
  4299. case 0xC0000001:
  4300. /* Support for VIA CPU's CPUID instruction */
  4301. *eax = env->cpuid_version;
  4302. *ebx = 0;
  4303. *ecx = 0;
  4304. *edx = env->features[FEAT_C000_0001_EDX];
  4305. break;
  4306. case 0xC0000002:
  4307. case 0xC0000003:
  4308. case 0xC0000004:
  4309. /* Reserved for the future, and now filled with zero */
  4310. *eax = 0;
  4311. *ebx = 0;
  4312. *ecx = 0;
  4313. *edx = 0;
  4314. break;
  4315. case 0x8000001F:
  4316. *eax = sev_enabled() ? 0x2 : 0;
  4317. *ebx = sev_get_cbit_position();
  4318. *ebx |= sev_get_reduced_phys_bits() << 6;
  4319. *ecx = 0;
  4320. *edx = 0;
  4321. break;
  4322. default:
  4323. /* reserved values: zero */
  4324. *eax = 0;
  4325. *ebx = 0;
  4326. *ecx = 0;
  4327. *edx = 0;
  4328. break;
  4329. }
  4330. }
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Wipe everything up to end_reset_fields; fields after it survive reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 with EIP 0xfff0 (set below) places execution at the
     * reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature (family/model/stepping) after reset. */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted; only the BSP runs at reset. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
  4437. #ifndef CONFIG_USER_ONLY
  4438. bool cpu_is_bsp(X86CPU *cpu)
  4439. {
  4440. return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
  4441. }
  4442. /* TODO: remove me, when reset over QOM tree is implemented */
  4443. static void x86_cpu_machine_reset_cb(void *opaque)
  4444. {
  4445. X86CPU *cpu = opaque;
  4446. cpu_reset(CPU(cpu));
  4447. }
  4448. #endif
  4449. static void mce_init(X86CPU *cpu)
  4450. {
  4451. CPUX86State *cenv = &cpu->env;
  4452. unsigned int bank;
  4453. if (((cenv->cpuid_version >> 8) & 0xf) >= 6
  4454. && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
  4455. (CPUID_MCE | CPUID_MCA)) {
  4456. cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
  4457. (cpu->enable_lmce ? MCG_LMCE_P : 0);
  4458. cenv->mcg_ctl = ~(uint64_t)0;
  4459. for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
  4460. cenv->mce_banks[bank * 4] = ~(uint64_t)0;
  4461. }
  4462. }
  4463. }
  4464. #ifndef CONFIG_USER_ONLY
  4465. APICCommonClass *apic_get_class(void)
  4466. {
  4467. const char *apic_type = "apic";
  4468. /* TODO: in-kernel irqchip for hvf */
  4469. if (kvm_apic_in_kernel()) {
  4470. apic_type = "kvm-apic";
  4471. } else if (xen_enabled()) {
  4472. apic_type = "xen-apic";
  4473. }
  4474. return APIC_COMMON_CLASS(object_class_by_name(apic_type));
  4475. }
/* Instantiate the local APIC device for this CPU and attach it as a QOM
 * child named "lapic". */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the object_new() reference; the child property keeps it alive. */
    object_unref(OBJECT(cpu->apic_state));
    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the CPU's local APIC (if one was created) and map the APIC MMIO
 * window into system memory the first time any CPU gets here. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    /* Shared across all CPUs: the MMIO region must be mapped only once. */
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* Machine-init-done notifier: if the machine exposes /machine/smram, alias
 * its low 4GiB into this CPU's SMM address space with elevated priority. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* Priority 1 so SMRAM overrides the normal memory alias (prio 0). */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
  4523. #else
/* User-mode emulation has no APIC; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
  4527. #endif
  4528. /* Note: Only safe for use on x86(-64) hosts */
  4529. static uint32_t x86_host_phys_bits(void)
  4530. {
  4531. uint32_t eax;
  4532. uint32_t host_phys_bits;
  4533. host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
  4534. if (eax >= 0x80000008) {
  4535. host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
  4536. /* Note: According to AMD doc 25481 rev 2.34 they have a field
  4537. * at 23:16 that can specify a maximum physical address bits for
  4538. * the guest that can override this value; but I've not seen
  4539. * anything with that set.
  4540. */
  4541. host_phys_bits = eax & 0xff;
  4542. } else {
  4543. /* It's an odd 64 bit machine that doesn't have the leaf for
  4544. * physical address bits; fall back to 36 that's most older
  4545. * Intel.
  4546. */
  4547. host_phys_bits = 36;
  4548. }
  4549. return host_phys_bits;
  4550. }
  4551. static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
  4552. {
  4553. if (*min < value) {
  4554. *min = value;
  4555. }
  4556. }
  4557. /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
  4558. static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
  4559. {
  4560. CPUX86State *env = &cpu->env;
  4561. FeatureWordInfo *fi = &feature_word_info[w];
  4562. uint32_t eax = fi->cpuid.eax;
  4563. uint32_t region = eax & 0xF0000000;
  4564. assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
  4565. if (!env->features[w]) {
  4566. return;
  4567. }
  4568. switch (region) {
  4569. case 0x00000000:
  4570. x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
  4571. break;
  4572. case 0x80000000:
  4573. x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
  4574. break;
  4575. case 0xC0000000:
  4576. x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
  4577. break;
  4578. }
  4579. }
  4580. /* Calculate XSAVE components based on the configured CPU feature flags */
  4581. static void x86_cpu_enable_xsave_components(X86CPU *cpu)
  4582. {
  4583. CPUX86State *env = &cpu->env;
  4584. int i;
  4585. uint64_t mask;
  4586. if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
  4587. return;
  4588. }
  4589. mask = 0;
  4590. for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
  4591. const ExtSaveArea *esa = &x86_ext_save_areas[i];
  4592. if (env->features[esa->feature] & esa->bits) {
  4593. mask |= (1ULL << i);
  4594. }
  4595. }
  4596. env->features[FEAT_XSAVE_COMP_LO] = mask;
  4597. env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
  4598. }
  4599. /***** Steps involved on loading and filtering CPUID data
  4600. *
  4601. * When initializing and realizing a CPU object, the steps
  4602. * involved in setting up CPUID data are:
  4603. *
  4604. * 1) Loading CPU model definition (X86CPUDefinition). This is
  4605. * implemented by x86_cpu_load_def() and should be completely
  4606. * transparent, as it is done automatically by instance_init.
  4607. * No code should need to look at X86CPUDefinition structs
  4608. * outside instance_init.
  4609. *
  4610. * 2) CPU expansion. This is done by realize before CPUID
  4611. * filtering, and will make sure host/accelerator data is
  4612. * loaded for CPU models that depend on host capabilities
  4613. * (e.g. "host"). Done by x86_cpu_expand_features().
  4614. *
  4615. * 3) CPUID filtering. This initializes extra data related to
  4616. * CPUID, and checks if the host supports all capabilities
  4617. * required by the CPU. Runnability of a CPU model is
  4618. * determined at this step. Done by x86_cpu_filter_features().
  4619. *
  4620. * Some operations don't require all steps to be performed.
  4621. * More precisely:
  4622. *
  4623. * - CPU instance creation (instance_init) will run only CPU
  4624. * model loading. CPU expansion can't run at instance_init-time
  4625. * because host/accelerator data may be not available yet.
  4626. * - CPU realization will perform both CPU model expansion and CPUID
  4627. * filtering, and return an error in case one of them fails.
  4628. * - query-cpu-definitions needs to run all 3 steps. It needs
  4629. * to run CPUID filtering, as the 'unavailable-features'
  4630. * field is set based on the filtering results.
  4631. * - The query-cpu-model-expansion QMP command only needs to run
  4632. * CPU model loading and CPU expansion. It should not filter
  4633. * any CPUID data based on host capabilities.
  4634. */
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply global "+feature" requests as QOM boolean properties. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply global "-feature" requests as QOM boolean properties. */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set.
     * UINT32_MAX serves as the "not set by the user" sentinel here. */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
  4716. /*
  4717. * Finishes initialization of CPUID data, filters CPU feature
  4718. * words based on host availability of each feature.
  4719. *
  4720. * Returns: 0 if all flags are supported by the host, non-zero otherwise.
  4721. */
  4722. static int x86_cpu_filter_features(X86CPU *cpu)
  4723. {
  4724. CPUX86State *env = &cpu->env;
  4725. FeatureWord w;
  4726. int rv = 0;
  4727. for (w = 0; w < FEATURE_WORDS; w++) {
  4728. uint32_t host_feat =
  4729. x86_cpu_get_supported_feature_word(w, false);
  4730. uint32_t requested_features = env->features[w];
  4731. env->features[w] &= host_feat;
  4732. cpu->filtered_features[w] = requested_features & ~env->features[w];
  4733. if (cpu->filtered_features[w]) {
  4734. rv = 1;
  4735. }
  4736. }
  4737. if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
  4738. kvm_enabled()) {
  4739. KVMState *s = CPU(cpu)->kvm_state;
  4740. uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
  4741. uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
  4742. uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
  4743. uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
  4744. uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
  4745. if (!eax_0 ||
  4746. ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
  4747. ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
  4748. ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
  4749. ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
  4750. INTEL_PT_ADDR_RANGES_NUM) ||
  4751. ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
  4752. (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
  4753. (ecx_0 & INTEL_PT_IP_LIP)) {
  4754. /*
  4755. * Processor Trace capabilities aren't configurable, so if the
  4756. * host can't emulate the capabilities we report on
  4757. * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
  4758. */
  4759. env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
  4760. cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
  4761. rv = 1;
  4762. }
  4763. }
  4764. return rv;
  4765. }
/* Vendor checks against the three CPUID vendor-string registers
 * (EBX/EDX/ECX of leaf 0 as stored in cpuid_vendor1..3). */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize: expand and filter CPUID features, size the physical
 * address space, set up cache info, create/realize the APIC, wire up the
 * TCG SMM address space, and finally start the vCPU. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Warn about (and with enforce_cpuid, reject) features the
     * accelerator cannot provide. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                           " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);
        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
  4982. static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
  4983. {
  4984. X86CPU *cpu = X86_CPU(dev);
  4985. X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
  4986. Error *local_err = NULL;
  4987. #ifndef CONFIG_USER_ONLY
  4988. cpu_remove_sync(CPU(dev));
  4989. qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
  4990. #endif
  4991. if (cpu->apic_state) {
  4992. object_unparent(OBJECT(cpu->apic_state));
  4993. cpu->apic_state = NULL;
  4994. }
  4995. xcc->parent_unrealize(dev, &local_err);
  4996. if (local_err != NULL) {
  4997. error_propagate(errp, local_err);
  4998. return;
  4999. }
  5000. }
/* Opaque state attached to a per-feature-bit boolean QOM property:
 * identifies which feature word the property lives in and which bit(s)
 * of that word it controls (several bits when the same name is
 * registered more than once). */
typedef struct BitProperty {
    FeatureWord w;   /* index into env->features[] */
    uint32_t mask;   /* bit(s) within feature word w */
} BitProperty;
  5005. static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
  5006. void *opaque, Error **errp)
  5007. {
  5008. X86CPU *cpu = X86_CPU(obj);
  5009. BitProperty *fp = opaque;
  5010. uint32_t f = cpu->env.features[fp->w];
  5011. bool value = (f & fp->mask) == fp->mask;
  5012. visit_type_bool(v, name, &value, errp);
  5013. }
  5014. static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
  5015. void *opaque, Error **errp)
  5016. {
  5017. DeviceState *dev = DEVICE(obj);
  5018. X86CPU *cpu = X86_CPU(obj);
  5019. BitProperty *fp = opaque;
  5020. Error *local_err = NULL;
  5021. bool value;
  5022. if (dev->realized) {
  5023. qdev_prop_set_after_realize(dev, name, errp);
  5024. return;
  5025. }
  5026. visit_type_bool(v, name, &value, &local_err);
  5027. if (local_err) {
  5028. error_propagate(errp, local_err);
  5029. return;
  5030. }
  5031. if (value) {
  5032. cpu->env.features[fp->w] |= fp->mask;
  5033. } else {
  5034. cpu->env.features[fp->w] &= ~fp->mask;
  5035. }
  5036. cpu->env.user_features[fp->w] |= fp->mask;
  5037. }
  5038. static void x86_cpu_release_bit_prop(Object *obj, const char *name,
  5039. void *opaque)
  5040. {
  5041. BitProperty *prop = opaque;
  5042. g_free(prop);
  5043. }
  5044. /* Register a boolean property to get/set a single bit in a uint32_t field.
  5045. *
  5046. * The same property name can be registered multiple times to make it affect
  5047. * multiple bits in the same FeatureWord. In that case, the getter will return
  5048. * true only if all bits are set.
  5049. */
  5050. static void x86_cpu_register_bit_prop(X86CPU *cpu,
  5051. const char *prop_name,
  5052. FeatureWord w,
  5053. int bitnr)
  5054. {
  5055. BitProperty *fp;
  5056. ObjectProperty *op;
  5057. uint32_t mask = (1UL << bitnr);
  5058. op = object_property_find(OBJECT(cpu), prop_name, NULL);
  5059. if (op) {
  5060. fp = op->opaque;
  5061. assert(fp->w == w);
  5062. fp->mask |= mask;
  5063. } else {
  5064. fp = g_new0(BitProperty, 1);
  5065. fp->w = w;
  5066. fp->mask = mask;
  5067. object_property_add(OBJECT(cpu), prop_name, "bool",
  5068. x86_cpu_get_bit_prop,
  5069. x86_cpu_set_bit_prop,
  5070. x86_cpu_release_bit_prop, fp, &error_abort);
  5071. }
  5072. }
  5073. static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
  5074. FeatureWord w,
  5075. int bitnr)
  5076. {
  5077. FeatureWordInfo *fi = &feature_word_info[w];
  5078. const char *name = fi->feat_names[bitnr];
  5079. if (!name) {
  5080. return;
  5081. }
  5082. /* Property names should use "-" instead of "_".
  5083. * Old names containing underscores are registered as aliases
  5084. * using object_property_add_alias()
  5085. */
  5086. assert(!strchr(name, '_'));
  5087. /* aliases don't use "|" delimiters anymore, they are registered
  5088. * manually using object_property_add_alias() */
  5089. assert(!strchr(name, '|'));
  5090. x86_cpu_register_bit_prop(cpu, name, w, bitnr);
  5091. }
  5092. static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
  5093. {
  5094. X86CPU *cpu = X86_CPU(cs);
  5095. CPUX86State *env = &cpu->env;
  5096. GuestPanicInformation *panic_info = NULL;
  5097. if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
  5098. panic_info = g_malloc0(sizeof(GuestPanicInformation));
  5099. panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
  5100. assert(HV_CRASH_PARAMS >= 5);
  5101. panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
  5102. panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
  5103. panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
  5104. panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
  5105. panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
  5106. }
  5107. return panic_info;
  5108. }
  5109. static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
  5110. const char *name, void *opaque,
  5111. Error **errp)
  5112. {
  5113. CPUState *cs = CPU(obj);
  5114. GuestPanicInformation *panic_info;
  5115. if (!cs->crash_occurred) {
  5116. error_setg(errp, "No crash occured");
  5117. return;
  5118. }
  5119. panic_info = x86_cpu_get_crash_info(cs);
  5120. if (panic_info == NULL) {
  5121. error_setg(errp, "No crash information");
  5122. return;
  5123. }
  5124. visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
  5125. errp);
  5126. qapi_free_GuestPanicInformation(panic_info);
  5127. }
/*
 * QOM instance_init for X86CPU: wires up the env pointer, registers the
 * per-CPU QOM properties (CPUID version fields, vendor/model-id strings,
 * TSC frequency, feature-word introspection, crash info), creates one
 * boolean property per named feature bit, adds legacy-spelling aliases,
 * and finally loads the class's CPU model definition, if any.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identity strings. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the enabled and host-filtered feature
     * words; both share the same getter, differing only in the opaque
     * array they read. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;
        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy spellings kept for command-line compatibility; the
     * canonical names use "-" (see x86_cpu_register_feature_bit_props). */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* Named CPU models carry a definition; the abstract base does not. */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
  5201. static int64_t x86_cpu_get_arch_id(CPUState *cs)
  5202. {
  5203. X86CPU *cpu = X86_CPU(cs);
  5204. return cpu->apic_id;
  5205. }
  5206. static bool x86_cpu_get_paging_enabled(const CPUState *cs)
  5207. {
  5208. X86CPU *cpu = X86_CPU(cs);
  5209. return cpu->env.cr[0] & CR0_PG_MASK;
  5210. }
  5211. static void x86_cpu_set_pc(CPUState *cs, vaddr value)
  5212. {
  5213. X86CPU *cpu = X86_CPU(cs);
  5214. cpu->env.eip = value;
  5215. }
  5216. static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
  5217. {
  5218. X86CPU *cpu = X86_CPU(cs);
  5219. cpu->env.eip = tb->pc - tb->cs_base;
  5220. }
/*
 * Return the highest-priority interrupt type in @interrupt_request that
 * the CPU can take in its current state, or 0 if none is deliverable.
 *
 * NOTE: the cascade below encodes x86 delivery priority (POLL, SIPI,
 * then — gated on GIF — SMI, NMI, MCE, external hard interrupts, SVM
 * virtual interrupts); do not reorder the checks.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below requires GIF=1 (SVM global interrupt flag). */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            /* SMI is blocked while already inside SMM */
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            /* NMI is blocked while a previous NMI is being handled */
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            /* External interrupts: either SVM virtual-interrupt masking
             * is active and the host IF (HIF) allows delivery, or the
             * guest IF is set and there is no STI/MOV-SS shadow. */
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            /* SVM virtual interrupt, gated on IF and the IRQ shadow */
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
  5259. static bool x86_cpu_has_work(CPUState *cs)
  5260. {
  5261. return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
  5262. }
  5263. static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
  5264. {
  5265. X86CPU *cpu = X86_CPU(cs);
  5266. CPUX86State *env = &cpu->env;
  5267. info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
  5268. : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
  5269. : bfd_mach_i386_i8086);
  5270. info->print_insn = print_insn_i386;
  5271. info->cap_arch = CS_ARCH_X86;
  5272. info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
  5273. : env->hflags & HF_CS32_MASK ? CS_MODE_32
  5274. : CS_MODE_16);
  5275. info->cap_insn_unit = 1;
  5276. info->cap_insn_split = 8;
  5277. }
/*
 * Recompute the derived-state bits of env->hflags from the architectural
 * registers (CR0, CR4, EFER, EFLAGS, segment descriptors). Bits outside
 * HFLAG_COPY_MASK are preserved as-is; everything inside it is rebuilt.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    /* Keep the bits we do not recompute here. */
    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of the SS descriptor. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS map onto contiguous hflag bits via a single shift. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF/VM/IOPL occupy the same bit positions in EFLAGS and hflags. */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: long mode with CS.L set. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* CS32/SS32 follow the D/B bit of the respective descriptors. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86, or 16-bit code: always add seg bases. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Protected 32-bit: segment bases matter only if non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
/* qdev property table for X86CPU. "hv-*" properties enable Hyper-V
 * enlightenments; "x-*" properties are internal/experimental. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned (-1) and are
     * filled in during realize. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set by the user" for the CPUID level caps. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * class_init for the abstract TYPE_X86_CPU: installs the x86
 * implementations of the DeviceClass and CPUClass hooks shared by every
 * x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/unrealize, keeping the parent handlers callable. */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory introspection, dumps, migration. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
/* Abstract base type for all x86 CPU models; concrete models are
 * registered per-cpudef in x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Static + migration-safe: suitable as an expansion baseline. */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* List after the named models in query-cpu-definitions output. */
    xcc->ordering = 8;
}
/* Type registration for the featureless "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
  5473. static void x86_cpu_register_types(void)
  5474. {
  5475. int i;
  5476. type_register_static(&x86_cpu_type_info);
  5477. for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
  5478. x86_register_cpudef_type(&builtin_x86_defs[i]);
  5479. }
  5480. type_register_static(&max_x86_cpu_type_info);
  5481. type_register_static(&x86_base_cpu_type_info);
  5482. #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
  5483. type_register_static(&host_x86_cpu_type_info);
  5484. #endif
  5485. }
  5486. type_init(x86_cpu_register_types)