/* The VMware backdoor probe relies on x86 port I/O, so it is only
 * compiled on x86/x86_64 (see INCLUDE_VMWARE_BDOOR uses below). */
12 #if (defined(__x86_64__) || defined(__i386__))
13 # define INCLUDE_VMWARE_BDOOR
16 #ifdef INCLUDE_VMWARE_BDOOR
26 /* Xen Domain feature flag used for /sys/hypervisor/properties/features */
27 #define XENFEAT_supervisor_mode_kernel 3
28 #define XENFEAT_mmu_pt_update_preserve_ad 5
29 #define XENFEAT_hvm_callback_vector 8
/* Masks over the features word read from sysfs: a PV guest advertises
 * mmu_pt_update_preserve_ad; a PVH guest advertises both
 * supervisor_mode_kernel and hvm_callback_vector (both bits must match,
 * see the XEN_FEATURES_PVH_MASK comparison in lscpu_read_virtualization). */
31 #define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
32 #define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
33 | (1U << XENFEAT_hvm_callback_vector) )
/* PCI vendor IDs of each hypervisor's emulated devices, indexed by
 * VIRT_VENDOR_*; 0x0000 means "no PCI-based probe for this vendor".
 * Used together with hv_graphics_pci[] by has_pci_device(). */
34 static const int hv_vendor_pci
[] = {
35 [VIRT_VENDOR_NONE
] = 0x0000,
36 [VIRT_VENDOR_XEN
] = 0x5853,
37 [VIRT_VENDOR_KVM
] = 0x0000,
38 [VIRT_VENDOR_MSHV
] = 0x1414,
39 [VIRT_VENDOR_VMWARE
] = 0x15ad,
40 [VIRT_VENDOR_VBOX
] = 0x80ee,
/* PCI device IDs of the graphics adapter each hypervisor emulates,
 * indexed by VIRT_VENDOR_*; paired with hv_vendor_pci[] above. */
43 static const int hv_graphics_pci
[] = {
44 [VIRT_VENDOR_NONE
] = 0x0000,
45 [VIRT_VENDOR_XEN
] = 0x0001,
46 [VIRT_VENDOR_KVM
] = 0x0000,
47 [VIRT_VENDOR_MSHV
] = 0x5353,
48 [VIRT_VENDOR_VMWARE
] = 0x0710,
49 [VIRT_VENDOR_VBOX
] = 0xbeef,
/*
 * Read a 16/32-bit little-endian-in-memory value from an arbitrary byte
 * offset inside a DMI/SMBIOS buffer.
 *
 * The previous versions dereferenced a cast pointer
 * ((const uint16_t *)(x)), which is undefined behavior on two counts:
 * it violates strict aliasing, and SMBIOS field offsets (e.g. buf + 0x06)
 * are not naturally aligned.  memcpy() is well-defined for any alignment
 * and compiles to the same single load on x86.
 */
static inline uint16_t lscpu_read_word(const void *p)
{
	uint16_t w;

	memcpy(&w, p, sizeof(w));
	return w;
}

static inline uint32_t lscpu_read_dword(const void *p)
{
	uint32_t d;

	memcpy(&d, p, sizeof(d));
	return d;
}

/* keep the original call-site interface */
#define WORD(x) lscpu_read_word(x)
#define DWORD(x) lscpu_read_dword(x)
/*
 * get_mem_chunk -- copy @len bytes starting at offset @base from the
 * @devmem device (e.g. /dev/mem) into a freshly malloc()ed buffer.
 *
 * Ownership of the returned buffer passes to the caller.
 * NOTE(review): the declarations and the error/cleanup path are not
 * visible in this chunk; presumably the function returns NULL and
 * releases fd/p on any failure -- confirm against the full source.
 */
55 void *get_mem_chunk(size_t base
, size_t len
, const char *devmem
)
60 if ((fd
= open(devmem
, O_RDONLY
)) < 0)
/* allocate the destination buffer before seeking */
63 if (!(p
= malloc(len
)))
65 if (lseek(fd
, base
, SEEK_SET
) == -1)
/* read_all() is a project helper; presumably it retries short reads --
 * verify in the util-linux all-io helpers */
67 if (read_all(fd
, p
, len
) == -1)
/*
 * hypervisor_from_dmi_table -- read the DMI table at @base/@len from
 * @devmem, parse it into a struct dmi_info, and identify vendors that
 * are only recognizable from DMI strings.
 *
 * @num is the structure count from the SMBIOS entry point.
 * Returns a VIRT_VENDOR_* value; VIRT_VENDOR_NONE when nothing matches.
 * NOTE(review): the NULL-check on data and the free()/error paths are
 * not visible in this chunk.
 */
79 static int hypervisor_from_dmi_table(uint32_t base
, uint16_t len
,
80 uint16_t num
, const char *devmem
)
83 int rc
= VIRT_VENDOR_NONE
;
/* pull the raw table out of /dev/mem (or the sysfs DMI file) */
86 data
= get_mem_chunk(base
, len
, devmem
);
90 memset(&di
, 0, sizeof(struct dmi_info
));
91 rc
= parse_dmi_table(len
, num
, data
, &di
);
/* string matching for hypervisors with no CPUID/PCI signature */
95 if (di
.manufacturer
&& !strcmp(di
.manufacturer
, "innotek GmbH"))
96 rc
= VIRT_VENDOR_INNOTEK
;
97 else if (di
.manufacturer
&& strstr(di
.manufacturer
, "HITACHI") &&
98 di
.product
&& strstr(di
.product
, "LPAR"))
99 rc
= VIRT_VENDOR_HITACHI
;
100 else if (di
.vendor
&& !strcmp(di
.vendor
, "Parallels"))
101 rc
= VIRT_VENDOR_PARALLELS
;
/*
 * Validate the byte checksum of a table/entry-point header: the bytes
 * buf[0..len-1] must sum to zero modulo 256.
 *
 * Returns non-zero when the checksum is valid, 0 otherwise.
 * An empty range (len == 0) trivially sums to zero and is "valid".
 */
static int checksum(const uint8_t *buf, size_t len)
{
	uint8_t total = 0;
	size_t idx;

	for (idx = 0; idx < len; idx++)
		total += buf[idx];	/* uint8_t arithmetic wraps mod 256 */

	return total == 0;
}
117 #if defined(__x86_64__) || defined(__i386__)
/*
 * Decode a legacy "_DMI_" entry point (pre-SMBIOS): verify its 15-byte
 * checksum, then hand the table to hypervisor_from_dmi_table() using the
 * 32-bit table address at offset 0x08 and the length at offset 0x06.
 * NOTE(review): the failure return and the trailing arguments are not
 * visible in this chunk.
 */
118 static int hypervisor_decode_legacy(uint8_t *buf
, const char *devmem
)
120 if (!checksum(buf
, 0x0F))
123 return hypervisor_from_dmi_table(DWORD(buf
+ 0x08), WORD(buf
+ 0x06),
/*
 * Decode an "_SM_" (SMBIOS) entry point: verify the entry-point checksum
 * (its length lives at offset 0x05), check the intermediate "_DMI_"
 * anchor at offset 0x10 plus its own 15-byte checksum, then decode the
 * table at the 32-bit address (0x18) with length (0x16).
 * NOTE(review): the failure return and trailing arguments are elided here.
 */
129 static int hypervisor_decode_smbios(uint8_t *buf
, const char *devmem
)
131 if (!checksum(buf
, buf
[0x05])
132 || memcmp(buf
+ 0x10, "_DMI_", 5) != 0
133 || !checksum(buf
+ 0x10, 0x0F))
136 return hypervisor_from_dmi_table(DWORD(buf
+ 0x18), WORD(buf
+ 0x16),
142 * Probe for EFI interface
144 #define EFI_NOT_FOUND (-1)
145 #define EFI_NO_SMBIOS (-2)
/*
 * address_from_efi -- look up the SMBIOS entry-point address advertised
 * by the EFI system table and store it in *address.
 *
 * Returns 0 on success, EFI_NOT_FOUND when no EFI systab file exists.
 * NOTE(review): the EFI_NO_SMBIOS return and the parsing of the value
 * after '=' are partially elided in this chunk -- confirm against the
 * full source.
 */
146 static int address_from_efi(size_t *address
)
152 *address
= 0; /* Prevent compiler warning */
155 * Linux up to 2.6.6: /proc/efi/systab
156 * Linux 2.6.7 and up: /sys/firmware/efi/systab
158 if (!(tab
= fopen("/sys/firmware/efi/systab", "r")) &&
159 !(tab
= fopen("/proc/efi/systab", "r")))
160 return EFI_NOT_FOUND
; /* No EFI interface */
/* each systab line looks like "NAME=0xADDR"; find the SMBIOS one */
163 while ((fgets(linebuf
, sizeof(linebuf
) - 1, tab
)) != NULL
) {
164 char *addrp
= strchr(linebuf
, '=');
168 if (strcmp(linebuf
, "SMBIOS") == 0) {
170 *address
= strtoul(addrp
, NULL
, 0);
/*
 * read_hypervisor_dmi_from_devmem -- locate the SMBIOS/DMI entry point
 * through /dev/mem: first via the EFI system table, then (x86 only) by
 * scanning the 0xF0000-0xFFFFF BIOS area on paragraph boundaries.
 *
 * Returns a VIRT_VENDOR_* value; VIRT_VENDOR_NONE when nothing found.
 * NOTE(review): the switch cases, buffer NULL checks and free() calls
 * are elided in this chunk.
 */
182 static int read_hypervisor_dmi_from_devmem(void)
184 int rc
= VIRT_VENDOR_NONE
;
188 /* First try EFI (ia64, Intel-based Mac) */
189 switch (address_from_efi(&fp
)) {
/* EFI gave us the entry-point address; a 32-byte chunk covers it */
196 buf
= get_mem_chunk(fp
, 0x20, _PATH_DEV_MEM
);
200 rc
= hypervisor_decode_smbios(buf
, _PATH_DEV_MEM
);
201 if (rc
>= VIRT_VENDOR_NONE
)
207 #if defined(__x86_64__) || defined(__i386__)
208 /* Fallback to memory scan (x86, x86_64) */
209 buf
= get_mem_chunk(0xF0000, 0x10000, _PATH_DEV_MEM
);
/* the entry point is paragraph (16-byte) aligned within the BIOS area */
213 for (fp
= 0; fp
<= 0xFFF0; fp
+= 16) {
214 if (memcmp(buf
+ fp
, "_SM_", 4) == 0 && fp
<= 0xFFE0) {
215 rc
= hypervisor_decode_smbios(buf
+ fp
, _PATH_DEV_MEM
);
219 } else if (memcmp(buf
+ fp
, "_DMI_", 5) == 0)
220 rc
= hypervisor_decode_legacy(buf
+ fp
, _PATH_DEV_MEM
);
222 if (rc
>= VIRT_VENDOR_NONE
)
/*
 * read_hypervisor_dmi_from_sysfw -- decode the binary DMI table the
 * kernel exports at _PATH_SYS_DMI (sysfs), avoiding /dev/mem entirely.
 *
 * The structure count is unknown here, so st_size / 4 is passed as an
 * upper bound -- presumably because a DMI structure header is at least
 * 4 bytes; confirm against the full source.
 * NOTE(review): the stat-failure return value is elided in this chunk.
 */
231 static int read_hypervisor_dmi_from_sysfw(void)
233 static char const sys_fw_dmi_tables
[] = _PATH_SYS_DMI
;
236 if (stat(sys_fw_dmi_tables
, &st
))
239 return hypervisor_from_dmi_table(0, st
.st_size
, st
.st_size
/ 4,
/*
 * read_hypervisor_dmi -- top-level DMI probe.  Sanity-checks the
 * fixed-width integer types, tries the sysfs DMI table first, and only
 * falls back to /dev/mem scanning when sysfs reports no DMI at all.
 * Never returns a negative value to callers.
 */
243 static int read_hypervisor_dmi(void)
/* paranoid guard: the decoders rely on exact type widths */
247 if (sizeof(uint8_t) != 1
248 || sizeof(uint16_t) != 2
249 || sizeof(uint32_t) != 4
251 return VIRT_VENDOR_NONE
;
253 /* -1 : no DMI in /sys,
254 * 0 : DMI exist, nothing detected (VIRT_VENDOR_NONE)
255 * >0 : hypervisor detected
257 rc
= read_hypervisor_dmi_from_sysfw();
259 rc
= read_hypervisor_dmi_from_devmem();
/* map the internal -1 "no DMI" result to VIRT_VENDOR_NONE */
261 return rc
< 0 ? VIRT_VENDOR_NONE
: rc
;
/*
 * has_pci_device -- scan /proc/bus/pci/devices for a PCI device with
 * the given @vendor/@device ID pair.
 *
 * Presumably returns non-zero when the device is present -- TODO confirm
 * the return convention; the tail of the function (match handling,
 * fclose, NULL check on f) is not visible in this chunk.
 */
264 static int has_pci_device(struct lscpu_cxt
*cxt
,
265 unsigned int vendor
, unsigned int device
)
268 unsigned int num
, fn
, ven
, dev
;
271 f
= ul_path_fopen(cxt
->procfs
, "r", "bus/pci/devices");
275 /* for more details about bus/pci/devices format see
276 * drivers/pci/proc.c in linux kernel
/* each line: bus+slot/func, then 16-bit vendor and device packed
 * into one hex field, then fields we skip with %*[^\n] */
278 while(fscanf(f
, "%02x%02x\t%04x%04x\t%*[^\n]",
279 &num
, &fn
, &ven
, &dev
) == 4) {
281 if (ven
== vendor
&& dev
== device
)
291 #if defined(__x86_64__) || defined(__i386__)
293 * This CPUID leaf returns the information about the hypervisor.
294 * EAX : maximum input value for CPUID supported by the hypervisor.
295 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
297 #define HYPERVISOR_INFO_LEAF 0x40000000
/*
 * Thin wrapper around the CPUID instruction: executes leaf @op and
 * stores the four result registers through the pointer arguments.
 * NOTE(review): the *edx parameter and the inline-asm body are elided
 * in this chunk.
 */
299 static inline void cpuid(unsigned int op
, unsigned int *eax
,
300 unsigned int *ebx
, unsigned int *ecx
,
304 #if defined(__PIC__) && defined(__i386__)
305 /* x86 PIC cannot clobber ebx -- gcc bitches */
320 static int read_hypervisor_cpuid(void)
322 unsigned int eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
323 char hyper_vendor_id
[13] = { 0 };
325 cpuid(HYPERVISOR_INFO_LEAF
, &eax
, &ebx
, &ecx
, &edx
);
326 memcpy(hyper_vendor_id
+ 0, &ebx
, 4);
327 memcpy(hyper_vendor_id
+ 4, &ecx
, 4);
328 memcpy(hyper_vendor_id
+ 8, &edx
, 4);
329 hyper_vendor_id
[12] = '\0';
331 if (!hyper_vendor_id
[0])
334 if (!strncmp("XenVMMXenVMM", hyper_vendor_id
, 12))
335 return VIRT_VENDOR_XEN
;
336 else if (!strncmp("KVMKVMKVM", hyper_vendor_id
, 9))
337 return VIRT_VENDOR_KVM
;
338 else if (!strncmp("Microsoft Hv", hyper_vendor_id
, 12))
339 return VIRT_VENDOR_MSHV
;
340 else if (!strncmp("VMwareVMware", hyper_vendor_id
, 12))
341 return VIRT_VENDOR_VMWARE
;
342 else if (!strncmp("UnisysSpar64", hyper_vendor_id
, 12))
343 return VIRT_VENDOR_SPAR
;
345 return VIRT_VENDOR_NONE
;
348 #else /* ! (__x86_64__ || __i386__) */
/* Non-x86 stub: no CPUID instruction is available, so this probe cannot
 * detect anything.  NOTE(review): the body is elided here; presumably it
 * just returns 0 / VIRT_VENDOR_NONE. */
349 static int read_hypervisor_cpuid(void)
/*
 * is_devtree_compatible -- check whether @str appears among the
 * NUL-separated entries of /proc/device-tree/compatible.
 *
 * Presumably returns non-zero on a match -- TODO confirm; the fd NULL
 * check, fclose() and the final increment that skips each terminating
 * NUL are not visible in this chunk.
 */
355 static int is_devtree_compatible(struct lscpu_cxt
*cxt
, const char *str
)
357 FILE *fd
= ul_path_fopen(cxt
->procfs
, "r", "device-tree/compatible");
/* read at most sizeof(buf)-1 bytes so the buffer stays NUL-terminated */
363 memset(buf
, 0, sizeof(buf
));
364 len
= fread(buf
, 1, sizeof(buf
) - 1, fd
);
/* walk the packed, NUL-separated entries one string at a time */
367 for (i
= 0; i
< len
;) {
368 if (!strcmp(&buf
[i
], str
))
370 i
+= strlen(&buf
[i
]);
/*
 * read_hypervisor_powerpc -- PowerPC-specific detection based on
 * /proc/iSeries and device-tree properties.
 *
 * Stores the virtualization type in *type and returns the VIRT_VENDOR_*
 * value (VIRT_VENDOR_NONE on bare metal / PowerNV).
 */
378 static int read_hypervisor_powerpc(struct lscpu_cxt
*cxt
, int *type
)
380 int vendor
= VIRT_VENDOR_NONE
;
382 *type
= VIRT_TYPE_NONE
;
384 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
385 if (ul_path_access(cxt
->procfs
, F_OK
, "iSeries") == 0) {
386 vendor
= VIRT_VENDOR_OS400
;
387 *type
= VIRT_TYPE_PARA
;
389 /* PowerNV (POWER Non-Virtualized, bare-metal) */
390 } else if (is_devtree_compatible(cxt
, "ibm,powernv") != 0) {
393 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
394 } else if (ul_path_access(cxt
->procfs
, F_OK
, "device-tree/ibm,partition-name") == 0
395 && ul_path_access(cxt
->procfs
, F_OK
, "device-tree/hmc-managed?") == 0
396 && ul_path_access(cxt
->procfs
, F_OK
, "device-tree/chosen/qemu,graphic-width") != 0) {
399 vendor
= VIRT_VENDOR_PHYP
;
400 *type
= VIRT_TYPE_PARA
;
/* a partition named "full" marks a full system partition, i.e. not
 * really virtualized from the OS point of view */
402 if (ul_path_scanf(cxt
->procfs
, "device-tree/ibm,partition-name", "%255s", buf
) == 1 &&
403 !strcmp(buf
, "full"))
404 *type
= VIRT_TYPE_NONE
;
/* qemu/KVM pseries guest */
407 } else if (is_devtree_compatible(cxt
, "qemu,pseries")) {
408 vendor
= VIRT_VENDOR_KVM
;
409 *type
= VIRT_TYPE_PARA
;
415 #ifdef INCLUDE_VMWARE_BDOOR
/* VMware "backdoor" I/O-port protocol constants: the magic value goes
 * in EAX, the command in one register, the port number in another. */
417 #define VMWARE_BDOOR_MAGIC 0x564D5868
418 #define VMWARE_BDOOR_PORT 0x5658
419 #define VMWARE_BDOOR_CMD_GETVERSION 10
/*
 * Issue the backdoor GETVERSION command and return the raw register
 * values through the pointer arguments.  Faults (SIGSEGV) on hardware
 * that does not implement the backdoor -- the caller installs a signal
 * handler around this call.  NOTE(review): the inline-asm body is
 * largely elided in this chunk.
 */
421 static UL_ASAN_BLACKLIST
422 void vmware_bdoor(uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
425 #if defined(__PIC__) && defined(__i386__)
426 /* x86 PIC cannot clobber ebx -- gcc bitches */
438 : "0" (VMWARE_BDOOR_MAGIC
),
439 "1" (VMWARE_BDOOR_CMD_GETVERSION
),
440 "2" (VMWARE_BDOOR_PORT
),
/* Jump target used to recover from the SIGSEGV that vmware_bdoor()
 * raises on non-VMware hardware (see is_vmware_platform()). */
445 static jmp_buf segv_handler_env
;
/* SIGSEGV handler: jump back into is_vmware_platform() with value 1. */
448 segv_handler(__attribute__((__unused__
)) int sig
,
449 __attribute__((__unused__
)) siginfo_t
*info
,
450 __attribute__((__unused__
)) void *ignored
)
452 siglongjmp(segv_handler_env
, 1);
/*
 * is_vmware_platform -- probe the VMware backdoor port, catching the
 * SIGSEGV it raises on non-VMware machines via sigsetjmp/siglongjmp.
 * Returns non-zero when the backdoor answered with the magic value.
 */
455 static int is_vmware_platform(void)
457 uint32_t eax
, ebx
, ecx
, edx
;
458 struct sigaction act
, oact
;
461 * FIXME: Not reliable for non-root users. Note it works as expected if
462 * vmware_bdoor() is not optimized for PIE, but then it fails to build
463 * on 32bit x86 systems. See lscpu git log for more details (commit
464 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
470 * The assembly routine for vmware detection works
471 * fine under vmware, even if ran as regular user. But
472 * on real HW or under other hypervisors, it segfaults (which is
473 * expected). So we temporarily install SIGSEGV handler to catch
474 * the signal. All this magic is needed because lscpu
475 * isn't supposed to require root privileges.
/* non-zero return here means the backdoor access faulted */
477 if (sigsetjmp(segv_handler_env
, 1))
480 memset(&act
, 0, sizeof(act
));
481 act
.sa_sigaction
= segv_handler
;
482 act
.sa_flags
= SA_SIGINFO
;
484 if (sigaction(SIGSEGV
, &act
, &oact
))
485 err(EXIT_FAILURE
, _("cannot set signal handler"));
/* may SIGSEGV; the handler longjmps back to the sigsetjmp above */
487 vmware_bdoor(&eax
, &ebx
, &ecx
, &edx
);
489 if (sigaction(SIGSEGV
, &oact
, NULL
))
490 err(EXIT_FAILURE
, _("cannot restore signal handler"));
/* a real backdoor echoes the magic in EBX and a valid version in EAX */
492 return eax
!= (uint32_t)-1 && ebx
== VMWARE_BDOOR_MAGIC
;
495 #else /* ! INCLUDE_VMWARE_BDOOR */
/* Stub for builds without port-I/O backdoor support; presumably always
 * returns 0 -- body not visible in this chunk. */
497 static int is_vmware_platform(void)
502 #endif /* INCLUDE_VMWARE_BDOOR */
/*
 * lscpu_read_virtualization -- run every available virtualization probe
 * (CPU flags, WSL, CPUID, DMI, VMware backdoor, PowerPC device-tree,
 * Xen procfs, PCI IDs, s390 sysinfo, container markers) and return a
 * newly allocated struct lscpu_virt describing the result.
 *
 * Returns NULL-equivalent behavior when nothing was detected (the
 * struct is freed at the end, see the cpuflag/hypervisor/vendor/type
 * check below).  NOTE(review): several statements (fd NULL checks,
 * fclose calls, local declarations, goto labels) are elided in this
 * chunk.
 */
503 struct lscpu_virt
*lscpu_read_virtualization(struct lscpu_cxt
*cxt
)
506 struct lscpu_cputype
*ct
;
507 struct lscpu_virt
*virt
;
510 DBG(VIRT
, ul_debug("reading virtualization"));
511 virt
= xcalloc(1, sizeof(*virt
));
/* hardware virtualization CPU flags (AMD svm / Intel vmx) */
514 ct
= lscpu_cputype_get_default(cxt
);
515 if (ct
&& ct
->flags
) {
516 snprintf(buf
, sizeof(buf
), " %s ", ct
->flags
);
517 if (strstr(buf
, " svm "))
518 virt
->cpuflag
= xstrdup("svm");
519 else if (strstr(buf
, " vmx "))
520 virt
->cpuflag
= xstrdup("vmx");
524 /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
525 fd
= ul_path_fopen(cxt
->procfs
, "r", "sys/kernel/osrelease");
527 if (fgets(buf
, sizeof(buf
), fd
) && strstr(buf
, "Microsoft")) {
528 virt
->vendor
= VIRT_VENDOR_WSL
;
529 virt
->type
= VIRT_TYPE_CONTAINER
;
/* generic hypervisor probes: CPUID leaf first, then DMI strings,
 * then the VMware backdoor as a last resort */
537 virt
->vendor
= read_hypervisor_cpuid();
539 virt
->vendor
= read_hypervisor_dmi();
540 if (!virt
->vendor
&& is_vmware_platform())
541 virt
->vendor
= VIRT_VENDOR_VMWARE
;
/* a vendor found by the probes above defaults to full virtualization */
545 virt
->type
= VIRT_TYPE_FULL
;
/* refine Xen: distinguish PV/PVH guests via the sysfs features word */
547 if (virt
->vendor
== VIRT_VENDOR_XEN
) {
550 if (ul_path_scanf(cxt
->rootfs
, _PATH_SYS_HYP_FEATURES
, "%x", &features
) == 1) {
552 if (features
& XEN_FEATURES_PV_MASK
)
553 virt
->type
= VIRT_TYPE_PARA
;
555 else if ((features
& XEN_FEATURES_PVH_MASK
)
556 == XEN_FEATURES_PVH_MASK
)
557 virt
->type
= VIRT_TYPE_PARA
;
/* PowerPC-specific detection (sets both vendor and type) */
560 } else if ((virt
->vendor
= read_hypervisor_powerpc(cxt
, &virt
->type
))) {
563 /* Xen para-virt or dom0 */
564 } else if (ul_path_access(cxt
->procfs
, F_OK
, "xen") == 0) {
/* dom0 (capabilities == "control_d") is reported as not virtualized */
568 if (ul_path_scanf(cxt
->procfs
, "xen/capabilities", "%255s", xenbuf
) == 1 &&
569 !strcmp(xenbuf
, "control_d"))
571 virt
->type
= dom0
? VIRT_TYPE_NONE
: VIRT_TYPE_PARA
;
572 virt
->vendor
= VIRT_VENDOR_XEN
;
574 /* Xen full-virt on non-x86_64 */
575 } else if (has_pci_device(cxt
, hv_vendor_pci
[VIRT_VENDOR_XEN
], hv_graphics_pci
[VIRT_VENDOR_XEN
])) {
576 virt
->vendor
= VIRT_VENDOR_XEN
;
577 virt
->type
= VIRT_TYPE_FULL
;
578 } else if (has_pci_device(cxt
, hv_vendor_pci
[VIRT_VENDOR_VMWARE
], hv_graphics_pci
[VIRT_VENDOR_VMWARE
])) {
579 virt
->vendor
= VIRT_VENDOR_VMWARE
;
580 virt
->type
= VIRT_TYPE_FULL
;
581 } else if (has_pci_device(cxt
, hv_vendor_pci
[VIRT_VENDOR_VBOX
], hv_graphics_pci
[VIRT_VENDOR_VBOX
])) {
582 virt
->vendor
= VIRT_VENDOR_VBOX
;
583 virt
->type
= VIRT_TYPE_FULL
;
/* IBM z/VM or LPAR: /proc/sysinfo names the control program */
586 } else if ((fd
= ul_path_fopen(cxt
->procfs
, "r", "sysinfo"))) {
588 virt
->vendor
= VIRT_VENDOR_IBM
;
589 virt
->hypervisor
= "PR/SM";
590 virt
->type
= VIRT_TYPE_FULL
;
592 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
593 if (!strstr(buf
, "Control Program:"))
595 virt
->vendor
= strstr(buf
, "KVM") ? VIRT_VENDOR_KVM
: VIRT_VENDOR_IBM
;
596 virt
->hypervisor
= strchr(buf
, ':');
598 if (virt
->hypervisor
) {
600 normalize_whitespace((unsigned char *) virt
->hypervisor
);
/* duplicate -- up to here virt->hypervisor points into buf */
604 if (virt
->hypervisor
)
605 virt
->hypervisor
= xstrdup(virt
->hypervisor
);
609 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
610 * /proc/bc should not */
611 else if (ul_path_access(cxt
->procfs
, F_OK
, "vz") == 0 &&
612 ul_path_access(cxt
->procfs
, F_OK
, "bc") != 0) {
613 virt
->vendor
= VIRT_VENDOR_PARALLELS
;
614 virt
->type
= VIRT_TYPE_CONTAINER
;
/* IBM PowerVM Lx86 / S390 emulation identified by the CP name above */
617 } else if (virt
->hypervisor
&&
618 (strcmp(virt
->hypervisor
, "PowerVM Lx86") == 0 ||
619 strcmp(virt
->hypervisor
, "IBM/S390") == 0)) {
620 virt
->vendor
= VIRT_VENDOR_IBM
;
621 virt
->type
= VIRT_TYPE_FULL
;
623 /* User-mode-linux */
624 } else if (ct
&& ct
->modelname
&& strstr(ct
->modelname
, "UML")) {
625 virt
->vendor
= VIRT_VENDOR_UML
;
626 virt
->type
= VIRT_TYPE_PARA
;
/* Linux-VServer: a non-zero VxID in /proc/self/status */
629 } else if ((fd
= ul_path_fopen(cxt
->procfs
, "r", "self/status"))) {
632 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
633 if (lookup(buf
, "VxID", &val
))
641 while (isdigit(*val
))
644 virt
->vendor
= VIRT_VENDOR_VSERVER
;
645 virt
->type
= VIRT_TYPE_CONTAINER
;
651 DBG(VIRT
, ul_debugobj(virt
, "virt: cpu='%s' hypervisor='%s' vendor=%d type=%d",
/* nothing at all detected: release the struct instead of returning
 * an all-empty result */
657 if (!virt
->cpuflag
&& !virt
->hypervisor
&& !virt
->vendor
&& !virt
->type
) {
658 lscpu_free_virtualization(virt
);
/*
 * lscpu_free_virtualization -- release a struct lscpu_virt allocated by
 * lscpu_read_virtualization().  NOTE(review): the function continues
 * past this chunk; presumably it also frees virt->cpuflag and virt
 * itself, and tolerates a NULL argument -- confirm in the full source.
 */
664 void lscpu_free_virtualization(struct lscpu_virt
*virt
)
670 free(virt
->hypervisor
);