12 #if (defined(__x86_64__) || defined(__i386__))
13 # define INCLUDE_VMWARE_BDOOR
16 #ifdef INCLUDE_VMWARE_BDOOR
/*
 * Xen domain feature flags as exposed in the hex bitmask read from
 * /sys/hypervisor/properties/features (bit numbers match Xen's public
 * features.h).
 */
#define XENFEAT_supervisor_mode_kernel		3
#define XENFEAT_mmu_pt_update_preserve_ad	5
#define XENFEAT_hvm_callback_vector		8

/* Feature combinations used to classify a Xen guest as PV vs. PVH */
#define XEN_FEATURES_PV_MASK	(1U << XENFEAT_mmu_pt_update_preserve_ad)
#define XEN_FEATURES_PVH_MASK	( (1U << XENFEAT_supervisor_mode_kernel) \
				| (1U << XENFEAT_hvm_callback_vector) )
34 static const int hv_vendor_pci
[] = {
35 [VIRT_VENDOR_NONE
] = 0x0000,
36 [VIRT_VENDOR_XEN
] = 0x5853,
37 [VIRT_VENDOR_KVM
] = 0x0000,
38 [VIRT_VENDOR_MSHV
] = 0x1414,
39 [VIRT_VENDOR_VMWARE
] = 0x15ad,
40 [VIRT_VENDOR_VBOX
] = 0x80ee,
43 static const int hv_graphics_pci
[] = {
44 [VIRT_VENDOR_NONE
] = 0x0000,
45 [VIRT_VENDOR_XEN
] = 0x0001,
46 [VIRT_VENDOR_KVM
] = 0x0000,
47 [VIRT_VENDOR_MSHV
] = 0x5353,
48 [VIRT_VENDOR_VMWARE
] = 0x0710,
49 [VIRT_VENDOR_VBOX
] = 0xbeef,
/*
 * Read a native-endian 16/32-bit value from a raw byte buffer (DMI data
 * from /dev/mem or sysfs).
 * NOTE(review): the direct cast assumes the address is suitably aligned;
 * fine on x86 (this code path is x86-only), may trap on strict-alignment
 * architectures.
 */
#define WORD(x)		(uint16_t)(*(const uint16_t *)(x))
#define DWORD(x)	(uint32_t)(*(const uint32_t *)(x))
/*
 * Reads "len" bytes at offset "base" from "devmem" (typically /dev/mem)
 * into a freshly malloc()ed buffer.
 *
 * Returns the buffer on success, NULL on any failure.  The caller owns the
 * result and must free() it.
 */
void *get_mem_chunk(size_t base, size_t len, const char *devmem)
{
	void *p = NULL;
	int fd;

	if ((fd = open(devmem, O_RDONLY)) < 0)
		return NULL;

	if (!(p = malloc(len)))
		goto nothing;
	if (lseek(fd, base, SEEK_SET) == -1)
		goto nothing;
	if (read_all(fd, p, len) == -1)		/* util-linux helper: full read */
		goto nothing;

	close(fd);
	return p;

nothing:
	/* single cleanup path: free(NULL) is a no-op */
	free(p);
	close(fd);
	return NULL;
}
79 static int hypervisor_from_dmi_table(uint32_t base
, uint16_t len
,
80 uint16_t num
, const char *devmem
)
83 int rc
= VIRT_VENDOR_NONE
;
86 data
= get_mem_chunk(base
, len
, devmem
);
90 memset(&di
, 0, sizeof(struct dmi_info
));
91 rc
= parse_dmi_table(len
, num
, data
, &di
);
95 if (di
.manufacturer
&& !strcmp(di
.manufacturer
, "innotek GmbH"))
96 rc
= VIRT_VENDOR_INNOTEK
;
97 else if (di
.manufacturer
&& strstr(di
.manufacturer
, "HITACHI") &&
98 di
.product
&& strstr(di
.product
, "LPAR"))
99 rc
= VIRT_VENDOR_HITACHI
;
100 else if (di
.vendor
&& !strcmp(di
.vendor
, "Parallels"))
101 rc
= VIRT_VENDOR_PARALLELS
;
/*
 * DMI entry-point checksum: all "len" bytes must sum to zero modulo 256.
 * Returns non-zero when the checksum is valid (an empty range is valid).
 */
static int checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;	/* uint8_t gives the mod-256 wraparound for free */
	size_t a;

	for (a = 0; a < len; a++)
		sum += buf[a];
	return (sum == 0);
}
117 #if defined(__x86_64__) || defined(__i386__)
/*
 * Decodes a legacy "_DMI_" entry point; "buf" points at the 15-byte
 * anchor.  Layout: table address at +0x08 (dword), length at +0x06 (word),
 * structure count at +0x0C (word).
 * Returns VIRT_VENDOR_* or negative on a bad checksum.
 */
static int hypervisor_decode_legacy(uint8_t *buf, const char *devmem)
{
	if (!checksum(buf, 0x0F))
		return -1;

	return hypervisor_from_dmi_table(DWORD(buf + 0x08), WORD(buf + 0x06),
					 WORD(buf + 0x0C), devmem);
}
/*
 * Decodes an "_SM_" SMBIOS entry point: validates the outer checksum
 * (length byte at +0x05), the embedded "_DMI_" anchor at +0x10 and its
 * checksum, then parses the table at +0x18 (length +0x16, count +0x1C).
 * Returns VIRT_VENDOR_* or negative on a malformed entry point.
 */
static int hypervisor_decode_smbios(uint8_t *buf, const char *devmem)
{
	if (!checksum(buf, buf[0x05])
	    || memcmp(buf + 0x10, "_DMI_", 5) != 0
	    || !checksum(buf + 0x10, 0x0F))
		return -1;

	return hypervisor_from_dmi_table(DWORD(buf + 0x18), WORD(buf + 0x16),
					 WORD(buf + 0x1C), devmem);
}
/*
 * Probe for EFI interface
 */
#define EFI_NOT_FOUND	(-1)
#define EFI_NO_SMBIOS	(-2)

/*
 * Looks up the SMBIOS entry-point physical address in the EFI system
 * table exposed by the kernel.
 *
 * Returns 0 and stores the address, EFI_NOT_FOUND when no EFI interface
 * exists, or EFI_NO_SMBIOS when EFI is present but lists no SMBIOS table.
 */
static int address_from_efi(size_t *address)
{
	FILE *tab;
	char linebuf[64];
	int ret;

	*address = 0; /* Prevent compiler warning */

	/*
	 * Linux up to 2.6.6: /proc/efi/systab
	 * Linux 2.6.7 and up: /sys/firmware/efi/systab
	 */
	if (!(tab = fopen("/sys/firmware/efi/systab", "r")) &&
	    !(tab = fopen("/proc/efi/systab", "r")))
		return EFI_NOT_FOUND;		/* No EFI interface */

	ret = EFI_NO_SMBIOS;
	while ((fgets(linebuf, sizeof(linebuf) - 1, tab)) != NULL) {
		char *addrp = strchr(linebuf, '=');

		/* each line is "NAME=0xADDR" */
		if (!addrp)
			continue;
		*(addrp++) = '\0';

		if (strcmp(linebuf, "SMBIOS") == 0) {
			*address = strtoul(addrp, NULL, 0);
			ret = 0;
			break;
		}
	}

	fclose(tab);
	return ret;
}
182 static int read_hypervisor_dmi_from_devmem(void)
184 int rc
= VIRT_VENDOR_NONE
;
188 /* First try EFI (ia64, Intel-based Mac) */
189 switch (address_from_efi(&fp
)) {
196 buf
= get_mem_chunk(fp
, 0x20, _PATH_DEV_MEM
);
200 rc
= hypervisor_decode_smbios(buf
, _PATH_DEV_MEM
);
201 if (rc
>= VIRT_VENDOR_NONE
)
207 #if defined(__x86_64__) || defined(__i386__)
208 /* Fallback to memory scan (x86, x86_64) */
209 buf
= get_mem_chunk(0xF0000, 0x10000, _PATH_DEV_MEM
);
213 for (fp
= 0; fp
<= 0xFFF0; fp
+= 16) {
214 if (memcmp(buf
+ fp
, "_SM_", 4) == 0 && fp
<= 0xFFE0) {
215 rc
= hypervisor_decode_smbios(buf
+ fp
, _PATH_DEV_MEM
);
219 } else if (memcmp(buf
+ fp
, "_DMI_", 5) == 0)
220 rc
= hypervisor_decode_legacy(buf
+ fp
, _PATH_DEV_MEM
);
222 if (rc
>= VIRT_VENDOR_NONE
)
231 static int read_hypervisor_dmi_from_sysfw(void)
233 static char const sys_fw_dmi_tables
[] = _PATH_SYS_DMI
;
236 if (stat(sys_fw_dmi_tables
, &st
))
239 return hypervisor_from_dmi_table(0, st
.st_size
, st
.st_size
/ 4,
243 static int read_hypervisor_dmi(void)
247 if (sizeof(uint8_t) != 1
248 || sizeof(uint16_t) != 2
249 || sizeof(uint32_t) != 4
251 return VIRT_VENDOR_NONE
;
253 /* -1 : no DMI in /sys,
254 * 0 : DMI exist, nothing detected (VIRT_VENDOR_NONE)
255 * >0 : hypervisor detected
257 rc
= read_hypervisor_dmi_from_sysfw();
259 rc
= read_hypervisor_dmi_from_devmem();
261 return rc
< 0 ? VIRT_VENDOR_NONE
: rc
;
264 static int has_pci_device(struct lscpu_cxt
*cxt
,
265 unsigned int vendor
, unsigned int device
)
268 unsigned int num
, fn
, ven
, dev
;
271 f
= ul_path_fopen(cxt
->procfs
, "r", "bus/pci/devices");
275 /* for more details about bus/pci/devices format see
276 * drivers/pci/proc.c in linux kernel
278 while(fscanf(f
, "%02x%02x\t%04x%04x\t%*[^\n]",
279 &num
, &fn
, &ven
, &dev
) == 4) {
281 if (ven
== vendor
&& dev
== device
)
291 #if defined(__x86_64__) || defined(__i386__)
/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF	0x40000000

/* Executes CPUID with "op" in EAX (and 0 in ECX); stores the four result
 * registers through the pointer arguments. */
static inline void cpuid(unsigned int op, unsigned int *eax,
			 unsigned int *ebx, unsigned int *ecx,
			 unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax), "=c" (*ecx), "=d" (*edx)
		: "1" (op), "c" (0));
}
320 static int read_hypervisor_cpuid(void)
322 unsigned int eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
323 char hyper_vendor_id
[13] = { 0 };
325 cpuid(HYPERVISOR_INFO_LEAF
, &eax
, &ebx
, &ecx
, &edx
);
326 memcpy(hyper_vendor_id
+ 0, &ebx
, 4);
327 memcpy(hyper_vendor_id
+ 4, &ecx
, 4);
328 memcpy(hyper_vendor_id
+ 8, &edx
, 4);
329 hyper_vendor_id
[12] = '\0';
331 if (!hyper_vendor_id
[0])
334 if (!strncmp("XenVMMXenVMM", hyper_vendor_id
, 12))
335 return VIRT_VENDOR_XEN
;
336 else if (!strncmp("KVMKVMKVM", hyper_vendor_id
, 9))
337 return VIRT_VENDOR_KVM
;
338 else if (!strncmp("Microsoft Hv", hyper_vendor_id
, 12))
339 return VIRT_VENDOR_MSHV
;
340 else if (!strncmp("VMwareVMware", hyper_vendor_id
, 12))
341 return VIRT_VENDOR_VMWARE
;
342 else if (!strncmp("UnisysSpar64", hyper_vendor_id
, 12))
343 return VIRT_VENDOR_SPAR
;
345 return VIRT_VENDOR_NONE
;
348 #else /* ! (__x86_64__ || __i386__) */
349 static int read_hypervisor_cpuid(void)
355 static int is_devtree_compatible(struct lscpu_cxt
*cxt
, const char *str
)
357 FILE *fd
= ul_path_fopen(cxt
->procfs
, "r", "device-tree/compatible");
363 memset(buf
, 0, sizeof(buf
));
364 len
= fread(buf
, 1, sizeof(buf
) - 1, fd
);
367 for (i
= 0; i
< len
;) {
368 if (!strcmp(&buf
[i
], str
))
370 i
+= strlen(&buf
[i
]);
378 static int read_hypervisor_powerpc(struct lscpu_cxt
*cxt
, int *type
)
380 int vendor
= VIRT_VENDOR_NONE
;
382 *type
= VIRT_TYPE_NONE
;
384 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
385 if (ul_path_access(cxt
->procfs
, F_OK
, "iSeries") == 0) {
386 vendor
= VIRT_VENDOR_OS400
;
387 *type
= VIRT_TYPE_PARA
;
389 /* PowerNV (POWER Non-Virtualized, bare-metal) */
390 } else if (is_devtree_compatible(cxt
, "ibm,powernv") != 0) {
393 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
394 } else if (ul_path_access(cxt
->procfs
, F_OK
, "device-tree/ibm,partition-name") == 0
395 && ul_path_access(cxt
->procfs
, F_OK
, "device-tree/hmc-managed?") == 0
396 && ul_path_access(cxt
->procfs
, F_OK
, "device-tree/chosen/qemu,graphic-width") != 0) {
399 vendor
= VIRT_VENDOR_PHYP
;
400 *type
= VIRT_TYPE_PARA
;
402 fd
= ul_path_fopen(cxt
->procfs
, "r", "device-tree/ibm,partition-name");
405 if (fscanf(fd
, "%255s", buf
) == 1 && !strcmp(buf
, "full"))
406 *type
= VIRT_TYPE_NONE
;
411 } else if (is_devtree_compatible(cxt
, "qemu,pseries")) {
412 vendor
= VIRT_VENDOR_KVM
;
413 *type
= VIRT_TYPE_PARA
;
419 #ifdef INCLUDE_VMWARE_BDOOR
421 #define VMWARE_BDOOR_MAGIC 0x564D5868
422 #define VMWARE_BDOOR_PORT 0x5658
423 #define VMWARE_BDOOR_CMD_GETVERSION 10
425 static UL_ASAN_BLACKLIST
426 void vmware_bdoor(uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
429 #if defined(__PIC__) && defined(__i386__)
430 /* x86 PIC cannot clobber ebx -- gcc bitches */
442 : "0" (VMWARE_BDOOR_MAGIC
),
443 "1" (VMWARE_BDOOR_CMD_GETVERSION
),
444 "2" (VMWARE_BDOOR_PORT
),
449 static jmp_buf segv_handler_env
;
452 segv_handler(__attribute__((__unused__
)) int sig
,
453 __attribute__((__unused__
)) siginfo_t
*info
,
454 __attribute__((__unused__
)) void *ignored
)
456 siglongjmp(segv_handler_env
, 1);
459 static int is_vmware_platform(void)
461 uint32_t eax
, ebx
, ecx
, edx
;
462 struct sigaction act
, oact
;
465 * FIXME: Not reliable for non-root users. Note it works as expected if
466 * vmware_bdoor() is not optimized for PIE, but then it fails to build
467 * on 32bit x86 systems. See lscpu git log for more details (commit
468 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
474 * The assembly routine for vmware detection works
475 * fine under vmware, even if ran as regular user. But
476 * on real HW or under other hypervisors, it segfaults (which is
477 * expected). So we temporarily install SIGSEGV handler to catch
478 * the signal. All this magic is needed because lscpu
479 * isn't supposed to require root privileges.
481 if (sigsetjmp(segv_handler_env
, 1))
484 memset(&act
, 0, sizeof(act
));
485 act
.sa_sigaction
= segv_handler
;
486 act
.sa_flags
= SA_SIGINFO
;
488 if (sigaction(SIGSEGV
, &act
, &oact
))
489 err(EXIT_FAILURE
, _("cannot set signal handler"));
491 vmware_bdoor(&eax
, &ebx
, &ecx
, &edx
);
493 if (sigaction(SIGSEGV
, &oact
, NULL
))
494 err(EXIT_FAILURE
, _("cannot restore signal handler"));
496 return eax
!= (uint32_t)-1 && ebx
== VMWARE_BDOOR_MAGIC
;
499 #else /* ! INCLUDE_VMWARE_BDOOR */
501 static int is_vmware_platform(void)
506 #endif /* INCLUDE_VMWARE_BDOOR */
/*
 * Top-level virtualization detection: allocates and fills a struct
 * lscpu_virt with the CPU virtualization flag (svm/vmx), the hypervisor
 * vendor and the virtualization type.  Caller owns the result and frees
 * it with lscpu_free_virtualization().
 *
 * NOTE(review): this extract is incomplete -- a number of original source
 * lines are absent.  Gaps are marked "MISSING" below; do not treat this
 * text as compilable.
 */
struct lscpu_virt *lscpu_read_virtualization(struct lscpu_cxt *cxt)
{
	struct lscpu_cputype *ct;
	struct lscpu_virt *virt;
	/* MISSING: remaining local declarations (FILE *fd, char buf[], ...) */

	DBG(VIRT, ul_debug("reading virtualization"));
	virt = xcalloc(1, sizeof(*virt));

	/* CPU capability flag from cpuinfo: "svm" (AMD) or "vmx" (Intel);
	 * flags are padded with spaces so " svm " matches whole words only */
	ct = lscpu_cputype_get_default(cxt);
	if (ct && ct->flags) {
		snprintf(buf, sizeof(buf), " %s ", ct->flags);
		if (strstr(buf, " svm "))
			virt->cpuflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			virt->cpuflag = xstrdup("vmx");
	/* MISSING: closing brace of the flags block */

	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
	fd = ul_path_fopen(cxt->procfs, "r", "sys/kernel/osrelease");
	if (fgets(buf, sizeof(buf), fd) && strstr(buf, "Microsoft")) {
		virt->vendor = VIRT_VENDOR_WSL;
		virt->type = VIRT_TYPE_CONTAINER;
	/* MISSING: fd NULL-check, fclose() and early return around here */

	/* hypervisor vendor: CPUID first, DMI as a fallback */
	virt->vendor = read_hypervisor_cpuid();
	/* MISSING: condition guarding the DMI fallback */
		virt->vendor = read_hypervisor_dmi();
	if (!virt->vendor && is_vmware_platform())
		virt->vendor = VIRT_VENDOR_VMWARE;
	/* MISSING: lines between here and the type classification */
		virt->type = VIRT_TYPE_FULL;

	if (virt->vendor == VIRT_VENDOR_XEN) {
		/* refine Xen guest type from the hypervisor features bitmask */
		fd = ul_prefix_fopen(cxt->prefix, "r", _PATH_SYS_HYP_FEATURES);

		if (fd && fscanf(fd, "%x", &features) == 1) {
			/* MISSING: declaration of "features" and fd cleanup */
			if (features & XEN_FEATURES_PV_MASK)
				virt->type = VIRT_TYPE_PARA;
			else if ((features & XEN_FEATURES_PVH_MASK)
						== XEN_FEATURES_PVH_MASK)
				virt->type = VIRT_TYPE_PARA;
		/* MISSING: closing braces of this branch */

	} else if ((virt->vendor = read_hypervisor_powerpc(cxt, &virt->type))) {
		/* powerpc detection sets both vendor and type itself */

	/* Xen para-virt or dom0 */
	} else if (ul_path_access(cxt->procfs, F_OK, "xen") == 0) {
		/* MISSING: "dom0" declaration and initialization */
		fd = ul_path_fopen(cxt->procfs, "r", "xen/capabilities");
		/* MISSING: fd check and xenbuf declaration */
			if (fscanf(fd, "%255s", xenbuf) == 1 &&
			    !strcmp(xenbuf, "control_d"))
		/* MISSING: dom0 flag assignment and fclose(fd) */
		virt->type = dom0 ? VIRT_TYPE_NONE : VIRT_TYPE_PARA;
		virt->vendor = VIRT_VENDOR_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device(cxt, hv_vendor_pci[VIRT_VENDOR_XEN], hv_graphics_pci[VIRT_VENDOR_XEN])) {
		virt->vendor = VIRT_VENDOR_XEN;
		virt->type = VIRT_TYPE_FULL;
	} else if (has_pci_device(cxt, hv_vendor_pci[VIRT_VENDOR_VMWARE], hv_graphics_pci[VIRT_VENDOR_VMWARE])) {
		virt->vendor = VIRT_VENDOR_VMWARE;
		virt->type = VIRT_TYPE_FULL;
	} else if (has_pci_device(cxt, hv_vendor_pci[VIRT_VENDOR_VBOX], hv_graphics_pci[VIRT_VENDOR_VBOX])) {
		virt->vendor = VIRT_VENDOR_VBOX;
		virt->type = VIRT_TYPE_FULL;

	/* s390: IBM PR/SM or KVM, reported by /proc/sysinfo */
	} else if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
		virt->vendor = VIRT_VENDOR_IBM;
		virt->hypervisor = "PR/SM";	/* static string; xstrdup()ed below */
		virt->type = VIRT_TYPE_FULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (!strstr(buf, "Control Program:"))
			/* MISSING: continue statement */
			virt->vendor = strstr(buf, "KVM") ? VIRT_VENDOR_KVM : VIRT_VENDOR_IBM;
			virt->hypervisor = strchr(buf, ':');
			/* MISSING: pointer advance past ':' */
			if (virt->hypervisor) {
				/* MISSING: lines before whitespace normalization */
				normalize_whitespace((unsigned char *) virt->hypervisor);
			/* MISSING: closing braces and fclose(fd) */

		if (virt->hypervisor)
			virt->hypervisor = xstrdup(virt->hypervisor);
	/* MISSING: closing brace of the sysinfo branch */

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 * /proc/bc should not */
	else if (ul_path_access(cxt->procfs, F_OK, "vz") == 0 &&
		 ul_path_access(cxt->procfs, F_OK, "bc") != 0) {
		virt->vendor = VIRT_VENDOR_PARALLELS;
		virt->type = VIRT_TYPE_CONTAINER;

	/* IBM hypervisors reported via the DMI hypervisor string */
	} else if (virt->hypervisor &&
		   (strcmp(virt->hypervisor, "PowerVM Lx86") == 0 ||
		    strcmp(virt->hypervisor, "IBM/S390") == 0)) {
		virt->vendor = VIRT_VENDOR_IBM;
		virt->type = VIRT_TYPE_FULL;

	/* User-mode-linux */
	} else if (ct && ct->modelname && strstr(ct->modelname, "UML")) {
		virt->vendor = VIRT_VENDOR_UML;
		virt->type = VIRT_TYPE_PARA;

	/* Linux-VServer: VxID field in /proc/self/status */
	} else if ((fd = ul_path_fopen(cxt->procfs, "r", "self/status"))) {
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
			/* MISSING: loop body / break */
		/* MISSING: fclose(fd) and val validity checks */
			while (isdigit(*val))
			/* MISSING: pointer advance and terminator check */
				virt->vendor = VIRT_VENDOR_VSERVER;
				virt->type = VIRT_TYPE_CONTAINER;
	/* MISSING: closing braces of this branch */

	DBG(VIRT, ul_debugobj(virt, "virt: cpu='%s' hypervisor='%s' vendor=%d type=%d",
	/* MISSING: DBG argument list and closing parentheses */

	/* nothing detected at all: release the empty result */
	if (!virt->cpuflag && !virt->hypervisor && !virt->vendor && !virt->type) {
		lscpu_free_virtualization(virt);
	/* MISSING: NULL-ing of virt, closing brace, and final return */
677 void lscpu_free_virtualization(struct lscpu_virt
*virt
)
683 free(virt
->hypervisor
);