2 * SPDX-License-Identifier: GPL-2.0-or-later
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
10 * Copyright (C) 2008-2023 Karel Zak <kzak@redhat.com>
14 #include <sys/types.h>
/* The VMware "backdoor" probe uses inline x86 port I/O, so it is only
 * compiled in on i386/x86_64 builds. */
24 #if (defined(__x86_64__) || defined(__i386__))
25 # define INCLUDE_VMWARE_BDOOR
28 #ifdef INCLUDE_VMWARE_BDOOR
38 /* Xen Domain feature flag used for /sys/hypervisor/properties/features */
39 #define XENFEAT_supervisor_mode_kernel 3
40 #define XENFEAT_mmu_pt_update_preserve_ad 5
41 #define XENFEAT_hvm_callback_vector 8
/* Masks over the feature bits above, used later to classify a Xen guest
 * as PV vs. PVH.  NOTE(review): the PV/PVH semantics of these bits are
 * inferred from the names -- confirm against Xen's public features.h. */
43 #define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
44 #define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
45 | (1U << XENFEAT_hvm_callback_vector) )
/* PCI vendor IDs per hypervisor, indexed by VIRT_VENDOR_*; consulted by
 * has_pci_device() to sniff the hypervisor from the guest's PCI bus
 * (0x5853 is "XS"/Xen, 0x1414 Microsoft, 0x15ad VMware, 0x80ee
 * VirtualBox).  The closing "};" falls outside this extract. */
46 static const int hv_vendor_pci
[] = {
47 [VIRT_VENDOR_NONE
] = 0x0000,
48 [VIRT_VENDOR_XEN
] = 0x5853,
49 [VIRT_VENDOR_KVM
] = 0x0000,
50 [VIRT_VENDOR_MSHV
] = 0x1414,
51 [VIRT_VENDOR_VMWARE
] = 0x15ad,
52 [VIRT_VENDOR_VBOX
] = 0x80ee,
/* Matching PCI device IDs of each hypervisor's emulated graphics
 * adapter, indexed by VIRT_VENDOR_*; used together with hv_vendor_pci[]
 * by has_pci_device().  The closing "};" falls outside this extract. */
55 static const int hv_graphics_pci
[] = {
56 [VIRT_VENDOR_NONE
] = 0x0000,
57 [VIRT_VENDOR_XEN
] = 0x0001,
58 [VIRT_VENDOR_KVM
] = 0x0000,
59 [VIRT_VENDOR_MSHV
] = 0x5353,
60 [VIRT_VENDOR_VMWARE
] = 0x0710,
61 [VIRT_VENDOR_VBOX
] = 0xbeef,
/* Raw 16/32-bit loads from a DMI byte buffer.  NOTE(review): these are
 * type-punned, potentially unaligned reads -- tolerated on the x86
 * targets this DMI path serves, but not strict-aliasing clean. */
64 #define WORD(x) (uint16_t)(*(const uint16_t *)(x))
65 #define DWORD(x) (uint32_t)(*(const uint32_t *)(x))
/*
 * Copy @len bytes starting at offset @base from @devmem (e.g. /dev/mem)
 * into a malloc()ed buffer owned by the caller.  The error paths, the
 * fd close and the return statement fall outside this extract;
 * presumably failure yields NULL -- confirm against the full source.
 */
67 void *get_mem_chunk(size_t base
, size_t len
, const char *devmem
)
72 if ((fd
= open(devmem
, O_RDONLY
)) < 0)
75 if (!(p
= malloc(len
)))
77 if (lseek(fd
, base
, SEEK_SET
) == -1)
/* read_all() is the util-linux helper that retries short reads */
79 if (read_all(fd
, p
, len
) == -1)
/*
 * Fetch the DMI table (@num structures, @len bytes at @base inside
 * @devmem), parse it, and translate well-known manufacturer/product
 * strings into a VIRT_VENDOR_* id.  Cleanup and the final return fall
 * outside this extract.
 */
91 static int hypervisor_from_dmi_table(uint32_t base
, uint16_t len
,
92 uint16_t num
, const char *devmem
)
95 int rc
= VIRT_VENDOR_NONE
;
98 data
= get_mem_chunk(base
, len
, devmem
);
/* decode the raw table into the dmi_info string fields */
102 memset(&di
, 0, sizeof(struct dmi_info
));
103 rc
= parse_dmi_table(len
, num
, data
, &di
);
/* "innotek GmbH" is the historical VirtualBox manufacturer string */
107 if (di
.manufacturer
&& !strcmp(di
.manufacturer
, "innotek GmbH"))
108 rc
= VIRT_VENDOR_INNOTEK
;
109 else if (di
.manufacturer
&& strstr(di
.manufacturer
, "HITACHI") &&
110 di
.product
&& strstr(di
.product
, "LPAR"))
111 rc
= VIRT_VENDOR_HITACHI
;
112 else if (di
.vendor
&& !strcmp(di
.vendor
, "Parallels"))
113 rc
= VIRT_VENDOR_PARALLELS
;
/*
 * Byte-wise checksum of @len bytes at @buf.  Per the SMBIOS spec an
 * entry point is valid when its bytes sum to zero (mod 256); the
 * accumulation and return statement fall outside this extract.
 */
119 static int checksum(const uint8_t *buf
, size_t len
)
124 for (a
= 0; a
< len
; a
++)
129 #if defined(__x86_64__) || defined(__i386__)
/* Legacy "_DMI_" entry point: verify the 0x0F-byte checksum, then hand
 * the table (32-bit address at +0x08, length word at +0x06) to
 * hypervisor_from_dmi_table().  The trailing argument line falls
 * outside this extract. */
130 static int hypervisor_decode_legacy(uint8_t *buf
, const char *devmem
)
132 if (!checksum(buf
, 0x0F))
135 return hypervisor_from_dmi_table(DWORD(buf
+ 0x08), WORD(buf
+ 0x06),
/* SMBIOS "_SM_" entry point: check the outer checksum (length byte at
 * +0x05), the embedded "_DMI_" anchor at +0x10 and its 0x0F-byte
 * checksum, then decode the table at address +0x18 / length +0x16.
 * The trailing argument line falls outside this extract. */
141 static int hypervisor_decode_smbios(uint8_t *buf
, const char *devmem
)
143 if (!checksum(buf
, buf
[0x05])
144 || memcmp(buf
+ 0x10, "_DMI_", 5) != 0
145 || !checksum(buf
+ 0x10, 0x0F))
148 return hypervisor_from_dmi_table(DWORD(buf
+ 0x18), WORD(buf
+ 0x16),
154 * Probe for EFI interface
156 #define EFI_NOT_FOUND (-1)
157 #define EFI_NO_SMBIOS (-2)
/* Scan the EFI systab file for a "SMBIOS=<addr>" line; on success the
 * physical address is stored in *address.  The tokenizing around '='
 * and the success / EFI_NO_SMBIOS returns fall outside this extract. */
158 static int address_from_efi(size_t *address
)
164 *address
= 0; /* Prevent compiler warning */
167 * Linux up to 2.6.6: /proc/efi/systab
168 * Linux 2.6.7 and up: /sys/firmware/efi/systab
170 if (!(tab
= fopen("/sys/firmware/efi/systab", "r")) &&
171 !(tab
= fopen("/proc/efi/systab", "r")))
172 return EFI_NOT_FOUND
; /* No EFI interface */
175 while ((fgets(linebuf
, sizeof(linebuf
) - 1, tab
)) != NULL
) {
176 char *addrp
= strchr(linebuf
, '=');
/* presumably the line is split at '=' before these compares -- the
 * splitting statement falls outside this extract; confirm upstream */
180 if (strcmp(linebuf
, "SMBIOS") == 0) {
182 *address
= strtoul(addrp
, NULL
, 0);
/*
 * Legacy DMI probing via /dev/mem: first ask EFI for the SMBIOS entry
 * point, then (on x86) fall back to scanning the 0xF0000-0xFFFFF BIOS
 * window for "_SM_"/"_DMI_" anchors on 16-byte boundaries.  Buffer
 * cleanup and several branch bodies fall outside this extract.
 */
194 static int read_hypervisor_dmi_from_devmem(void)
196 int rc
= VIRT_VENDOR_NONE
;
200 /* First try EFI (ia64, Intel-based Mac) */
201 switch (address_from_efi(&fp
)) {
208 buf
= get_mem_chunk(fp
, 0x20, _PATH_DEV_MEM
);
212 rc
= hypervisor_decode_smbios(buf
, _PATH_DEV_MEM
);
213 if (rc
>= VIRT_VENDOR_NONE
)
219 #if defined(__x86_64__) || defined(__i386__)
220 /* Fallback to memory scan (x86, x86_64) */
221 buf
= get_mem_chunk(0xF0000, 0x10000, _PATH_DEV_MEM
);
225 for (fp
= 0; fp
<= 0xFFF0; fp
+= 16) {
/* the extra 0xFFE0 bound presumably keeps the "_SM_" entry point
 * inside the 64K buffer -- confirm against the full source */
226 if (memcmp(buf
+ fp
, "_SM_", 4) == 0 && fp
<= 0xFFE0) {
227 rc
= hypervisor_decode_smbios(buf
+ fp
, _PATH_DEV_MEM
);
231 } else if (memcmp(buf
+ fp
, "_DMI_", 5) == 0)
232 rc
= hypervisor_decode_legacy(buf
+ fp
, _PATH_DEV_MEM
);
234 if (rc
>= VIRT_VENDOR_NONE
)
/*
 * Modern DMI probing: read the table the kernel exports under
 * _PATH_SYS_DMI instead of poking /dev/mem.  st_size/4 serves as an
 * upper bound on the structure count (each DMI structure header is at
 * least 4 bytes); the trailing argument line falls outside this
 * extract.
 */
243 static int read_hypervisor_dmi_from_sysfw(void)
245 static char const sys_fw_dmi_tables
[] = _PATH_SYS_DMI
;
248 if (stat(sys_fw_dmi_tables
, &st
))
251 return hypervisor_from_dmi_table(0, st
.st_size
, st
.st_size
/ 4,
/*
 * DMI-based hypervisor detection: try the sysfs firmware tables first
 * and fall back to /dev/mem scanning; negative results collapse to
 * VIRT_VENDOR_NONE.
 */
255 static int read_hypervisor_dmi(void)
/* paranoia: the decoders rely on exact fixed-width type sizes */
259 if (sizeof(uint8_t) != 1
260 || sizeof(uint16_t) != 2
261 || sizeof(uint32_t) != 4
263 return VIRT_VENDOR_NONE
;
265 /* -1 : no DMI in /sys,
266 * 0 : DMI exist, nothing detected (VIRT_VENDOR_NONE)
267 * >0 : hypervisor detected
269 rc
= read_hypervisor_dmi_from_sysfw();
271 rc
= read_hypervisor_dmi_from_devmem();
273 return rc
< 0 ? VIRT_VENDOR_NONE
: rc
;
/*
 * Return whether a PCI device with the given @vendor/@device id pair is
 * present, by scanning procfs "bus/pci/devices" (one line per device:
 * bus/slot nibbles, then vendor<<16|device; rest of the line ignored).
 * Cleanup and the return statements fall outside this extract.
 */
276 static int has_pci_device(struct lscpu_cxt
*cxt
,
277 unsigned int vendor
, unsigned int device
)
280 unsigned int num
, fn
, ven
, dev
;
283 f
= ul_path_fopen(cxt
->procfs
, "r", "bus/pci/devices");
287 /* for more details about bus/pci/devices format see
288 * drivers/pci/proc.c in linux kernel
290 while(fscanf(f
, "%02x%02x\t%04x%04x\t%*[^\n]",
291 &num
, &fn
, &ven
, &dev
) == 4) {
293 if (ven
== vendor
&& dev
== device
)
303 #if defined(__x86_64__) || defined(__i386__)
305 * This CPUID leaf returns the information about the hypervisor.
306 * EAX : maximum input value for CPUID supported by the hypervisor.
307 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
309 #define HYPERVISOR_INFO_LEAF 0x40000000
/* Thin CPUID wrapper; the inline asm body falls outside this extract.
 * The PIC/i386 variant exists because ebx is the GOT register there and
 * cannot be listed as clobbered. */
311 static inline void cpuid(unsigned int op
, unsigned int *eax
,
312 unsigned int *ebx
, unsigned int *ecx
,
316 #if defined(__PIC__) && defined(__i386__)
317 /* x86 PIC cannot clobber ebx -- gcc bitches */
/*
 * Ask CPUID leaf 0x40000000 for the hypervisor vendor signature and map
 * the 12-byte EBX:ECX:EDX string to a VIRT_VENDOR_* id.
 */
332 static int read_hypervisor_cpuid(void)
334 unsigned int eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
335 char hyper_vendor_id
[13] = { 0 };
337 cpuid(HYPERVISOR_INFO_LEAF
, &eax
, &ebx
, &ecx
, &edx
);
/* the signature comes back four bytes per register, ebx/ecx/edx order */
338 memcpy(hyper_vendor_id
+ 0, &ebx
, 4);
339 memcpy(hyper_vendor_id
+ 4, &ecx
, 4);
340 memcpy(hyper_vendor_id
+ 8, &edx
, 4);
341 hyper_vendor_id
[12] = '\0';
/* empty signature: no (signature-advertising) hypervisor present */
343 if (!hyper_vendor_id
[0])
346 if (!strncmp("XenVMMXenVMM", hyper_vendor_id
, 12))
347 return VIRT_VENDOR_XEN
;
348 else if (!strncmp("KVMKVMKVM", hyper_vendor_id
, 9))
349 return VIRT_VENDOR_KVM
;
350 else if (!strncmp("Microsoft Hv", hyper_vendor_id
, 12))
351 return VIRT_VENDOR_MSHV
;
352 else if (!strncmp("VMwareVMware", hyper_vendor_id
, 12))
353 return VIRT_VENDOR_VMWARE
;
354 else if (!strncmp("UnisysSpar64", hyper_vendor_id
, 12))
355 return VIRT_VENDOR_SPAR
;
357 return VIRT_VENDOR_NONE
;
360 #else /* ! (__x86_64__ || __i386__) */
/* Non-x86 stub -- its body (presumably returning VIRT_VENDOR_NONE)
 * falls outside this extract. */
361 static int read_hypervisor_cpuid(void)
/*
 * Check whether @str appears in procfs "device-tree/compatible", which
 * is a list of NUL-separated strings.  The early-return plumbing, the
 * final return, and the per-entry step over the terminating NUL fall
 * outside this extract.
 */
367 static int is_devtree_compatible(struct lscpu_cxt
*cxt
, const char *str
)
369 FILE *fd
= ul_path_fopen(cxt
->procfs
, "r", "device-tree/compatible");
375 memset(buf
, 0, sizeof(buf
));
376 len
= fread(buf
, 1, sizeof(buf
) - 1, fd
);
/* walk the NUL-separated entries, comparing each against str */
379 for (i
= 0; i
< len
;) {
380 if (!strcmp(&buf
[i
], str
))
382 i
+= strlen(&buf
[i
]);
/*
 * PowerPC platform detection via procfs/device-tree breadcrumbs:
 * distinguishes OS/400 (iSeries), bare-metal PowerNV, PowerVM (pHyp)
 * and KVM pseries guests.  Returns the VIRT_VENDOR_* id and stores the
 * virtualization type in *type.  Several branch bodies fall outside
 * this extract.
 */
390 static int read_hypervisor_powerpc(struct lscpu_cxt
*cxt
, int *type
)
392 int vendor
= VIRT_VENDOR_NONE
;
394 *type
= VIRT_TYPE_NONE
;
396 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
397 if (ul_path_access(cxt
->procfs
, F_OK
, "iSeries") == 0) {
398 vendor
= VIRT_VENDOR_OS400
;
399 *type
= VIRT_TYPE_PARA
;
401 /* PowerNV (POWER Non-Virtualized, bare-metal) */
402 } else if (is_devtree_compatible(cxt
, "ibm,powernv") != 0) {
405 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
406 } else if (ul_path_access(cxt
->procfs
, F_OK
, "device-tree/ibm,partition-name") == 0
407 && ul_path_access(cxt
->procfs
, F_OK
, "device-tree/hmc-managed?") == 0
408 && ul_path_access(cxt
->procfs
, F_OK
, "device-tree/chosen/qemu,graphic-width") != 0) {
411 vendor
= VIRT_VENDOR_PHYP
;
412 *type
= VIRT_TYPE_PARA
;
/* a partition named "full" is treated as non-virtualized pHyp */
414 if (ul_path_scanf(cxt
->procfs
, "device-tree/ibm,partition-name", "%255s", buf
) == 1 &&
415 !strcmp(buf
, "full"))
416 *type
= VIRT_TYPE_NONE
;
419 } else if (is_devtree_compatible(cxt
, "qemu,pseries")) {
420 vendor
= VIRT_VENDOR_KVM
;
421 *type
= VIRT_TYPE_PARA
;
427 #ifdef INCLUDE_VMWARE_BDOOR
429 #define VMWARE_BDOOR_MAGIC 0x564D5868
430 #define VMWARE_BDOOR_PORT 0x5658
431 #define VMWARE_BDOOR_CMD_GETVERSION 10
/* Issue the VMware backdoor GETVERSION command via port I/O; results
 * come back through the four register out-parameters.  Only the input
 * constraints (magic, command, port) are visible here -- the asm
 * statement itself falls outside this extract.  UL_ASAN_BLACKLIST
 * presumably exempts the raw port access from ASan instrumentation. */
433 static UL_ASAN_BLACKLIST
434 void vmware_bdoor(uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
437 #if defined(__PIC__) && defined(__i386__)
438 /* x86 PIC cannot clobber ebx -- gcc bitches */
450 : "0" (VMWARE_BDOOR_MAGIC
),
451 "1" (VMWARE_BDOOR_CMD_GETVERSION
),
452 "2" (VMWARE_BDOOR_PORT
),
/* State for surviving the SIGSEGV that the backdoor probe triggers on
 * non-VMware hardware: the handler longjmps back into
 * is_vmware_platform()'s sigsetjmp(). */
457 static jmp_buf segv_handler_env
;
458 static sigset_t oset
;
461 segv_handler(__attribute__((__unused__
)) int sig
,
462 __attribute__((__unused__
)) siginfo_t
*info
,
463 __attribute__((__unused__
)) void *ignored
)
465 siglongjmp(segv_handler_env
, 1);
/*
 * Probe the VMware backdoor, surviving the SIGSEGV it raises on
 * non-VMware systems via sigsetjmp/siglongjmp; signal mask and handler
 * are carefully saved and restored around the probe.  Returns nonzero
 * when the backdoor answered (ebx carries the magic value back).
 */
468 static int is_vmware_platform(void)
470 uint32_t eax
, ebx
, ecx
, edx
;
471 struct sigaction act
, oact
;
475 * FIXME: Not reliable for non-root users. Note it works as expected if
476 * vmware_bdoor() is not optimized for PIE, but then it fails to build
477 * on 32bit x86 systems. See lscpu git log for more details (commit
478 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
484 * The assembly routine for vmware detection works
485 * fine under vmware, even if ran as regular user. But
486 * on real HW or under other hypervisors, it segfaults (which is
487 * expected). So we temporarily install SIGSEGV handler to catch
488 * the signal. All this magic is needed because lscpu
489 * isn't supposed to require root privileges.
/* re-entered here via segv_handler(): restore the mask and report
 * "not VMware" (the return statement falls outside this extract) */
491 if (sigsetjmp(segv_handler_env
, 1)) {
492 if (sigprocmask(SIG_SETMASK
, &oset
, NULL
))
493 err(EXIT_FAILURE
, _("cannot restore signal mask"));
498 sigaddset(&set
, SIGSEGV
);
499 if (sigprocmask(SIG_UNBLOCK
, &set
, &oset
))
500 err(EXIT_FAILURE
, _("cannot unblock signal"));
502 memset(&act
, 0, sizeof(act
));
503 act
.sa_sigaction
= segv_handler
;
504 act
.sa_flags
= SA_SIGINFO
;
506 if (sigaction(SIGSEGV
, &act
, &oact
))
507 err(EXIT_FAILURE
, _("cannot set signal handler"));
509 vmware_bdoor(&eax
, &ebx
, &ecx
, &edx
);
511 if (sigaction(SIGSEGV
, &oact
, NULL
))
512 err(EXIT_FAILURE
, _("cannot restore signal handler"));
514 if (sigprocmask(SIG_SETMASK
, &oset
, NULL
))
515 err(EXIT_FAILURE
, _("cannot restore signal mask"));
517 return eax
!= (uint32_t)-1 && ebx
== VMWARE_BDOOR_MAGIC
;
520 #else /* ! INCLUDE_VMWARE_BDOOR */
/* No backdoor support on this arch -- the stub body (presumably
 * returning 0) falls outside this extract. */
522 static int is_vmware_platform(void)
527 #endif /* INCLUDE_VMWARE_BDOOR */
/*
 * Top-level virtualization detection.  Ordering matters: WSL is checked
 * first (per the comment below, the VMware probe crashes on Windows
 * 10), then CPUID / DMI / VMware backdoor, then Xen, PowerPC, PCI-id,
 * z-Series (/proc/sysinfo) and container heuristics.  Returns a freshly
 * xcalloc()ed lscpu_virt, freed and replaced by NULL (presumably) when
 * nothing at all was detected.  Many branch bodies and the epilogue
 * fall outside this extract.
 */
528 struct lscpu_virt
*lscpu_read_virtualization(struct lscpu_cxt
*cxt
)
531 struct lscpu_cputype
*ct
;
532 struct lscpu_virt
*virt
;
535 DBG(VIRT
, ul_debug("reading virtualization"));
536 virt
= xcalloc(1, sizeof(*virt
));
/* derive the svm/vmx CPU-flag hint from the default CPU type; the
 * flags string is padded with spaces so " svm "/" vmx " match whole
 * tokens only */
539 ct
= lscpu_cputype_get_default(cxt
);
540 if (ct
&& ct
->flags
) {
541 snprintf(buf
, sizeof(buf
), " %s ", ct
->flags
);
542 if (strstr(buf
, " svm "))
543 virt
->cpuflag
= xstrdup("svm");
544 else if (strstr(buf
, " vmx "))
545 virt
->cpuflag
= xstrdup("vmx");
549 /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
550 fd
= ul_path_fopen(cxt
->procfs
, "r", "sys/kernel/osrelease");
552 if (fgets(buf
, sizeof(buf
), fd
) && strstr(buf
, "Microsoft")) {
553 virt
->vendor
= VIRT_VENDOR_WSL
;
554 virt
->type
= VIRT_TYPE_CONTAINER
;
562 virt
->vendor
= read_hypervisor_cpuid();
564 virt
->vendor
= read_hypervisor_dmi();
565 if (!virt
->vendor
&& is_vmware_platform())
566 virt
->vendor
= VIRT_VENDOR_VMWARE
;
570 virt
->type
= VIRT_TYPE_FULL
;
/* refine the type for Xen guests using the hypervisor features mask
 * exported under _PATH_SYS_HYP_FEATURES */
572 if (virt
->vendor
== VIRT_VENDOR_XEN
) {
575 if (ul_path_scanf(cxt
->rootfs
, _PATH_SYS_HYP_FEATURES
, "%x", &features
) == 1) {
577 if (features
& XEN_FEATURES_PV_MASK
)
578 virt
->type
= VIRT_TYPE_PARA
;
580 else if ((features
& XEN_FEATURES_PVH_MASK
)
581 == XEN_FEATURES_PVH_MASK
)
582 virt
->type
= VIRT_TYPE_PARA
;
585 } else if ((virt
->vendor
= read_hypervisor_powerpc(cxt
, &virt
->type
))) {
588 /* Xen para-virt or dom0 */
589 } else if (ul_path_access(cxt
->procfs
, F_OK
, "xen") == 0) {
593 if (ul_path_scanf(cxt
->procfs
, "xen/capabilities", "%255s", xenbuf
) == 1 &&
594 !strcmp(xenbuf
, "control_d"))
596 virt
->type
= dom0
? VIRT_TYPE_NONE
: VIRT_TYPE_PARA
;
597 virt
->vendor
= VIRT_VENDOR_XEN
;
599 /* Xen full-virt on non-x86_64 */
600 } else if (has_pci_device(cxt
, hv_vendor_pci
[VIRT_VENDOR_XEN
], hv_graphics_pci
[VIRT_VENDOR_XEN
])) {
601 virt
->vendor
= VIRT_VENDOR_XEN
;
602 virt
->type
= VIRT_TYPE_FULL
;
603 } else if (has_pci_device(cxt
, hv_vendor_pci
[VIRT_VENDOR_VMWARE
], hv_graphics_pci
[VIRT_VENDOR_VMWARE
])) {
604 virt
->vendor
= VIRT_VENDOR_VMWARE
;
605 virt
->type
= VIRT_TYPE_FULL
;
606 } else if (has_pci_device(cxt
, hv_vendor_pci
[VIRT_VENDOR_VBOX
], hv_graphics_pci
[VIRT_VENDOR_VBOX
])) {
607 virt
->vendor
= VIRT_VENDOR_VBOX
;
608 virt
->type
= VIRT_TYPE_FULL
;
/* z-Series: /proc/sysinfo exists; default to PR/SM, then let the
 * "Control Program:" line override with KVM or z/VM-era IBM */
611 } else if ((fd
= ul_path_fopen(cxt
->procfs
, "r", "sysinfo"))) {
613 virt
->vendor
= VIRT_VENDOR_IBM
;
614 virt
->hypervisor
= "PR/SM";
615 virt
->type
= VIRT_TYPE_FULL
;
617 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
618 if (!strstr(buf
, "Control Program:"))
620 virt
->vendor
= strstr(buf
, "KVM") ? VIRT_VENDOR_KVM
: VIRT_VENDOR_IBM
;
621 virt
->hypervisor
= strchr(buf
, ':');
623 if (virt
->hypervisor
) {
625 normalize_whitespace((unsigned char *) virt
->hypervisor
);
629 if (virt
->hypervisor
)
630 virt
->hypervisor
= xstrdup(virt
->hypervisor
);
634 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
635 * /proc/bc should not */
636 else if (ul_path_access(cxt
->procfs
, F_OK
, "vz") == 0 &&
637 ul_path_access(cxt
->procfs
, F_OK
, "bc") != 0) {
638 virt
->vendor
= VIRT_VENDOR_PARALLELS
;
639 virt
->type
= VIRT_TYPE_CONTAINER
;
642 } else if (virt
->hypervisor
&&
643 (strcmp(virt
->hypervisor
, "PowerVM Lx86") == 0 ||
644 strcmp(virt
->hypervisor
, "IBM/S390") == 0)) {
645 virt
->vendor
= VIRT_VENDOR_IBM
;
646 virt
->type
= VIRT_TYPE_FULL
;
648 /* User-mode-linux */
649 } else if (ct
&& ct
->modelname
&& strstr(ct
->modelname
, "UML")) {
650 virt
->vendor
= VIRT_VENDOR_UML
;
651 virt
->type
= VIRT_TYPE_PARA
;
/* Linux-VServer: look for a VxID entry in /proc/self/status */
654 } else if ((fd
= ul_path_fopen(cxt
->procfs
, "r", "self/status"))) {
657 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
658 if (lookup(buf
, "VxID", &val
))
666 while (isdigit(*val
))
669 virt
->vendor
= VIRT_VENDOR_VSERVER
;
670 virt
->type
= VIRT_TYPE_CONTAINER
;
/* debug dump, then free-and-return-NULL when nothing was detected */
676 DBG(VIRT
, ul_debugobj(virt
, "virt: cpu='%s' hypervisor='%s' vendor=%d type=%d",
682 if (!virt
->cpuflag
&& !virt
->hypervisor
&& !virt
->vendor
&& !virt
->type
) {
683 lscpu_free_virtualization(virt
);
689 void lscpu_free_virtualization(struct lscpu_virt
*virt
)
695 free(virt
->hypervisor
);