/*
 * lscpu-virt.c — virtualization detection for lscpu
 * (util-linux, sys-utils/lscpu-virt.c)
 */
1 #include <errno.h>
2 #include <stdlib.h>
3 #include <sys/types.h>
4 #include <sys/stat.h>
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <string.h>
8 #include <stdio.h>
9
10 #include "lscpu.h"
11
12 #if (defined(__x86_64__) || defined(__i386__))
13 # define INCLUDE_VMWARE_BDOOR
14 #endif
15
16 #ifdef INCLUDE_VMWARE_BDOOR
17 # include <stdint.h>
18 # include <signal.h>
19 # include <strings.h>
20 # include <setjmp.h>
21 # ifdef HAVE_SYS_IO_H
22 # include <sys/io.h>
23 # endif
24 #endif
25
/* Xen Domain feature flag used for /sys/hypervisor/properties/features */
#define XENFEAT_supervisor_mode_kernel		3
#define XENFEAT_mmu_pt_update_preserve_ad	5
#define XENFEAT_hvm_callback_vector		8

/* Feature-bit masks used to tell Xen PV and PVH guests apart when the
 * hypervisor features file is readable (see the Xen branch in
 * lscpu_read_virtualization()). */
#define XEN_FEATURES_PV_MASK	(1U << XENFEAT_mmu_pt_update_preserve_ad)
#define XEN_FEATURES_PVH_MASK	( (1U << XENFEAT_supervisor_mode_kernel) \
				| (1U << XENFEAT_hvm_callback_vector) )
/* PCI vendor ids characteristic for each hypervisor vendor; paired with
 * hv_graphics_pci[] in the has_pci_device() probes. 0x0000 = no
 * characteristic PCI vendor id for that entry. */
static const int hv_vendor_pci[] = {
	[VIRT_VENDOR_NONE]	= 0x0000,
	[VIRT_VENDOR_XEN]	= 0x5853,
	[VIRT_VENDOR_KVM]	= 0x0000,
	[VIRT_VENDOR_MSHV]	= 0x1414,
	[VIRT_VENDOR_VMWARE]	= 0x15ad,
	[VIRT_VENDOR_VBOX]	= 0x80ee,
};
42
/* PCI device ids (by the name, the emulated graphics adapters) matching
 * hv_vendor_pci[] above; the vendor/device pair is looked up in
 * /proc/bus/pci/devices by has_pci_device(). */
static const int hv_graphics_pci[] = {
	[VIRT_VENDOR_NONE]	= 0x0000,
	[VIRT_VENDOR_XEN]	= 0x0001,
	[VIRT_VENDOR_KVM]	= 0x0000,
	[VIRT_VENDOR_MSHV]	= 0x5353,
	[VIRT_VENDOR_VMWARE]	= 0x0710,
	[VIRT_VENDOR_VBOX]	= 0xbeef,
};
51
/*
 * Read native-endian 16/32-bit values from a raw byte buffer.
 *
 * The previous macros dereferenced a casted pointer, which is a strict-
 * aliasing violation and a potentially misaligned access (both undefined
 * behavior). memcpy() is well-defined everywhere and compiles to the same
 * single load on x86; the call syntax WORD(p)/DWORD(p) is unchanged.
 */
static inline uint16_t WORD(const uint8_t *x)
{
	uint16_t v;

	memcpy(&v, x, sizeof(v));
	return v;
}

static inline uint32_t DWORD(const uint8_t *x)
{
	uint32_t v;

	memcpy(&v, x, sizeof(v));
	return v;
}
54
/*
 * Reads @len bytes starting at offset @base from @devmem (typically
 * /dev/mem or a sysfs DMI table file) into a freshly malloc()ed buffer.
 * Returns the buffer on success or NULL on any failure; the caller owns
 * the result and must free() it.
 */
void *get_mem_chunk(size_t base, size_t len, const char *devmem)
{
	void *chunk = NULL;
	int fd = open(devmem, O_RDONLY);

	if (fd < 0)
		return NULL;

	chunk = malloc(len);
	if (chunk
	    && lseek(fd, base, SEEK_SET) != -1
	    && read_all(fd, chunk, len) != -1) {
		close(fd);
		return chunk;
	}

	/* malloc, seek or read failed -- free(NULL) is a harmless no-op */
	free(chunk);
	close(fd);
	return NULL;
}
78
79 static int hypervisor_from_dmi_table(uint32_t base, uint16_t len,
80 uint16_t num, const char *devmem)
81 {
82 uint8_t *data;
83 int rc = VIRT_VENDOR_NONE;
84 struct dmi_info di;
85
86 data = get_mem_chunk(base, len, devmem);
87 if (!data)
88 return rc;
89
90 memset(&di, 0, sizeof(struct dmi_info));
91 rc = parse_dmi_table(len, num, data, &di);
92 if (rc < 0)
93 goto done;
94
95 if (di.manufacturer && !strcmp(di.manufacturer, "innotek GmbH"))
96 rc = VIRT_VENDOR_INNOTEK;
97 else if (di.manufacturer && strstr(di.manufacturer, "HITACHI") &&
98 di.product && strstr(di.product, "LPAR"))
99 rc = VIRT_VENDOR_HITACHI;
100 else if (di.vendor && !strcmp(di.vendor, "Parallels"))
101 rc = VIRT_VENDOR_PARALLELS;
102 done:
103 free(data);
104 return rc;
105 }
106
/* Returns 1 when the bytes buf[0..len-1] sum to zero modulo 256 (the
 * DMI/SMBIOS entry-point checksum convention), 0 otherwise. */
static int checksum(const uint8_t *buf, size_t len)
{
	uint8_t total = 0;

	while (len--)
		total += *buf++;

	return total == 0;
}
116
#if defined(__x86_64__) || defined(__i386__)
/*
 * Decodes a legacy _DMI_ entry point (x86 memory scan only): validates
 * the 15-byte checksum, then parses the table it references. Returns a
 * VIRT_VENDOR_* value, or -1 when the entry point is corrupt.
 */
static int hypervisor_decode_legacy(uint8_t *buf, const char *devmem)
{
	if (!checksum(buf, 0x0F))
		return -1;

	return hypervisor_from_dmi_table(DWORD(buf + 0x08),	/* table base */
					 WORD(buf + 0x06),	/* table length */
					 WORD(buf + 0x0C),	/* structure count */
					 devmem);
}
#endif
128
/*
 * Decodes an SMBIOS _SM_ entry point: verifies its checksum and the
 * embedded legacy _DMI_ anchor, then parses the referenced DMI table.
 * Returns a VIRT_VENDOR_* value, or -1 for a corrupt entry point.
 */
static int hypervisor_decode_smbios(uint8_t *buf, const char *devmem)
{
	int entry_ok = checksum(buf, buf[0x05])
		       && memcmp(buf + 0x10, "_DMI_", 5) == 0
		       && checksum(buf + 0x10, 0x0F);

	if (!entry_ok)
		return -1;

	return hypervisor_from_dmi_table(DWORD(buf + 0x18),	/* table base */
					 WORD(buf + 0x16),	/* table length */
					 WORD(buf + 0x1C),	/* structure count */
					 devmem);
}
140
/*
 * Probe for EFI interface
 */
#define EFI_NOT_FOUND	(-1)
#define EFI_NO_SMBIOS	(-2)

/*
 * Looks up the physical address of the SMBIOS entry point in the EFI
 * system table ("SMBIOS=0x..." line of systab). Returns 0 and stores the
 * address in *address on success, EFI_NOT_FOUND when the system exposes
 * no EFI interface, or EFI_NO_SMBIOS when EFI is present but exports no
 * usable SMBIOS pointer.
 */
static int address_from_efi(size_t *address)
{
	static const char *tabs[] = {
		"/sys/firmware/efi/systab",	/* Linux 2.6.7 and up */
		"/proc/efi/systab"		/* Linux up to 2.6.6 */
	};
	FILE *tab = NULL;
	char line[64];
	size_t i;
	int ret = EFI_NO_SMBIOS;

	*address = 0;	/* Prevent compiler warning */

	for (i = 0; !tab && i < sizeof(tabs) / sizeof(tabs[0]); i++)
		tab = fopen(tabs[i], "r");
	if (!tab)
		return EFI_NOT_FOUND;	/* No EFI interface */

	while (fgets(line, sizeof(line) - 1, tab)) {
		char *value = strchr(line, '=');

		if (!value)
			continue;
		*value++ = '\0';
		if (strcmp(line, "SMBIOS") != 0)
			continue;
		errno = 0;
		*address = strtoul(value, NULL, 0);
		if (!errno) {
			ret = 0;
			break;
		}
		/* unparsable value: keep scanning the remaining lines */
	}

	fclose(tab);
	return ret;
}
181
/*
 * Scans physical memory (via /dev/mem) for an SMBIOS/DMI entry point and
 * returns the hypervisor vendor found in the table: a VIRT_VENDOR_*
 * value, VIRT_VENDOR_NONE when nothing is detected, or a negative decode
 * error.
 */
static int read_hypervisor_dmi_from_devmem(void)
{
	int rc = VIRT_VENDOR_NONE;
	uint8_t *buf = NULL;
	size_t fp = 0;

	/* First try EFI (ia64, Intel-based Mac) */
	switch (address_from_efi(&fp)) {
	case EFI_NOT_FOUND:
		goto memory_scan;
	case EFI_NO_SMBIOS:
		goto done;
	/* 0: EFI supplied the entry-point address in fp; fall through */
	}

	buf = get_mem_chunk(fp, 0x20, _PATH_DEV_MEM);
	if (!buf)
		goto done;

	rc = hypervisor_decode_smbios(buf, _PATH_DEV_MEM);
	if (rc >= VIRT_VENDOR_NONE)
		goto done;

	/* decode failed (rc < 0): drop the EFI chunk and fall back to scanning */
	free(buf);
	buf = NULL;
memory_scan:
#if defined(__x86_64__) || defined(__i386__)
	/* Fallback to memory scan (x86, x86_64) */
	buf = get_mem_chunk(0xF0000, 0x10000, _PATH_DEV_MEM);
	if (!buf)
		goto done;

	/* entry points are 16-byte aligned within the 0xF0000..0xFFFFF BIOS area */
	for (fp = 0; fp <= 0xFFF0; fp += 16) {
		if (memcmp(buf + fp, "_SM_", 4) == 0 && fp <= 0xFFE0) {
			rc = hypervisor_decode_smbios(buf + fp, _PATH_DEV_MEM);
			if (rc < 0)
				/* bad _SM_ candidate: also skip the _DMI_
				 * anchor embedded 16 bytes after it */
				fp += 16;

		} else if (memcmp(buf + fp, "_DMI_", 5) == 0)
			rc = hypervisor_decode_legacy(buf + fp, _PATH_DEV_MEM);

		/* stop on the first successful decode (>= VIRT_VENDOR_NONE) */
		if (rc >= VIRT_VENDOR_NONE)
			break;
	}
#endif
done:
	free(buf);
	return rc;
}
230
231 static int read_hypervisor_dmi_from_sysfw(void)
232 {
233 static char const sys_fw_dmi_tables[] = _PATH_SYS_DMI;
234 struct stat st;
235
236 if (stat(sys_fw_dmi_tables, &st))
237 return -1;
238
239 return hypervisor_from_dmi_table(0, st.st_size, st.st_size / 4,
240 sys_fw_dmi_tables);
241 }
242
243 static int read_hypervisor_dmi(void)
244 {
245 int rc;
246
247 if (sizeof(uint8_t) != 1
248 || sizeof(uint16_t) != 2
249 || sizeof(uint32_t) != 4
250 || '\0' != 0)
251 return VIRT_VENDOR_NONE;
252
253 /* -1 : no DMI in /sys,
254 * 0 : DMI exist, nothing detected (VIRT_VENDOR_NONE)
255 * >0 : hypervisor detected
256 */
257 rc = read_hypervisor_dmi_from_sysfw();
258 if (rc < 0)
259 rc = read_hypervisor_dmi_from_devmem();
260
261 return rc < 0 ? VIRT_VENDOR_NONE : rc;
262 }
263
264 static int has_pci_device(struct lscpu_cxt *cxt,
265 unsigned int vendor, unsigned int device)
266 {
267 FILE *f;
268 unsigned int num, fn, ven, dev;
269 int res = 1;
270
271 f = ul_path_fopen(cxt->procfs, "r", "bus/pci/devices");
272 if (!f)
273 return 0;
274
275 /* for more details about bus/pci/devices format see
276 * drivers/pci/proc.c in linux kernel
277 */
278 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
279 &num, &fn, &ven, &dev) == 4) {
280
281 if (ven == vendor && dev == device)
282 goto found;
283 }
284
285 res = 0;
286 found:
287 fclose(f);
288 return res;
289 }
290
#if defined(__x86_64__) || defined(__i386__)
/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/* Executes CPUID for leaf @op (with ECX preset to 0) and stores the
 * resulting registers in *eax..*edx. */
static inline void cpuid(unsigned int op, unsigned int *eax,
			 unsigned int *ebx, unsigned int *ecx,
			 unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches */
	"xchg %%ebx, %%esi;"
	"cpuid;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"cpuid;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	: "1" (op), "c"(0));
}

/*
 * Queries the hypervisor CPUID leaf and maps the 12-byte vendor
 * signature (EBX:ECX:EDX concatenated) to a VIRT_VENDOR_* constant.
 * Returns VIRT_VENDOR_NONE for an empty or unrecognized signature.
 */
static int read_hypervisor_cpuid(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char hyper_vendor_id[13] = { 0 };

	cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
	memcpy(hyper_vendor_id + 0, &ebx, 4);
	memcpy(hyper_vendor_id + 4, &ecx, 4);
	memcpy(hyper_vendor_id + 8, &edx, 4);
	hyper_vendor_id[12] = '\0';

	/* all-zero registers: no hypervisor answered the leaf */
	if (!hyper_vendor_id[0])
		goto none;

	if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
		return VIRT_VENDOR_XEN;
	else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
		return VIRT_VENDOR_KVM;
	else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
		return VIRT_VENDOR_MSHV;
	else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
		return VIRT_VENDOR_VMWARE;
	else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
		return VIRT_VENDOR_SPAR;
none:
	return VIRT_VENDOR_NONE;
}

#else /* ! (__x86_64__ || __i386__) */
/* CPUID is x86-only; return 0, which callers treat as "no vendor"
 * (they test the result with `!`). */
static int read_hypervisor_cpuid(void)
{
	return 0;
}
#endif
354
355 static int is_devtree_compatible(struct lscpu_cxt *cxt, const char *str)
356 {
357 FILE *fd = ul_path_fopen(cxt->procfs, "r", "device-tree/compatible");
358
359 if (fd) {
360 char buf[256];
361 size_t i, len;
362
363 memset(buf, 0, sizeof(buf));
364 len = fread(buf, 1, sizeof(buf) - 1, fd);
365 fclose(fd);
366
367 for (i = 0; i < len;) {
368 if (!strcmp(&buf[i], str))
369 return 1;
370 i += strlen(&buf[i]);
371 i++;
372 }
373 }
374
375 return 0;
376 }
377
378 static int read_hypervisor_powerpc(struct lscpu_cxt *cxt, int *type)
379 {
380 int vendor = VIRT_VENDOR_NONE;
381
382 *type = VIRT_TYPE_NONE;
383
384 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
385 if (ul_path_access(cxt->procfs, F_OK, "iSeries") == 0) {
386 vendor = VIRT_VENDOR_OS400;
387 *type = VIRT_TYPE_PARA;
388
389 /* PowerNV (POWER Non-Virtualized, bare-metal) */
390 } else if (is_devtree_compatible(cxt, "ibm,powernv") != 0) {
391 ;
392
393 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
394 } else if (ul_path_access(cxt->procfs, F_OK, "device-tree/ibm,partition-name") == 0
395 && ul_path_access(cxt->procfs, F_OK, "device-tree/hmc-managed?") == 0
396 && ul_path_access(cxt->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {
397
398 char buf[256];
399 vendor = VIRT_VENDOR_PHYP;
400 *type = VIRT_TYPE_PARA;
401
402 if (ul_path_scanf(cxt->procfs, "device-tree/ibm,partition-name", "%255s", buf) == 1 &&
403 !strcmp(buf, "full"))
404 *type = VIRT_TYPE_NONE;
405
406 /* Qemu */
407 } else if (is_devtree_compatible(cxt, "qemu,pseries")) {
408 vendor = VIRT_VENDOR_KVM;
409 *type = VIRT_TYPE_PARA;
410 }
411
412 return vendor;
413 }
414
#ifdef INCLUDE_VMWARE_BDOOR

/* VMware "backdoor" I/O-port protocol constants */
#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

/*
 * Issues the VMware backdoor GETVERSION command via an inl on the magic
 * port. Under VMware the hypervisor fills the output registers; on real
 * hardware the port read typically faults -- see is_vmware_platform()
 * for the SIGSEGV handling around this call.
 */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches */
	"xchg %%ebx, %%esi;"
	"inl (%%dx), %%eax;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"inl (%%dx), %%eax;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	: "0" (VMWARE_BDOOR_MAGIC),
	  "1" (VMWARE_BDOOR_CMD_GETVERSION),
	  "2" (VMWARE_BDOOR_PORT),
	  "3" (0)
	: "memory");
}
444
/* jump target for the temporary SIGSEGV handler below */
static jmp_buf segv_handler_env;

/* SIGSEGV handler installed while probing the VMware backdoor: jumps
 * back to the sigsetjmp() point in is_vmware_platform(). */
static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}
454
/*
 * Returns 1 when the VMware backdoor answers the GETVERSION command
 * (i.e. we run under VMware), 0 otherwise. Root-only; on other
 * platforms the probe SIGSEGVs and the temporary handler converts that
 * into a clean 0.
 */
static int is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;	/* landed here via segv_handler(): not VMware */

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	/* VMware echoes the magic back in EBX; EAX != -1 means a valid version */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
494
#else /* ! INCLUDE_VMWARE_BDOOR */

/* No backdoor probe without x86 port I/O -- never report VMware from here. */
static int is_vmware_platform(void)
{
	return 0;
}

#endif /* INCLUDE_VMWARE_BDOOR */
/*
 * Collects virtualization details (CPU virt flags, hypervisor vendor and
 * name, virtualization type) for the lscpu output. Returns a newly
 * allocated lscpu_virt that the caller releases with
 * lscpu_free_virtualization(), or NULL when nothing virtualization
 * related was detected at all.
 */
struct lscpu_virt *lscpu_read_virtualization(struct lscpu_cxt *cxt)
{
	char buf[BUFSIZ];
	struct lscpu_cputype *ct;
	struct lscpu_virt *virt;
	FILE *fd;

	DBG(VIRT, ul_debug("reading virtualization"));
	virt = xcalloc(1, sizeof(*virt));

	/* CPU flags; padded with spaces so " svm "/" vmx " match whole words only */
	ct = lscpu_cputype_get_default(cxt);
	if (ct && ct->flags) {
		snprintf(buf, sizeof(buf), " %s ", ct->flags);
		if (strstr(buf, " svm "))
			virt->cpuflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			virt->cpuflag = xstrdup("vmx");
	}


	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
	fd = ul_path_fopen(cxt->procfs, "r", "sys/kernel/osrelease");
	if (fd) {
		if (fgets(buf, sizeof(buf), fd) && strstr(buf, "Microsoft")) {
			virt->vendor = VIRT_VENDOR_WSL;
			virt->type = VIRT_TYPE_CONTAINER;
		}
		fclose(fd);
		if (virt->type)
			goto done;
	}

	/* live probes (CPUID, DMI, VMware backdoor); skipped when cxt->noalive */
	if (!cxt->noalive) {
		virt->vendor = read_hypervisor_cpuid();
		if (!virt->vendor)
			virt->vendor = read_hypervisor_dmi();
		if (!virt->vendor && is_vmware_platform())
			virt->vendor = VIRT_VENDOR_VMWARE;
	}

	if (virt->vendor) {
		virt->type = VIRT_TYPE_FULL;

		/* refine Xen to para-virt when the features file says PV or PVH */
		if (virt->vendor == VIRT_VENDOR_XEN) {
			uint32_t features;

			if (ul_path_scanf(cxt->rootfs, _PATH_SYS_HYP_FEATURES, "%x", &features) == 1) {
				/* Xen PV domain */
				if (features & XEN_FEATURES_PV_MASK)
					virt->type = VIRT_TYPE_PARA;
				/* Xen PVH domain */
				else if ((features & XEN_FEATURES_PVH_MASK)
								== XEN_FEATURES_PVH_MASK)
					virt->type = VIRT_TYPE_PARA;
			}
		}
	} else if ((virt->vendor = read_hypervisor_powerpc(cxt, &virt->type))) {
		;

	/* Xen para-virt or dom0 */
	} else if (ul_path_access(cxt->procfs, F_OK, "xen") == 0) {
		char xenbuf[256];
		int dom0 = 0;

		/* "control_d" capability marks dom0, reported as not virtualized */
		if (ul_path_scanf(cxt->procfs, "xen/capabilities", "%255s", xenbuf) == 1 &&
		    !strcmp(xenbuf, "control_d"))
			dom0 = 1;
		virt->type = dom0 ? VIRT_TYPE_NONE : VIRT_TYPE_PARA;
		virt->vendor = VIRT_VENDOR_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device(cxt, hv_vendor_pci[VIRT_VENDOR_XEN], hv_graphics_pci[VIRT_VENDOR_XEN])) {
		virt->vendor = VIRT_VENDOR_XEN;
		virt->type = VIRT_TYPE_FULL;
	} else if (has_pci_device(cxt, hv_vendor_pci[VIRT_VENDOR_VMWARE], hv_graphics_pci[VIRT_VENDOR_VMWARE])) {
		virt->vendor = VIRT_VENDOR_VMWARE;
		virt->type = VIRT_TYPE_FULL;
	} else if (has_pci_device(cxt, hv_vendor_pci[VIRT_VENDOR_VBOX], hv_graphics_pci[VIRT_VENDOR_VBOX])) {
		virt->vendor = VIRT_VENDOR_VBOX;
		virt->type = VIRT_TYPE_FULL;

	/* IBM PR/SM */
	} else if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {

		/* defaults; refined by the "Control Program:" line below */
		virt->vendor = VIRT_VENDOR_IBM;
		virt->hypervisor = "PR/SM";
		virt->type = VIRT_TYPE_FULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (!strstr(buf, "Control Program:"))
				continue;
			virt->vendor = strstr(buf, "KVM") ? VIRT_VENDOR_KVM : VIRT_VENDOR_IBM;
			virt->hypervisor = strchr(buf, ':');

			if (virt->hypervisor) {
				virt->hypervisor++;
				normalize_whitespace((unsigned char *) virt->hypervisor);
				break;
			}
		}
		/* dup so the field is always heap-allocated (never the literal
		 * or a pointer into buf) before leaving this branch */
		if (virt->hypervisor)
			virt->hypervisor = xstrdup(virt->hypervisor);
		fclose(fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 * /proc/bc should not */
	else if (ul_path_access(cxt->procfs, F_OK, "vz") == 0 &&
		 ul_path_access(cxt->procfs, F_OK, "bc") != 0) {
		virt->vendor = VIRT_VENDOR_PARALLELS;
		virt->type = VIRT_TYPE_CONTAINER;

	/* IBM
	 * NOTE(review): within this file virt->hypervisor is only assigned
	 * in the mutually-exclusive sysinfo branch above, so this
	 * condition looks unreachable -- verify before relying on it. */
	} else if (virt->hypervisor &&
		   (strcmp(virt->hypervisor, "PowerVM Lx86") == 0 ||
		    strcmp(virt->hypervisor, "IBM/S390") == 0)) {
		virt->vendor = VIRT_VENDOR_IBM;
		virt->type = VIRT_TYPE_FULL;

	/* User-mode-linux */
	} else if (ct && ct->modelname && strstr(ct->modelname, "UML")) {
		virt->vendor = VIRT_VENDOR_UML;
		virt->type = VIRT_TYPE_PARA;

	/* Linux-VServer */
	} else if ((fd = ul_path_fopen(cxt->procfs, "r", "self/status"))) {
		char *val = NULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID marks a VServer container */
		if (val) {
			char *org = val;

			while (isdigit(*val))
				++val;
			if (!*val) {
				virt->vendor = VIRT_VENDOR_VSERVER;
				virt->type = VIRT_TYPE_CONTAINER;
			}
			free(org);
		}
	}
done:
	DBG(VIRT, ul_debugobj(virt, "virt: cpu='%s' hypervisor='%s' vendor=%d type=%d",
			virt->cpuflag,
			virt->hypervisor,
			virt->vendor,
			virt->type));

	/* nothing at all detected: report "no virtualization info" as NULL */
	if (!virt->cpuflag && !virt->hypervisor && !virt->vendor && !virt->type) {
		lscpu_free_virtualization(virt);
		virt = NULL;
	}
	return virt;
}
663
664 void lscpu_free_virtualization(struct lscpu_virt *virt)
665 {
666 if (!virt)
667 return;
668
669 free(virt->cpuflag);
670 free(virt->hypervisor);
671 free(virt);
672 }
673