]> git.ipfire.org Git - thirdparty/util-linux.git/blame_incremental - sys-utils/lscpu.c
include/c: define UL_ASAN_BLACKLIST address_sanitizer function attribute
[thirdparty/util-linux.git] / sys-utils / lscpu.c
... / ...
CommitLineData
1/*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22#include <assert.h>
23#include <ctype.h>
24#include <dirent.h>
25#include <errno.h>
26#include <fcntl.h>
27#include <getopt.h>
28#include <stdio.h>
29#include <stdlib.h>
30#include <string.h>
31#include <sys/utsname.h>
32#include <unistd.h>
33#include <stdarg.h>
34#include <sys/types.h>
35#include <sys/stat.h>
36
37#if defined(__x86_64__) || defined(__i386__)
38# define INCLUDE_VMWARE_BDOOR
39#endif
40
41#ifdef INCLUDE_VMWARE_BDOOR
42# include <stdint.h>
43# include <signal.h>
44# include <strings.h>
45# include <setjmp.h>
46# ifdef HAVE_SYS_IO_H
47# include <sys/io.h>
48# endif
49#endif
50
51#include <libsmartcols.h>
52
53#include "cpuset.h"
54#include "nls.h"
55#include "xalloc.h"
56#include "c.h"
57#include "strutils.h"
58#include "bitops.h"
59#include "path.h"
60#include "closestream.h"
61#include "optutils.h"
62#include "lscpu.h"
63
/* upper limit for the number of cache/index<N> directories we scan */
#define CACHE_MAX 100

/* /sys and /proc paths used for probing; kept in one place so that the
 * whole tree can be redirected to a snapshot for testing */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE		_PATH_SYS_SYSTEM "/node"
#define _PATH_PROC_XEN		"/proc/xen"
#define _PATH_PROC_XENCAP	_PATH_PROC_XEN "/capabilities"
#define _PATH_PROC_CPUINFO	"/proc/cpuinfo"
#define _PATH_PROC_PCIDEVS	"/proc/bus/pci/devices"
#define _PATH_PROC_SYSINFO	"/proc/sysinfo"
#define _PATH_PROC_STATUS	"/proc/self/status"
#define _PATH_PROC_VZ		"/proc/vz"
#define _PATH_PROC_BC		"/proc/bc"
#define _PATH_PROC_DEVICETREE	"/proc/device-tree"
#define _PATH_DEV_MEM		"/dev/mem"
80
/* virtualization types (the "Virtualization type:" summary line) */
enum {
	VIRT_NONE	= 0,	/* bare metal, or no virtualization detected */
	VIRT_PARA,		/* para-virtualized guest */
	VIRT_FULL,		/* fully virtualized guest */
	VIRT_CONT		/* container (OS-level virtualization) */
};
/* human-readable names, indexed by VIRT_* */
const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

/* hypervisor vendor names, indexed by the HYPER_* ids (see lscpu.h) */
const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
};

/* PCI vendor ids of the emulated devices each hypervisor exposes;
 * paired with hv_graphics_pci[] in has_pci_device() lookups */
const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device ids of the emulated graphics adapters, same indexing */
const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};
129
/* CPU modes (bitmask stored in lscpu_desc.mode) */
enum {
	MODE_32BIT	= (1 << 1),	/* CPU can run 32-bit code */
	MODE_64BIT	= (1 << 2)	/* CPU can run 64-bit code */
};

/* cache(s) description */
struct cpu_cache {
	char		*name;		/* e.g. "L1d", "L2" (built in read_cache()) */
	char		*size;		/* size string as exported by sysfs */

	int		nsharedmaps;	/* number of entries in sharedmaps */
	cpu_set_t	**sharedmaps;	/* unique shared_cpu_map sets */
};

/* dispatching modes */
enum {
	DISP_HORIZONTAL = 0,
	DISP_VERTICAL	= 1
};

/* human-readable names, indexed by DISP_* */
const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* cpu polarization */
enum {
	POLAR_UNKNOWN	= 0,
	POLAR_VLOW,
	POLAR_VMEDIUM,
	POLAR_VHIGH,
	POLAR_HORIZONTAL
};

/* two spellings per POLAR_* value: terse for -p output, long for -e */
struct polarization_modes {
	char *parsable;
	char *readable;
};

struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};
177
/* global description -- everything lscpu knows about the machine */
struct lscpu_desc {
	char *arch;		/* architecture from uname() */
	char *vendor;		/* "vendor"/"vendor_id" from /proc/cpuinfo */
	char *family;		/* "family"/"cpu family" */
	char *model;		/* "model" */
	char *modelname;	/* "model name" */
	char *virtflag;		/* virtualization flag (vmx, svm) */
	char *hypervisor;	/* hypervisor software */
	int hyper;		/* hypervisor vendor ID */
	int virtype;		/* VIRT_PARA|FULL|NONE ? */
	char *mhz;		/* "cpu MHz" from /proc/cpuinfo */
	char **maxmhz;		/* maximum mega hertz */
	char **minmhz;		/* minimum mega hertz */
	char *stepping;		/* CPU stepping */
	char *bogomips;		/* bogoMIPS rating string */
	char *flags;		/* CPU feature flags string */
	int dispatching;	/* none (-1), horizontal or vertical */
	int mode;		/* MODE_32BIT/MODE_64BIT bitmask */

	int ncpuspos;		/* maximal possible CPUs */
	int ncpus;		/* number of present CPUs */
	cpu_set_t *present;	/* mask with present CPUs */
	cpu_set_t *online;	/* mask with online CPUs */

	int nthreads;		/* number of online threads */

	int ncaches;		/* number of cache/index<N> levels found */
	struct cpu_cache *caches;	/* one entry per cache level */

	/*
	 * All maps are sequentially indexed (0..ncpuspos), the array index
	 * does not have to match the cpuX number as presented by the kernel.
	 * You have to use real_cpu_num() to get the real cpuX number.
	 *
	 * For example, if the possible system CPUs are 1,3,5 then
	 * ncpuspos=3 and all arrays are indexed 0..2.
	 */
	int *idx2cpunum;	/* mapping index to CPU num */

	int nnodes;		/* number of NUMA nodes */
	int *idx2nodenum;	/* Support for discontinuous nodes */
	cpu_set_t **nodemaps;	/* array with NUMA nodes */

	/* books -- based on book_siblings (internal kernel map of cpuX's
	 * hardware threads within the same book) */
	int nbooks;		/* number of all online books */
	cpu_set_t **bookmaps;	/* unique book_siblings */

	/* sockets -- based on core_siblings (internal kernel map of cpuX's
	 * hardware threads within the same physical_package_id (socket)) */
	int nsockets;		/* number of all online sockets */
	cpu_set_t **socketmaps;	/* unique core_siblings */

	/* cores -- based on thread_siblings (internal kernel map of cpuX's
	 * hardware threads within the same core as cpuX) */
	int ncores;		/* number of all online cores */
	cpu_set_t **coremaps;	/* unique thread_siblings */

	int *polarization;	/* cpu polarization (POLAR_*), s390-style sysfs */
	int *addresses;		/* physical cpu addresses */
	int *configured;	/* cpu configured */
};
241
/* output formats */
enum {
	OUTPUT_SUMMARY	= 0,	/* default */
	OUTPUT_PARSABLE,	/* -p */
	OUTPUT_READABLE,	/* -e */
};

/* data source */
enum {
	SYSTEM_LIVE = 0,	/* analyzing a live system */
	SYSTEM_SNAPSHOT,	/* analyzing a snapshot of a different system */
};

/* command-line modifiers controlling what is printed and how */
struct lscpu_modifier {
	int		mode;		/* OUTPUT_* */
	int		system;		/* SYSTEM_* */
	unsigned int	hex:1,		/* print CPU masks rather than CPU lists */
			compat:1,	/* use backwardly compatible format */
			online:1,	/* print online CPUs */
			offline:1;	/* print offline CPUs */
};

static int maxcpus;		/* size in bits of kernel cpu mask */

/* test a CPU bit in the online/present mask; safe on NULL desc or mask */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* translate a sequential array index to the kernel's cpuX number */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])

/*
 * IDs -- indexes into coldescs[] below
 */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};

/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header as printed */
	const char *help;	/* --help text */

	unsigned int  is_abbr:1;	/* name is abbreviation */
};

static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU]          = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE]         = { "CORE", N_("logical core number") },
	[COL_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ]	   = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ]	   = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};
315
316static int
317column_name_to_id(const char *name, size_t namesz)
318{
319 size_t i;
320
321 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
322 const char *cn = coldescs[i].name;
323
324 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
325 return i;
326 }
327 warnx(_("unknown column: %s"), name);
328 return -1;
329}
330
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <value>"
 *
 * Returns 1 and stores a newly allocated copy of the value in *@value
 * (trailing whitespace stripped) when @line matches; 0 otherwise.
 *
 * Callers must pass *@value initialized (NULL before the first hit):
 * once a value has been found the line is ignored, so the first
 * occurrence wins. /proc/cpuinfo repeats most tags once per CPU; without
 * this guard every repetition would leak the previous xstrdup() result.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast for ctype -- plain char may be negative (UB) */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* trim trailing whitespace (the fgets() buffer keeps the '\n') */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
372
373/* Don't init the mode for platforms where we are not able to
374 * detect that CPU supports 64-bit mode.
375 */
376static int
377init_mode(struct lscpu_modifier *mod)
378{
379 int m = 0;
380
381 if (mod->system == SYSTEM_SNAPSHOT)
382 /* reading info from any /{sys,proc} dump, don't mix it with
383 * information about our real CPU */
384 return 0;
385
386#if defined(__alpha__) || defined(__ia64__)
387 m |= MODE_64BIT; /* 64bit platforms only */
388#endif
389 /* platforms with 64bit flag in /proc/cpuinfo, define
390 * 32bit default here */
391#if defined(__i386__) || defined(__x86_64__) || \
392 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
393 m |= MODE_32BIT;
394#endif
395 return m;
396}
397
/*
 * Fills @desc with the architecture, /proc/cpuinfo details, the global
 * maxcpus value, and the possible/present/online CPU masks.
 * Exits on fatal errors (uname failure, missing cpu/possible).
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- each recognized tag fills one desc field; the empty
	 * statement bodies just keep the else-if chain short-circuiting */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "flags", &desc->flags)) ;		/* x86 */
		else if (lookup(buf, "features", &desc->flags)) ;	/* s390 */
		else if (lookup(buf, "type", &desc->flags)) ;		/* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* pad with spaces so strstr() matches whole words only */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* sparc64 */
	}

	/* uname() describes the running kernel, so only trust it on a
	 * live system, never for a snapshot of another machine */
	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (path_exist(_PATH_SYS_CPU "/kernel_max"))
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	/* build the index -> cpuX mapping from the "possible" mask */
	if (path_exist(_PATH_SYS_CPU "/possible")) {
		cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET(num, tmp))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(tmp);
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (path_exist(_PATH_SYS_CPU "/present")) {
		desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);
	}

	/* get mask for online CPUs */
	if (path_exist(_PATH_SYS_CPU "/online")) {
		desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);
	}

	/* get dispatching mode; -1 when the kernel does not provide it */
	if (path_exist(_PATH_SYS_CPU "/dispatching"))
		desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
	else
		desc->dispatching = -1;
}
505
506static int
507has_pci_device(unsigned int vendor, unsigned int device)
508{
509 FILE *f;
510 unsigned int num, fn, ven, dev;
511 int res = 1;
512
513 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
514 if (!f)
515 return 0;
516
517 /* for more details about bus/pci/devices format see
518 * drivers/pci/proc.c in linux kernel
519 */
520 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
521 &num, &fn, &ven, &dev) == 4) {
522
523 if (ven == vendor && dev == device)
524 goto found;
525 }
526
527 res = 0;
528found:
529 fclose(f);
530 return res;
531}
532
533#if defined(__x86_64__) || defined(__i386__)
534
/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/*
 * Executes the CPUID instruction for leaf @op (sub-leaf fixed to 0 via
 * the "c"(0) input) and stores the four result registers.
 */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
				 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches; park it in esi
	 * around the instruction instead */
	"xchg %%ebx, %%esi;"
	"cpuid;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"cpuid;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	: "1" (op), "c"(0));
}
562
563static void
564read_hypervisor_cpuid(struct lscpu_desc *desc)
565{
566 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
567 char hyper_vendor_id[13];
568
569 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
570
571 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
572 memcpy(hyper_vendor_id + 0, &ebx, 4);
573 memcpy(hyper_vendor_id + 4, &ecx, 4);
574 memcpy(hyper_vendor_id + 8, &edx, 4);
575 hyper_vendor_id[12] = '\0';
576
577 if (!hyper_vendor_id[0])
578 return;
579
580 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
581 desc->hyper = HYPER_XEN;
582 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
583 desc->hyper = HYPER_KVM;
584 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
585 desc->hyper = HYPER_MSHV;
586 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
587 desc->hyper = HYPER_VMWARE;
588}
589
590#else /* ! (__x86_64__ || __i386__) */
/* CPUID-based detection is x86 only; elsewhere this is a no-op stub */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
595#endif
596
/*
 * PowerPC-specific hypervisor detection from /proc entries.
 * Returns desc->hyper (non-zero when something was detected).
 * Must only be called while desc->hyper is still unset.
 */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* powerpc:
	 * IBM iSeries: legacy, if /proc/iSeries exists, its para-virtualized on top of OS/400
	 * IBM pSeries: always has a hypervisor
	 *              if partition-name is "full", its kind of "bare-metal": full-system-partition
	 *              otherwise its some partition created by Hardware Management Console
	 *              in any case, its always some sort of HVM
	 *              Note that pSeries could also be emulated by qemu/KVM.
	 * KVM: "linux,kvm" in /hypervisor/compatible indicates a KVM guest
	 * Xen: not in use, not detected
	 */
	if (path_exist("/proc/iSeries")) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* pSeries: device-tree has a partition name, is HMC-managed, and
	 * does not carry qemu properties (which would mean KVM emulation) */
	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
		if (fd) {
			char buf[256];
			/* a "full" partition is effectively bare metal */
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}
	} else if (path_exist(_PATH_PROC_DEVICETREE "/hypervisor/compatible")) {
		FILE *fd;
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/hypervisor/compatible");
		if (fd) {
			char buf[256];
			size_t i, len;
			memset(buf, 0, sizeof(buf));
			len = fread(buf, 1, sizeof(buf) - 1, fd);
			fclose(fd);
			/* the property is a list of NUL-separated strings;
			 * walk them looking for "linux,kvm" */
			for (i = 0; i < len;) {
				if (!strcmp(&buf[i], "linux,kvm")) {
					desc->hyper = HYPER_KVM;
					desc->virtype = VIRT_FULL;
					break;
				}
				i += strlen(&buf[i]);
				i++;
			}
		}
	}

	return desc->hyper;
}
651
652#ifdef INCLUDE_VMWARE_BDOOR
653
/* magic value expected in EAX/returned in EBX by the VMware backdoor */
#define VMWARE_BDOOR_MAGIC          0x564D5868
/* pseudo I/O port the backdoor listens on */
#define VMWARE_BDOOR_PORT           0x5658
/* backdoor command: query version */
#define VMWARE_BDOOR_CMD_GETVERSION 10

/*
 * Issues the VMware backdoor "get version" call via an IN instruction
 * on the backdoor port. On real hardware this normally faults (SIGSEGV),
 * which is why the caller installs a signal handler first.
 */
static inline
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches; park it in esi
		 * around the instruction instead */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}
681
/* jump target used to recover from the SIGSEGV the backdoor probe may
 * raise on non-VMware hardware.
 * NOTE(review): sigsetjmp() is used with jmp_buf; POSIX specifies
 * sigjmp_buf -- works with glibc, verify on other libcs. */
static jmp_buf segv_handler_env;

/* SIGSEGV handler: jump straight back into is_vmware_platform() */
static void
segv_handler(__attribute__((__unused__)) int sig,
             __attribute__((__unused__)) siginfo_t *info,
             __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}
691
/*
 * Probes the VMware backdoor port. Returns 1 when running under VMware,
 * 0 otherwise (including when the probe faulted on real hardware).
 */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;	/* landed here via siglongjmp -> not VMware */

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("error: can not set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	/* restore the previous handler before returning */
	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("error: can not restore signal handler"));

	/* a successful backdoor call echoes the magic back in EBX */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
723
724#else /* ! INCLUDE_VMWARE_BDOOR */
725
/* backdoor probe needs x86 port I/O; elsewhere report "not VMware" */
static int
is_vmware_platform(void)
{
	return 0;
}
731
732#endif /* INCLUDE_VMWARE_BDOOR */
733
/*
 * Detects the hypervisor vendor and virtualization type, trying probes
 * from most to least specific: CPUID / DMI / VMware backdoor (live
 * systems only), then powerpc device-tree, Xen /proc files, emulated
 * PCI devices, s390 sysinfo, and finally container heuristics.
 * Fills desc->hyper, desc->virtype and (on s390) desc->hypervisor.
 */
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* these probes query the live CPU/firmware, so skip them when
	 * analyzing a snapshot of a different system */
	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper)
		desc->virtype = VIRT_FULL;

	else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (path_exist(_PATH_PROC_XEN)) {
		int dom0 = 0;
		fd = path_fopen("r", 0, _PATH_PROC_XENCAP);

		if (fd) {
			char buf[256];

			/* "control_d" in the capabilities file marks dom0 */
			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if (path_exist(_PATH_PROC_SYSINFO)) {
		FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
		char buf[BUFSIZ];

		if (!sysinfo_fd)
			return;
		desc->hyper = HYPER_IBM;
		/* NOTE(review): string literal default; the loop below may
		 * replace it with heap memory -- mixed ownership, take care
		 * if a free() is ever added */
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
			char *str;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			str = strchr(buf, ':');
			if (!str)
				continue;
			/* allocate a copy of the text after the colon;
			 * NOTE(review): on repeated matches the previous
			 * allocation is leaked */
			xasprintf(&str, "%s", str + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			/* NOTE(review): searching for a single space strips
			 * every blank; to squeeze only repeated blanks this
			 * should look for a double space -- confirm intent */
			while ((str = strstr(desc->hypervisor, " ")))
				memmove(str, str + 1, strlen(str));
		}
		fclose(sysinfo_fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 * /proc/bc should not */
	else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if (path_exist(_PATH_PROC_STATUS)) {
		char buf[BUFSIZ];
		char *val = NULL;

		fd = path_fopen("r", 1, _PATH_PROC_STATUS);
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID means a VServer guest */
		if (val) {
			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
		}
	}
}
856
857/* add @set to the @ary, unnecessary set is deallocated. */
858static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
859{
860 int i;
861 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
862
863 if (!ary)
864 return -1;
865
866 for (i = 0; i < *items; i++) {
867 if (CPU_EQUAL_S(setsize, set, ary[i]))
868 break;
869 }
870 if (i == *items) {
871 ary[*items] = set;
872 ++*items;
873 return 0;
874 }
875 CPU_FREE(set);
876 return 1;
877}
878
/*
 * Reads the sibling maps for the CPU at index @idx and folds them into
 * the unique core/socket/book map arrays in @desc. The first call also
 * estimates the topology counts and allocates the arrays.
 */
static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
	int num = real_cpu_num(desc, idx);

	/* no topology directory -- e.g. an offline CPU */
	if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
		return;

	thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/thread_siblings", num);
	core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/core_siblings", num);
	book_siblings = NULL;
	/* book_siblings is optional (s390-style sysfs) */
	if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
		book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					    "/cpu%d/topology/book_siblings", num);

	/* first CPU seen: derive rough counts and allocate the arrays */
	if (!desc->coremaps) {
		int nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book.  Because of odd /
		 * non-present cpu maps and to keep calculation easy we make
		 * sure that nsockets and nbooks is at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		if (book_siblings)
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
	}

	/* duplicates are dropped (and freed) by add_cpuset_to_array() */
	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	if (book_siblings)
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
}
948
949static void
950read_polarization(struct lscpu_desc *desc, int idx)
951{
952 char mode[64];
953 int num = real_cpu_num(desc, idx);
954
955 if (desc->dispatching < 0)
956 return;
957 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
958 return;
959 if (!desc->polarization)
960 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
961 path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
962 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
963 desc->polarization[idx] = POLAR_VLOW;
964 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
965 desc->polarization[idx] = POLAR_VMEDIUM;
966 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
967 desc->polarization[idx] = POLAR_VHIGH;
968 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
969 desc->polarization[idx] = POLAR_HORIZONTAL;
970 else
971 desc->polarization[idx] = POLAR_UNKNOWN;
972}
973
974static void
975read_address(struct lscpu_desc *desc, int idx)
976{
977 int num = real_cpu_num(desc, idx);
978
979 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
980 return;
981 if (!desc->addresses)
982 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
983 desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
984}
985
986static void
987read_configured(struct lscpu_desc *desc, int idx)
988{
989 int num = real_cpu_num(desc, idx);
990
991 if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
992 return;
993 if (!desc->configured)
994 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
995 desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
996}
997
998static void
999read_max_mhz(struct lscpu_desc *desc, int idx)
1000{
1001 int num = real_cpu_num(desc, idx);
1002
1003 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
1004 return;
1005 if (!desc->maxmhz)
1006 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1007 xasprintf(&(desc->maxmhz[idx]), "%.4f",
1008 (float)path_read_s32(_PATH_SYS_CPU
1009 "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
1010}
1011
1012static void
1013read_min_mhz(struct lscpu_desc *desc, int idx)
1014{
1015 int num = real_cpu_num(desc, idx);
1016
1017 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
1018 return;
1019 if (!desc->minmhz)
1020 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1021 xasprintf(&(desc->minmhz[idx]), "%.4f",
1022 (float)path_read_s32(_PATH_SYS_CPU
1023 "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
1024}
1025
1026static int
1027cachecmp(const void *a, const void *b)
1028{
1029 struct cpu_cache *c1 = (struct cpu_cache *) a;
1030 struct cpu_cache *c2 = (struct cpu_cache *) b;
1031
1032 return strcmp(c2->name, c1->name);
1033}
1034
/*
 * Reads the cache descriptions (name, size, shared_cpu_map) for the CPU
 * at index @idx. The first call counts the cache/index<N> directories
 * and allocates desc->caches; later calls only add new shared maps.
 */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* count index0, index1, ... until the first gap */
		while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, desc->ncaches))
			desc->ncaches++;

		if (!desc->ncaches)
			return;

		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		/* this CPU may expose fewer levels than the first one */
		if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
				num, i))
			continue;
		/* name/size are filled once, by the first CPU that has them */
		if (!ca->name) {
			int type, level;

			/* cache type */
			path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/type",
					num, i);
			if (!strcmp(buf, "Data"))
				type = 'd';
			else if (!strcmp(buf, "Instruction"))
				type = 'i';
			else
				type = 0;

			/* cache level */
			level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
					num, i);
			/* build e.g. "L1d", "L1i" or "L2" (unified) */
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", level);

			ca->name = xstrdup(buf);

			/* cache size */
			if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
				path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
				ca->size = xstrdup(buf);
			} else {
				ca->size = xstrdup("unknown size");
			}
		}

		/* information about how CPUs share different caches */
		map = path_read_cpuset(maxcpus,
				_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
				num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		/* duplicate maps are freed by add_cpuset_to_array() */
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1103
/* Returns non-zero when @d looks like a NUMA node directory, i.e. a
 * (possible) directory named "node<digits>". DT_UNKNOWN is accepted
 * because some filesystems do not fill in d_type. */
static inline int is_node_dirent(struct dirent *d)
{
	return
		d &&
#ifdef _DIRENT_HAVE_D_TYPE
		(d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
		strncmp(d->d_name, "node", 4) == 0 &&
		isdigit_string(d->d_name + 4);
}
1114
/*
 * qsort() comparator for node numbers.
 *
 * Uses a branchless three-way comparison instead of "*a - *b"; the
 * subtraction form invokes signed integer overflow (undefined behavior)
 * for values of opposite sign with large magnitude.
 */
static int
nodecmp(const void *ap, const void *bp)
{
	const int *a = (const int *) ap, *b = (const int *) bp;
	return (*a > *b) - (*a < *b);
}
1121
/*
 * Reads the NUMA topology from /sys/devices/system/node.
 *
 * First pass counts the node<N> directories (desc->nnodes); the second
 * pass collects the node numbers into desc->idx2nodenum (sorted), then
 * each node's cpumap is read into desc->nodemaps.
 */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	char *path;

	/* number of NUMA node */
	path = path_strdup(_PATH_SYS_NODE);
	dir = opendir(path);
	free(path);

	while (dir && (d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		if (dir)
			closedir(dir);
		return;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	if (dir) {
		/* second pass: collect the node numbers */
		rewinddir(dir);
		while ((d = readdir(dir)) && i < desc->nnodes) {
			if (is_node_dirent(d))
				desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
							_("Failed to extract the node number"));
		}
		closedir(dir);
		qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
	}

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		desc->nodemaps[i] = path_read_cpuset(maxcpus,
					_PATH_SYS_NODE "/node%d/cpumap",
					desc->idx2nodenum[i]);
}
1166
1167static char *
1168get_cell_data(struct lscpu_desc *desc, int idx, int col,
1169 struct lscpu_modifier *mod,
1170 char *buf, size_t bufsz)
1171{
1172 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1173 size_t i;
1174 int cpu = real_cpu_num(desc, idx);
1175
1176 *buf = '\0';
1177
1178 switch (col) {
1179 case COL_CPU:
1180 snprintf(buf, bufsz, "%d", cpu);
1181 break;
1182 case COL_CORE:
1183 if (cpuset_ary_isset(cpu, desc->coremaps,
1184 desc->ncores, setsize, &i) == 0)
1185 snprintf(buf, bufsz, "%zu", i);
1186 break;
1187 case COL_SOCKET:
1188 if (cpuset_ary_isset(cpu, desc->socketmaps,
1189 desc->nsockets, setsize, &i) == 0)
1190 snprintf(buf, bufsz, "%zu", i);
1191 break;
1192 case COL_NODE:
1193 if (cpuset_ary_isset(cpu, desc->nodemaps,
1194 desc->nnodes, setsize, &i) == 0)
1195 snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
1196 break;
1197 case COL_BOOK:
1198 if (cpuset_ary_isset(cpu, desc->bookmaps,
1199 desc->nbooks, setsize, &i) == 0)
1200 snprintf(buf, bufsz, "%zu", i);
1201 break;
1202 case COL_CACHE:
1203 {
1204 char *p = buf;
1205 size_t sz = bufsz;
1206 int j;
1207
1208 for (j = desc->ncaches - 1; j >= 0; j--) {
1209 struct cpu_cache *ca = &desc->caches[j];
1210
1211 if (cpuset_ary_isset(cpu, ca->sharedmaps,
1212 ca->nsharedmaps, setsize, &i) == 0) {
1213 int x = snprintf(p, sz, "%zu", i);
1214 if (x <= 0 || (size_t) x + 2 >= sz)
1215 return NULL;
1216 p += x;
1217 sz -= x;
1218 }
1219 if (j != 0) {
1220 *p++ = mod->compat ? ',' : ':';
1221 *p = '\0';
1222 sz++;
1223 }
1224 }
1225 break;
1226 }
1227 case COL_POLARIZATION:
1228 if (desc->polarization) {
1229 int x = desc->polarization[idx];
1230
1231 snprintf(buf, bufsz, "%s",
1232 mod->mode == OUTPUT_PARSABLE ?
1233 polar_modes[x].parsable :
1234 polar_modes[x].readable);
1235 }
1236 break;
1237 case COL_ADDRESS:
1238 if (desc->addresses)
1239 snprintf(buf, bufsz, "%d", desc->addresses[idx]);
1240 break;
1241 case COL_CONFIGURED:
1242 if (!desc->configured)
1243 break;
1244 if (mod->mode == OUTPUT_PARSABLE)
1245 snprintf(buf, bufsz,
1246 desc->configured[idx] ? _("Y") : _("N"));
1247 else
1248 snprintf(buf, bufsz,
1249 desc->configured[idx] ? _("yes") : _("no"));
1250 break;
1251 case COL_ONLINE:
1252 if (!desc->online)
1253 break;
1254 if (mod->mode == OUTPUT_PARSABLE)
1255 snprintf(buf, bufsz,
1256 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
1257 else
1258 snprintf(buf, bufsz,
1259 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
1260 break;
1261 case COL_MAXMHZ:
1262 if (desc->maxmhz)
1263 xstrncpy(buf, desc->maxmhz[idx], bufsz);
1264 break;
1265 case COL_MINMHZ:
1266 if (desc->minmhz)
1267 xstrncpy(buf, desc->minmhz[idx], bufsz);
1268 break;
1269 }
1270 return buf;
1271}
1272
1273static char *
1274get_cell_header(struct lscpu_desc *desc, int col,
1275 struct lscpu_modifier *mod,
1276 char *buf, size_t bufsz)
1277{
1278 *buf = '\0';
1279
1280 if (col == COL_CACHE) {
1281 char *p = buf;
1282 size_t sz = bufsz;
1283 int i;
1284
1285 for (i = desc->ncaches - 1; i >= 0; i--) {
1286 int x = snprintf(p, sz, "%s", desc->caches[i].name);
1287 if (x <= 0 || (size_t) x + 2 > sz)
1288 return NULL;
1289 sz -= x;
1290 p += x;
1291 if (i > 0) {
1292 *p++ = mod->compat ? ',' : ':';
1293 *p = '\0';
1294 sz++;
1295 }
1296 }
1297 if (desc->ncaches)
1298 return buf;
1299 }
1300 snprintf(buf, bufsz, "%s", coldescs[col].name);
1301 return buf;
1302}
1303
1304/*
1305 * [-p] backend, we support two parsable formats:
1306 *
1307 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1308 * output and it contains fixed set of the columns. The CACHE columns are at
1309 * the end of the line and the CACHE is not printed if the number of the caches
1310 * is zero. The CACHE columns are separated by two commas, for example:
1311 *
1312 * $ lscpu --parse
1313 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1314 * 0,0,0,0,,0,0,0
1315 * 1,1,0,0,,1,1,0
1316 *
 * 2) "user defined output" -- this format always prints all columns without a
 * special prefix for the CACHE column. If there are no caches then the column
 * is empty and the header "Cache" is printed rather than a real cache name.
1320 * The CACHE columns are separated by ':'.
1321 *
1322 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1323 * # CPU,Core,Socket,Node,L1d:L1i:L2
1324 * 0,0,0,0,0:0:0
1325 * 1,1,0,0,1:1:0
1326 */
1327static void
1328print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1329 struct lscpu_modifier *mod)
1330{
1331 char buf[BUFSIZ], *data;
1332 int i;
1333
1334 /*
1335 * Header
1336 */
1337 printf(_(
1338 "# The following is the parsable format, which can be fed to other\n"
1339 "# programs. Each different item in every column has an unique ID\n"
1340 "# starting from zero.\n"));
1341
1342 fputs("# ", stdout);
1343 for (i = 0; i < ncols; i++) {
1344 int col = cols[i];
1345
1346 if (col == COL_CACHE) {
1347 if (mod->compat && !desc->ncaches)
1348 continue;
1349 if (mod->compat && i != 0)
1350 putchar(',');
1351 }
1352 if (i > 0)
1353 putchar(',');
1354
1355 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1356
1357 if (data && * data && col != COL_CACHE &&
1358 !coldescs[col].is_abbr) {
1359 /*
1360 * For normal column names use mixed case (e.g. "Socket")
1361 */
1362 char *p = data + 1;
1363
1364 while (p && *p != '\0') {
1365 *p = tolower((unsigned int) *p);
1366 p++;
1367 }
1368 }
1369 fputs(data && *data ? data : "", stdout);
1370 }
1371 putchar('\n');
1372
1373 /*
1374 * Data
1375 */
1376 for (i = 0; i < desc->ncpuspos; i++) {
1377 int c;
1378 int cpu = real_cpu_num(desc, i);
1379
1380 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1381 continue;
1382 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1383 continue;
1384 if (desc->present && !is_cpu_present(desc, cpu))
1385 continue;
1386 for (c = 0; c < ncols; c++) {
1387 if (mod->compat && cols[c] == COL_CACHE) {
1388 if (!desc->ncaches)
1389 continue;
1390 if (c > 0)
1391 putchar(',');
1392 }
1393 if (c > 0)
1394 putchar(',');
1395
1396 data = get_cell_data(desc, i, cols[c], mod,
1397 buf, sizeof(buf));
1398 fputs(data && *data ? data : "", stdout);
1399 }
1400 putchar('\n');
1401 }
1402}
1403
1404/*
1405 * [-e] backend
1406 */
1407static void
1408print_readable(struct lscpu_desc *desc, int cols[], int ncols,
1409 struct lscpu_modifier *mod)
1410{
1411 int i;
1412 char buf[BUFSIZ];
1413 const char *data;
1414 struct libscols_table *table;
1415
1416 scols_init_debug(0);
1417
1418 table = scols_new_table();
1419 if (!table)
1420 err(EXIT_FAILURE, _("failed to initialize output table"));
1421
1422 for (i = 0; i < ncols; i++) {
1423 data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
1424 if (!scols_table_new_column(table, xstrdup(data), 0, 0))
1425 err(EXIT_FAILURE, _("failed to initialize output column"));
1426 }
1427
1428 for (i = 0; i < desc->ncpuspos; i++) {
1429 int c;
1430 struct libscols_line *line;
1431 int cpu = real_cpu_num(desc, i);
1432
1433 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1434 continue;
1435 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1436 continue;
1437 if (desc->present && !is_cpu_present(desc, cpu))
1438 continue;
1439
1440 line = scols_table_new_line(table, NULL);
1441 if (!line)
1442 err(EXIT_FAILURE, _("failed to initialize output line"));
1443
1444 for (c = 0; c < ncols; c++) {
1445 data = get_cell_data(desc, i, cols[c], mod,
1446 buf, sizeof(buf));
1447 if (!data || !*data)
1448 data = "-";
1449 scols_line_set_data(line, c, data);
1450 }
1451 }
1452
1453 scols_print_table(table);
1454 scols_unref_table(table);
1455}
1456
/* Summary-output helpers: print "<key><value>" with the key left-aligned
 * in a 23-character field (print_s for strings, print_n for ints). */
#define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
#define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
1460
1461static void
1462print_cpuset(const char *key, cpu_set_t *set, int hex)
1463{
1464 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1465 size_t setbuflen = 7 * maxcpus;
1466 char setbuf[setbuflen], *p;
1467
1468 if (hex) {
1469 p = cpumask_create(setbuf, setbuflen, set, setsize);
1470 printf("%-23s0x%s\n", key, p);
1471 } else {
1472 p = cpulist_create(setbuf, setbuflen, set, setsize);
1473 print_s(key, p);
1474 }
1475
1476}
1477
1478/*
1479 * default output
1480 */
1481static void
1482print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
1483{
1484 char buf[512];
1485 int i;
1486 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1487
1488 print_s(_("Architecture:"), desc->arch);
1489
1490 if (desc->mode) {
1491 char mbuf[64], *p = mbuf;
1492
1493 if (desc->mode & MODE_32BIT) {
1494 strcpy(p, "32-bit, ");
1495 p += 8;
1496 }
1497 if (desc->mode & MODE_64BIT) {
1498 strcpy(p, "64-bit, ");
1499 p += 8;
1500 }
1501 *(p - 2) = '\0';
1502 print_s(_("CPU op-mode(s):"), mbuf);
1503 }
1504#if !defined(WORDS_BIGENDIAN)
1505 print_s(_("Byte Order:"), "Little Endian");
1506#else
1507 print_s(_("Byte Order:"), "Big Endian");
1508#endif
1509 print_n(_("CPU(s):"), desc->ncpus);
1510
1511 if (desc->online)
1512 print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
1513 _("On-line CPU(s) list:"),
1514 desc->online, mod->hex);
1515
1516 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
1517 cpu_set_t *set;
1518
1519 /* Linux kernel provides cpuset of off-line CPUs that contains
1520 * all configured CPUs (see /sys/devices/system/cpu/offline),
1521 * but want to print real (present in system) off-line CPUs only.
1522 */
1523 set = cpuset_alloc(maxcpus, NULL, NULL);
1524 if (!set)
1525 err(EXIT_FAILURE, _("failed to callocate cpu set"));
1526 CPU_ZERO_S(setsize, set);
1527 for (i = 0; i < desc->ncpuspos; i++) {
1528 int cpu = real_cpu_num(desc, i);
1529 if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
1530 CPU_SET_S(cpu, setsize, set);
1531 }
1532 print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
1533 _("Off-line CPU(s) list:"),
1534 set, mod->hex);
1535 cpuset_free(set);
1536 }
1537
1538 if (desc->nsockets) {
1539 int cores_per_socket, sockets_per_book, books;
1540
1541 cores_per_socket = sockets_per_book = books = 0;
1542 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1543 * Using simply the cpu topology masks in sysfs will not give
1544 * usable results since everything is virtualized. E.g.
1545 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1546 * five cpus.
1547 * If the cpu topology is not exported (e.g. 2nd level guest)
1548 * fall back to old calculation scheme.
1549 */
1550 if (path_exist(_PATH_PROC_SYSINFO)) {
1551 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
1552 char pbuf[BUFSIZ];
1553 int t0, t1, t2;
1554
1555 while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
1556 if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
1557 &t0, &t1, &t2, &books, &sockets_per_book,
1558 &cores_per_socket) == 6)
1559 break;
1560 }
1561 if (fd)
1562 fclose(fd);
1563 }
1564 print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
1565 print_n(_("Core(s) per socket:"),
1566 cores_per_socket ?: desc->ncores / desc->nsockets);
1567 if (desc->nbooks) {
1568 print_n(_("Socket(s) per book:"),
1569 sockets_per_book ?: desc->nsockets / desc->nbooks);
1570 print_n(_("Book(s):"), books ?: desc->nbooks);
1571 } else {
1572 print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
1573 }
1574 }
1575 if (desc->nnodes)
1576 print_n(_("NUMA node(s):"), desc->nnodes);
1577 if (desc->vendor)
1578 print_s(_("Vendor ID:"), desc->vendor);
1579 if (desc->family)
1580 print_s(_("CPU family:"), desc->family);
1581 if (desc->model)
1582 print_s(_("Model:"), desc->model);
1583 if (desc->modelname)
1584 print_s(_("Model name:"), desc->modelname);
1585 if (desc->stepping)
1586 print_s(_("Stepping:"), desc->stepping);
1587 if (desc->mhz)
1588 print_s(_("CPU MHz:"), desc->mhz);
1589 if (desc->maxmhz)
1590 print_s(_("CPU max MHz:"), desc->maxmhz[0]);
1591 if (desc->minmhz)
1592 print_s(_("CPU min MHz:"), desc->minmhz[0]);
1593 if (desc->bogomips)
1594 print_s(_("BogoMIPS:"), desc->bogomips);
1595 if (desc->virtflag) {
1596 if (!strcmp(desc->virtflag, "svm"))
1597 print_s(_("Virtualization:"), "AMD-V");
1598 else if (!strcmp(desc->virtflag, "vmx"))
1599 print_s(_("Virtualization:"), "VT-x");
1600 }
1601 if (desc->hypervisor)
1602 print_s(_("Hypervisor:"), desc->hypervisor);
1603 if (desc->hyper) {
1604 print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
1605 print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
1606 }
1607 if (desc->dispatching >= 0)
1608 print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
1609 if (desc->ncaches) {
1610 char cbuf[512];
1611
1612 for (i = desc->ncaches - 1; i >= 0; i--) {
1613 snprintf(cbuf, sizeof(cbuf),
1614 _("%s cache:"), desc->caches[i].name);
1615 print_s(cbuf, desc->caches[i].size);
1616 }
1617 }
1618
1619 for (i = 0; i < desc->nnodes; i++) {
1620 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
1621 print_cpuset(buf, desc->nodemaps[i], mod->hex);
1622 }
1623}
1624
/*
 * Prints the help text (including the list of available columns) to @out
 * and exits -- with failure status when @out is stderr, success otherwise.
 */
static void __attribute__((__noreturn__)) usage(FILE *out)
{
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all               print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online            print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline           print offline CPUs only\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>]    print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir>     use specified directory as system root\n"), out);
	fputs(_(" -x, --hex               print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	fputs(USAGE_HELP, out);
	fputs(USAGE_VERSION, out);

	fprintf(out, _("\nAvailable columns:\n"));

	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s  %s\n", coldescs[i].name, _(coldescs[i].help));

	fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));

	exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}
1653
int main(int argc, char *argv[])
{
	struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
	struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
	int c, i;
	int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
	int cpu_modifier_specified = 0;

	static const struct option longopts[] = {
		{ "all",        no_argument,       0, 'a' },
		{ "online",     no_argument,       0, 'b' },
		{ "offline",    no_argument,       0, 'c' },
		{ "help",	no_argument,       0, 'h' },
		{ "extended",	optional_argument, 0, 'e' },
		{ "parse",	optional_argument, 0, 'p' },
		{ "sysroot",	required_argument, 0, 's' },
		{ "hex",	no_argument,	   0, 'x' },
		{ "version",	no_argument,	   0, 'V' },
		{ NULL,		0, 0, 0 }
	};

	/* mutually exclusive option groups: -a/-b/-c and -e/-p */
	static const ul_excl_t excl[] = {	/* rows and cols in ASCII order */
		{ 'a','b','c' },
		{ 'e','p' },
		{ 0 }
	};
	int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);
	atexit(close_stdout);

	while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {

		err_exclusive_options(c, longopts, excl, excl_st);

		switch (c) {
		case 'a':
			mod->online = mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'b':
			mod->online = 1;
			cpu_modifier_specified = 1;
			break;
		case 'c':
			mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'h':
			usage(stdout);	/* does not return */
		case 'p':
		case 'e':
			if (optarg) {
				/* accept both "-p=LIST" and "-pLIST" */
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
			break;
		case 's':
			path_set_prefix(optarg);
			mod->system = SYSTEM_SNAPSHOT;
			break;
		case 'x':
			mod->hex = 1;
			break;
		case 'V':
			printf(UTIL_LINUX_VERSION);
			return EXIT_SUCCESS;
		default:
			usage(stderr);
		}
	}

	/* -a/-b/-c only make sense for the tabular outputs */
	if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
		fprintf(stderr,
			_("%s: options --all, --online and --offline may only "
			  "be used with options --extended or --parse.\n"),
			program_invocation_short_name);
		return EXIT_FAILURE;
	}

	if (argc != optind)
		usage(stderr);

	/* set default cpu display mode if none was specified */
	if (!mod->online && !mod->offline) {
		mod->online = 1;
		mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
	}

	/* gather all per-CPU information from /proc and /sys */
	read_basicinfo(desc, mod);

	for (i = 0; i < desc->ncpuspos; i++) {
		read_topology(desc, i);
		read_cache(desc, i);
		read_polarization(desc, i);
		read_address(desc, i);
		read_configured(desc, i);
		read_max_mhz(desc, i);
		read_min_mhz(desc, i);
	}

	if (desc->caches)
		qsort(desc->caches, desc->ncaches,
				sizeof(struct cpu_cache), cachecmp);

	read_nodes(desc);
	read_hypervisor(desc, mod);

	switch(mod->mode) {
	case OUTPUT_SUMMARY:
		print_summary(desc, mod);
		break;
	case OUTPUT_PARSABLE:
		/* default compatible column set for bare --parse */
		if (!ncolumns) {
			columns[ncolumns++] = COL_CPU;
			columns[ncolumns++] = COL_CORE;
			columns[ncolumns++] = COL_SOCKET;
			columns[ncolumns++] = COL_NODE;
			columns[ncolumns++] = COL_CACHE;
			mod->compat = 1;
		}
		print_parsable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_READABLE:
		if (!ncolumns) {
			/* No list was given. Just print whatever is there. */
			columns[ncolumns++] = COL_CPU;
			if (desc->nodemaps)
				columns[ncolumns++] = COL_NODE;
			if (desc->bookmaps)
				columns[ncolumns++] = COL_BOOK;
			if (desc->socketmaps)
				columns[ncolumns++] = COL_SOCKET;
			if (desc->coremaps)
				columns[ncolumns++] = COL_CORE;
			if (desc->caches)
				columns[ncolumns++] = COL_CACHE;
			if (desc->online)
				columns[ncolumns++] = COL_ONLINE;
			if (desc->configured)
				columns[ncolumns++] = COL_CONFIGURED;
			if (desc->polarization)
				columns[ncolumns++] = COL_POLARIZATION;
			if (desc->addresses)
				columns[ncolumns++] = COL_ADDRESS;
			if (desc->maxmhz)
				columns[ncolumns++] = COL_MAXMHZ;
			if (desc->minmhz)
				columns[ncolumns++] = COL_MINMHZ;
		}
		print_readable(desc, columns, ncolumns, mod);
		break;
	}

	return EXIT_SUCCESS;
}