1/*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22#include <assert.h>
23#include <ctype.h>
24#include <dirent.h>
25#include <errno.h>
26#include <fcntl.h>
27#include <getopt.h>
28#include <stdio.h>
29#include <stdlib.h>
30#include <string.h>
31#include <sys/utsname.h>
32#include <unistd.h>
33#include <stdarg.h>
34#include <sys/types.h>
35#include <sys/stat.h>
36
37#if (defined(__x86_64__) || defined(__i386__))
38# if !defined( __SANITIZE_ADDRESS__)
39# define INCLUDE_VMWARE_BDOOR
40# else
41# warning VMWARE detection disabled by __SANITIZE_ADDRESS__
42# endif
43#endif
44
45#ifdef INCLUDE_VMWARE_BDOOR
46# include <stdint.h>
47# include <signal.h>
48# include <strings.h>
49# include <setjmp.h>
50# ifdef HAVE_SYS_IO_H
51# include <sys/io.h>
52# endif
53#endif
54
55#if defined(HAVE_LIBRTAS)
56#include <librtas.h>
57#endif
58
59#include <libsmartcols.h>
60
61#include "cpuset.h"
62#include "nls.h"
63#include "xalloc.h"
64#include "c.h"
65#include "strutils.h"
66#include "bitops.h"
67#include "path.h"
68#include "closestream.h"
69#include "optutils.h"
70#include "lscpu.h"
71
72#define CACHE_MAX 100
73
74/* /sys paths */
75#define _PATH_SYS_SYSTEM "/sys/devices/system"
76#define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
77#define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
78#define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
79#define _PATH_PROC_XEN "/proc/xen"
80#define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
81#define _PATH_PROC_CPUINFO "/proc/cpuinfo"
82#define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
83#define _PATH_PROC_SYSINFO "/proc/sysinfo"
84#define _PATH_PROC_STATUS "/proc/self/status"
85#define _PATH_PROC_VZ "/proc/vz"
86#define _PATH_PROC_BC "/proc/bc"
87#define _PATH_PROC_DEVICETREE "/proc/device-tree"
88#define _PATH_DEV_MEM "/dev/mem"
89
 90/* Xen domain feature flags used for /sys/hypervisor/properties/features */
91#define XENFEAT_supervisor_mode_kernel 3
92#define XENFEAT_mmu_pt_update_preserve_ad 5
93#define XENFEAT_hvm_callback_vector 8
94
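/*
 * These masks are matched against the hex bitmap read from
 * /sys/hypervisor/properties/features in read_hypervisor(): a guest
 * advertising mmu_pt_update_preserve_ad is reported as a PV domain, and
 * one advertising both supervisor_mode_kernel and hvm_callback_vector
 * is reported as PVH (both are shown as "para" virtualization).
 */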
95#define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
96#define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
97 | (1U << XENFEAT_hvm_callback_vector) )
98
99/* virtualization types */
100enum {
101 VIRT_NONE = 0,
102 VIRT_PARA,
103 VIRT_FULL,
104 VIRT_CONT
105};
106const char *virt_types[] = {
107 [VIRT_NONE] = N_("none"),
108 [VIRT_PARA] = N_("para"),
109 [VIRT_FULL] = N_("full"),
110 [VIRT_CONT] = N_("container"),
111};
112
113const char *hv_vendors[] = {
114 [HYPER_NONE] = NULL,
115 [HYPER_XEN] = "Xen",
116 [HYPER_KVM] = "KVM",
117 [HYPER_MSHV] = "Microsoft",
118 [HYPER_VMWARE] = "VMware",
119 [HYPER_IBM] = "IBM",
120 [HYPER_VSERVER] = "Linux-VServer",
121 [HYPER_UML] = "User-mode Linux",
122 [HYPER_INNOTEK] = "Innotek GmbH",
123 [HYPER_HITACHI] = "Hitachi",
124 [HYPER_PARALLELS] = "Parallels",
125 [HYPER_VBOX] = "Oracle",
126 [HYPER_OS400] = "OS/400",
127 [HYPER_PHYP] = "pHyp",
128 [HYPER_SPAR] = "Unisys s-Par"
129};
130
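/*
 * PCI vendor IDs of the hypervisors' emulated devices; together with
 * hv_graphics_pci below they identify the virtual graphics adapter that
 * has_pci_device() scans for in /proc/bus/pci/devices as a fallback
 * detection method (see read_hypervisor()).
 */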
131const int hv_vendor_pci[] = {
132 [HYPER_NONE] = 0x0000,
133 [HYPER_XEN] = 0x5853,
134 [HYPER_KVM] = 0x0000,
135 [HYPER_MSHV] = 0x1414,
136 [HYPER_VMWARE] = 0x15ad,
137 [HYPER_VBOX] = 0x80ee,
138};
139
140const int hv_graphics_pci[] = {
141 [HYPER_NONE] = 0x0000,
142 [HYPER_XEN] = 0x0001,
143 [HYPER_KVM] = 0x0000,
144 [HYPER_MSHV] = 0x5353,
145 [HYPER_VMWARE] = 0x0710,
146 [HYPER_VBOX] = 0xbeef,
147};
148
149/* CPU modes */
150enum {
151 MODE_32BIT = (1 << 1),
152 MODE_64BIT = (1 << 2)
153};
154
155/* cache(s) description */
156struct cpu_cache {
157 char *name;
158 char *size;
159
160 int nsharedmaps;
161 cpu_set_t **sharedmaps;
162};
163
164/* dispatching modes */
165enum {
166 DISP_HORIZONTAL = 0,
167 DISP_VERTICAL = 1
168};
169
170const char *disp_modes[] = {
171 [DISP_HORIZONTAL] = N_("horizontal"),
172 [DISP_VERTICAL] = N_("vertical")
173};
174
175/* cpu polarization */
176enum {
177 POLAR_UNKNOWN = 0,
178 POLAR_VLOW,
179 POLAR_VMEDIUM,
180 POLAR_VHIGH,
181 POLAR_HORIZONTAL
182};
183
184struct polarization_modes {
185 char *parsable;
186 char *readable;
187};
188
189struct polarization_modes polar_modes[] = {
190 [POLAR_UNKNOWN] = {"U", "-"},
191 [POLAR_VLOW] = {"VL", "vert-low"},
192 [POLAR_VMEDIUM] = {"VM", "vert-medium"},
193 [POLAR_VHIGH] = {"VH", "vert-high"},
194 [POLAR_HORIZONTAL] = {"H", "horizontal"},
195};
196
197/* global description */
198struct lscpu_desc {
199 char *arch;
200 char *vendor;
201 char *machinetype; /* s390 */
202 char *family;
203 char *model;
204 char *modelname;
205 char *revision; /* alternative for model (ppc) */
206 char *cpu; /* alternative for modelname (ppc, sparc) */
207 char *virtflag; /* virtualization flag (vmx, svm) */
208 char *hypervisor; /* hypervisor software */
209 int hyper; /* hypervisor vendor ID */
210 int virtype; /* VIRT_PARA|FULL|NONE ? */
211 char *mhz;
212 char *dynamic_mhz; /* dynamic mega hertz (s390) */
213 char *static_mhz; /* static mega hertz (s390) */
214 char **maxmhz; /* maximum mega hertz */
215 char **minmhz; /* minimum mega hertz */
216 char *stepping;
217 char *bogomips;
218 char *flags;
219 int dispatching; /* none, horizontal or vertical */
220 int mode; /* rm, lm or/and tm */
221
222 int ncpuspos; /* maximal possible CPUs */
223 int ncpus; /* number of present CPUs */
224 cpu_set_t *present; /* mask with present CPUs */
225 cpu_set_t *online; /* mask with online CPUs */
226
227 int nthreads; /* number of online threads */
228
229 int ncaches;
230 struct cpu_cache *caches;
231
232 /*
233	 * All maps are sequentially indexed (0..ncpuspos-1); the array index
234	 * does not have to match the cpuX number as presented by the kernel.
235	 * You have to use real_cpu_num() to get the real cpuX number.
236	 *
237	 * For example, if the possible system CPUs are 1,3,5 then ncpuspos=3
238	 * and all arrays are indexed in the range 0..2.
239 */
240 int *idx2cpunum; /* mapping index to CPU num */
241
242	int nnodes;	/* number of NUMA nodes */
243 int *idx2nodenum; /* Support for discontinuous nodes */
244 cpu_set_t **nodemaps; /* array with NUMA nodes */
245
246 /* drawers -- based on drawer_siblings (internal kernel map of cpuX's
247	 * hardware threads within the same drawer) */
248 int ndrawers; /* number of all online drawers */
249 cpu_set_t **drawermaps; /* unique drawer_siblings */
250
251 /* books -- based on book_siblings (internal kernel map of cpuX's
252	 * hardware threads within the same book) */
253 int nbooks; /* number of all online books */
254 cpu_set_t **bookmaps; /* unique book_siblings */
255
256 /* sockets -- based on core_siblings (internal kernel map of cpuX's
257 * hardware threads within the same physical_package_id (socket)) */
258 int nsockets; /* number of all online sockets */
259 cpu_set_t **socketmaps; /* unique core_siblings */
260
261 /* cores -- based on thread_siblings (internal kernel map of cpuX's
262 * hardware threads within the same core as cpuX) */
263 int ncores; /* number of all online cores */
264 cpu_set_t **coremaps; /* unique thread_siblings */
265
266 int *polarization; /* cpu polarization */
267 int *addresses; /* physical cpu addresses */
268 int *configured; /* cpu configured */
269 int physsockets; /* Physical sockets (modules) */
270 int physchips; /* Physical chips */
271 int physcoresperchip; /* Physical cores per chip */
272};
273
274enum {
275 OUTPUT_SUMMARY = 0, /* default */
276 OUTPUT_PARSABLE, /* -p */
277 OUTPUT_READABLE, /* -e */
278};
279
280enum {
281 SYSTEM_LIVE = 0, /* analyzing a live system */
282 SYSTEM_SNAPSHOT, /* analyzing a snapshot of a different system */
283};
284
285struct lscpu_modifier {
286 int mode; /* OUTPUT_* */
287 int system; /* SYSTEM_* */
288 unsigned int hex:1, /* print CPU masks rather than CPU lists */
289 compat:1, /* use backwardly compatible format */
290 online:1, /* print online CPUs */
291 offline:1; /* print offline CPUs */
292};
293
294static int maxcpus; /* size in bits of kernel cpu mask */
295
296#define is_cpu_online(_d, _cpu) \
297 ((_d) && (_d)->online ? \
298 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
299#define is_cpu_present(_d, _cpu) \
300 ((_d) && (_d)->present ? \
301 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)
302
303#define real_cpu_num(_d, _i) ((_d)->idx2cpunum[(_i)])
304
305/*
306 * IDs
307 */
308enum {
309 COL_CPU,
310 COL_CORE,
311 COL_SOCKET,
312 COL_NODE,
313 COL_BOOK,
314 COL_DRAWER,
315 COL_CACHE,
316 COL_POLARIZATION,
317 COL_ADDRESS,
318 COL_CONFIGURED,
319 COL_ONLINE,
320 COL_MAXMHZ,
321 COL_MINMHZ,
322};
323
324/* column description
325 */
326struct lscpu_coldesc {
327 const char *name;
328 const char *help;
329
330 unsigned int is_abbr:1; /* name is abbreviation */
331};
332
333static struct lscpu_coldesc coldescs[] =
334{
335 [COL_CPU] = { "CPU", N_("logical CPU number"), 1 },
336 [COL_CORE] = { "CORE", N_("logical core number") },
337 [COL_SOCKET] = { "SOCKET", N_("logical socket number") },
338 [COL_NODE] = { "NODE", N_("logical NUMA node number") },
339 [COL_BOOK] = { "BOOK", N_("logical book number") },
340 [COL_DRAWER] = { "DRAWER", N_("logical drawer number") },
341 [COL_CACHE] = { "CACHE", N_("shows how caches are shared between CPUs") },
342 [COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
343 [COL_ADDRESS] = { "ADDRESS", N_("physical address of a CPU") },
344 [COL_CONFIGURED] = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
345 [COL_ONLINE] = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
346 [COL_MAXMHZ] = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
347 [COL_MINMHZ] = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
348};
349
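/* Map a column name as given on the command line (case-insensitive,
 * e.g. "cpu" or "SOCKET") to its COL_* id; warns and returns -1 when
 * the name is unknown. */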
350static int
351column_name_to_id(const char *name, size_t namesz)
352{
353 size_t i;
354
355 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
356 const char *cn = coldescs[i].name;
357
358 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
359 return i;
360 }
361 warnx(_("unknown column: %s"), name);
362 return -1;
363}
364
365/* Look up a pattern in a cpuinfo line and return its value.
366 * The expected line format is:
367 *
368 *	"<pattern>	: <value>"
369 */
370static int
371lookup(char *line, char *pattern, char **value)
372{
373 char *p, *v;
374 int len = strlen(pattern);
375
376 /* don't re-fill already found tags, first one wins */
377 if (!*line || *value)
378 return 0;
379
380 /* pattern */
381 if (strncmp(line, pattern, len))
382 return 0;
383
384 /* white spaces */
385 for (p = line + len; isspace(*p); p++);
386
387 /* separator */
388 if (*p != ':')
389 return 0;
390
391 /* white spaces */
392 for (++p; isspace(*p); p++);
393
394 /* value */
395 if (!*p)
396 return 0;
397 v = p;
398
399 /* end of value */
400 len = strlen(line) - 1;
401 for (p = line + len; isspace(*(p-1)); p--);
402 *p = '\0';
403
404 *value = xstrdup(v);
405 return 1;
406}
407
408/* Don't init the mode for platforms where we are not able to
409 * detect whether the CPU supports 64-bit mode.
410 */
411static int
412init_mode(struct lscpu_modifier *mod)
413{
414 int m = 0;
415
416 if (mod->system == SYSTEM_SNAPSHOT)
417 /* reading info from any /{sys,proc} dump, don't mix it with
418 * information about our real CPU */
419 return 0;
420
421#if defined(__alpha__) || defined(__ia64__)
422 m |= MODE_64BIT; /* 64bit platforms only */
423#endif
424 /* platforms with 64bit flag in /proc/cpuinfo, define
425 * 32bit default here */
426#if defined(__i386__) || defined(__x86_64__) || \
427 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
428 m |= MODE_32BIT;
429#endif
430 return m;
431}
432
433#if defined(HAVE_LIBRTAS)
434#define PROCESSOR_MODULE_INFO 43
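/* Decode a big-endian 16-bit value at @offset of the RTAS sysparm
 * buffer (the bytes are treated as unsigned to avoid sign extension). */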
435static int strbe16toh(const char *buf, int offset)
436{
437	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset+1];
438}
439
440static void read_physical_info_powerpc(struct lscpu_desc *desc)
441{
442 char buf[BUFSIZ];
443 int rc, len, ntypes;
444
445 desc->physsockets = desc->physchips = desc->physcoresperchip = 0;
446
447 rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
448 if (rc < 0)
449 return;
450
451 len = strbe16toh(buf, 0);
452 if (len < 8)
453 return;
454
455 ntypes = strbe16toh(buf, 2);
456
457 assert(ntypes <= 1);
458 if (!ntypes)
459 return;
460
461 desc->physsockets = strbe16toh(buf, 4);
462 desc->physchips = strbe16toh(buf, 6);
463 desc->physcoresperchip = strbe16toh(buf, 8);
464}
465#else
466static void read_physical_info_powerpc(
467 struct lscpu_desc *desc __attribute__((__unused__)))
468{
469}
470#endif
471
472static void
473read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
474{
475 FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
476 char buf[BUFSIZ];
477 struct utsname utsbuf;
478 size_t setsize;
479
480 /* architecture */
481 if (uname(&utsbuf) == -1)
482 err(EXIT_FAILURE, _("error: uname failed"));
483 desc->arch = xstrdup(utsbuf.machine);
484
485 /* details */
486 while (fgets(buf, sizeof(buf), fp) != NULL) {
487 if (lookup(buf, "vendor", &desc->vendor)) ;
488 else if (lookup(buf, "vendor_id", &desc->vendor)) ;
489 else if (lookup(buf, "family", &desc->family)) ;
490 else if (lookup(buf, "cpu family", &desc->family)) ;
491 else if (lookup(buf, "model", &desc->model)) ;
492 else if (lookup(buf, "model name", &desc->modelname)) ;
493 else if (lookup(buf, "stepping", &desc->stepping)) ;
494 else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
495 else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
496 else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
497 else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
498 else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
499 else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
500 else if (lookup(buf, "bogomips", &desc->bogomips)) ;
501 else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
502 else if (lookup(buf, "cpu", &desc->cpu)) ;
503 else if (lookup(buf, "revision", &desc->revision)) ;
504 else
505 continue;
506 }
507
508 desc->mode = init_mode(mod);
509
510 if (desc->flags) {
511 snprintf(buf, sizeof(buf), " %s ", desc->flags);
512 if (strstr(buf, " svm "))
513 desc->virtflag = xstrdup("svm");
514 else if (strstr(buf, " vmx "))
515 desc->virtflag = xstrdup("vmx");
516 if (strstr(buf, " lm "))
517 desc->mode |= MODE_32BIT | MODE_64BIT; /* x86_64 */
518 if (strstr(buf, " zarch "))
519 desc->mode |= MODE_32BIT | MODE_64BIT; /* s390x */
520 if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
521 desc->mode |= MODE_32BIT | MODE_64BIT; /* sparc64 */
522 }
523
524 if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
525 if (strcmp(desc->arch, "ppc64") == 0)
526 desc->mode |= MODE_32BIT | MODE_64BIT;
527 else if (strcmp(desc->arch, "ppc") == 0)
528 desc->mode |= MODE_32BIT;
529 }
530
531 fclose(fp);
532
533 if (path_exist(_PATH_SYS_CPU "/kernel_max"))
534 /* note that kernel_max is maximum index [NR_CPUS-1] */
535 maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;
536
537 else if (mod->system == SYSTEM_LIVE)
538 /* the root is '/' so we are working with data from the current kernel */
539 maxcpus = get_max_number_of_cpus();
540
541 if (maxcpus <= 0)
542 /* error or we are reading some /sys snapshot instead of the
543 * real /sys, let's use any crazy number... */
544 maxcpus = 2048;
545
546 setsize = CPU_ALLOC_SIZE(maxcpus);
547
548 if (path_exist(_PATH_SYS_CPU "/possible")) {
549 cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
550 int num, idx;
551
552 desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
553 desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));
554
555 for (num = 0, idx = 0; num < maxcpus; num++) {
556 if (CPU_ISSET(num, tmp))
557 desc->idx2cpunum[idx++] = num;
558 }
559 cpuset_free(tmp);
560 } else
561 err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
562 _PATH_SYS_CPU "/possible");
563
564
565 /* get mask for present CPUs */
566 if (path_exist(_PATH_SYS_CPU "/present")) {
567 desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
568 desc->ncpus = CPU_COUNT_S(setsize, desc->present);
569 }
570
571 /* get mask for online CPUs */
572 if (path_exist(_PATH_SYS_CPU "/online")) {
573 desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
574 desc->nthreads = CPU_COUNT_S(setsize, desc->online);
575 }
576
577 /* get dispatching mode */
578 if (path_exist(_PATH_SYS_CPU "/dispatching"))
579 desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
580 else
581 desc->dispatching = -1;
582
583 if (mod->system == SYSTEM_LIVE)
584 read_physical_info_powerpc(desc);
585
586 if (path_exist(_PATH_PROC_SYSINFO)) {
587 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
588
589 while (fd && fgets(buf, sizeof(buf), fd) != NULL && !desc->machinetype)
590 lookup(buf, "Type", &desc->machinetype);
591 if (fd)
592 fclose(fd);
593 }
594}
595
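/* Return 1 when a PCI device with the given vendor/device ID pair is
 * listed in /proc/bus/pci/devices, 0 otherwise (or when the file cannot
 * be read). */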
596static int
597has_pci_device(unsigned int vendor, unsigned int device)
598{
599 FILE *f;
600 unsigned int num, fn, ven, dev;
601 int res = 1;
602
603 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
604 if (!f)
605 return 0;
606
607 /* for more details about bus/pci/devices format see
608 * drivers/pci/proc.c in linux kernel
609 */
610 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
611 &num, &fn, &ven, &dev) == 4) {
612
613 if (ven == vendor && dev == device)
614 goto found;
615 }
616
617 res = 0;
618found:
619 fclose(f);
620 return res;
621}
622
623#if defined(__x86_64__) || defined(__i386__)
624
625/*
626 * This CPUID leaf returns the information about the hypervisor.
627 * EAX : maximum input value for CPUID supported by the hypervisor.
628 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
629 */
630#define HYPERVISOR_INFO_LEAF 0x40000000
631
632static inline void
633cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
634 unsigned int *ecx, unsigned int *edx)
635{
636 __asm__(
637#if defined(__PIC__) && defined(__i386__)
638 /* x86 PIC cannot clobber ebx -- gcc bitches */
639 "xchg %%ebx, %%esi;"
640 "cpuid;"
641 "xchg %%esi, %%ebx;"
642 : "=S" (*ebx),
643#else
644 "cpuid;"
645 : "=b" (*ebx),
646#endif
647 "=a" (*eax),
648 "=c" (*ecx),
649 "=d" (*edx)
650 : "1" (op), "c"(0));
651}
652
653static void
654read_hypervisor_cpuid(struct lscpu_desc *desc)
655{
656 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
657 char hyper_vendor_id[13];
658
659 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
660
661 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
662 memcpy(hyper_vendor_id + 0, &ebx, 4);
663 memcpy(hyper_vendor_id + 4, &ecx, 4);
664 memcpy(hyper_vendor_id + 8, &edx, 4);
665 hyper_vendor_id[12] = '\0';
666
667 if (!hyper_vendor_id[0])
668 return;
669
670 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
671 desc->hyper = HYPER_XEN;
672 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
673 desc->hyper = HYPER_KVM;
674 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
675 desc->hyper = HYPER_MSHV;
676 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
677 desc->hyper = HYPER_VMWARE;
678 else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
679 desc->hyper = HYPER_SPAR;
680}
681
682#else /* ! (__x86_64__ || __i386__) */
683static void
684read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
685{
686}
687#endif
688
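/* Check whether @str occurs in the NUL-separated list of strings stored
 * in the device-tree property file @path (e.g. ".../compatible"). */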
689static int is_compatible(const char *path, const char *str)
690{
691 FILE *fd = path_fopen("r", 0, "%s", path);
692
693 if (fd) {
694 char buf[256];
695 size_t i, len;
696
697 memset(buf, 0, sizeof(buf));
698 len = fread(buf, 1, sizeof(buf) - 1, fd);
699 fclose(fd);
700
701 for (i = 0; i < len;) {
702 if (!strcmp(&buf[i], str))
703 return 1;
704 i += strlen(&buf[i]);
705 i++;
706 }
707 }
708
709 return 0;
710}
711
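/* PowerPC-specific detection based on the device tree and legacy /proc
 * files; fills in desc->hyper/virtype and returns the HYPER_* id
 * (HYPER_NONE when nothing was recognized). */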
712static int
713read_hypervisor_powerpc(struct lscpu_desc *desc)
714{
715 assert(!desc->hyper);
716
717 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
718 if (path_exist("/proc/iSeries")) {
719 desc->hyper = HYPER_OS400;
720 desc->virtype = VIRT_PARA;
721
722 /* PowerNV (POWER Non-Virtualized, bare-metal) */
723 } else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
724 desc->hyper = HYPER_NONE;
725 desc->virtype = VIRT_NONE;
726
727 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
728 } else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
729 && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
730 && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
731 FILE *fd;
732 desc->hyper = HYPER_PHYP;
733 desc->virtype = VIRT_PARA;
734 fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
735 if (fd) {
736 char buf[256];
737 if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
738 desc->virtype = VIRT_NONE;
739 fclose(fd);
740 }
741
742 /* Qemu */
743 } else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
744 desc->hyper = HYPER_KVM;
745 desc->virtype = VIRT_PARA;
746 }
747 return desc->hyper;
748}
749
750#ifdef INCLUDE_VMWARE_BDOOR
751
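/*
 * VMware "backdoor" detection: an IN from port 0x5658 with EAX set to
 * the magic value and ECX to the GETVERSION command is answered by the
 * hypervisor under VMware; on bare metal the same access usually
 * faults, which is handled via the SIGSEGV trampoline below.
 */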
752#define VMWARE_BDOOR_MAGIC 0x564D5868
753#define VMWARE_BDOOR_PORT 0x5658
754#define VMWARE_BDOOR_CMD_GETVERSION 10
755
756static UL_ASAN_BLACKLIST
757void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
758{
759 __asm__(
760#if defined(__PIC__) && defined(__i386__)
761 /* x86 PIC cannot clobber ebx -- gcc bitches */
762 "xchg %%ebx, %%esi;"
763 "inl (%%dx), %%eax;"
764 "xchg %%esi, %%ebx;"
765 : "=S" (*ebx),
766#else
767 "inl (%%dx), %%eax;"
768 : "=b" (*ebx),
769#endif
770 "=a" (*eax),
771 "=c" (*ecx),
772 "=d" (*edx)
773 : "0" (VMWARE_BDOOR_MAGIC),
774 "1" (VMWARE_BDOOR_CMD_GETVERSION),
775 "2" (VMWARE_BDOOR_PORT),
776 "3" (0)
777 : "memory");
778}
779
780static jmp_buf segv_handler_env;
781
782static void
783segv_handler(__attribute__((__unused__)) int sig,
784 __attribute__((__unused__)) siginfo_t *info,
785 __attribute__((__unused__)) void *ignored)
786{
787 siglongjmp(segv_handler_env, 1);
788}
789
790static int
791is_vmware_platform(void)
792{
793 uint32_t eax, ebx, ecx, edx;
794 struct sigaction act, oact;
795
796 /*
797	 * The assembly routine for VMware detection works
798	 * fine under VMware, even when run as a regular user. But
799	 * on real HW or under other hypervisors it segfaults (which is
800	 * expected). So we temporarily install a SIGSEGV handler to catch
801 * the signal. All this magic is needed because lscpu
802 * isn't supposed to require root privileges.
803 */
804 if (sigsetjmp(segv_handler_env, 1))
805 return 0;
806
807 memset(&act, 0, sizeof(act));
808 act.sa_sigaction = segv_handler;
809 act.sa_flags = SA_SIGINFO;
810
811 if (sigaction(SIGSEGV, &act, &oact))
812 err(EXIT_FAILURE, _("error: can not set signal handler"));
813
814 vmware_bdoor(&eax, &ebx, &ecx, &edx);
815
816 if (sigaction(SIGSEGV, &oact, NULL))
817 err(EXIT_FAILURE, _("error: can not restore signal handler"));
818
819 return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
820}
821
822#else /* ! INCLUDE_VMWARE_BDOOR */
823
824static int
825is_vmware_platform(void)
826{
827 return 0;
828}
829
830#endif /* INCLUDE_VMWARE_BDOOR */
831
832static void
833read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
834{
835 FILE *fd;
836
837 if (mod->system != SYSTEM_SNAPSHOT) {
838 read_hypervisor_cpuid(desc);
839 if (!desc->hyper)
840 desc->hyper = read_hypervisor_dmi();
841 if (!desc->hyper && is_vmware_platform())
842 desc->hyper = HYPER_VMWARE;
843 }
844
845 if (desc->hyper) {
846 desc->virtype = VIRT_FULL;
847
848 if (desc->hyper == HYPER_XEN) {
849 uint32_t features;
850
851 fd = path_fopen("r", 0, _PATH_SYS_HYP_FEATURES);
852 if (fd && fscanf(fd, "%x", &features) == 1) {
853 /* Xen PV domain */
854 if (features & XEN_FEATURES_PV_MASK)
855 desc->virtype = VIRT_PARA;
856 /* Xen PVH domain */
857 else if ((features & XEN_FEATURES_PVH_MASK)
858 == XEN_FEATURES_PVH_MASK)
859 desc->virtype = VIRT_PARA;
860 fclose(fd);
861 } else {
862 err(EXIT_FAILURE, _("failed to read from: %s"),
863 _PATH_SYS_HYP_FEATURES);
864 }
865 }
866 } else if (read_hypervisor_powerpc(desc) > 0) {}
867
868 /* Xen para-virt or dom0 */
869 else if (path_exist(_PATH_PROC_XEN)) {
870 int dom0 = 0;
871 fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
872
873 if (fd) {
874 char buf[256];
875
876 if (fscanf(fd, "%255s", buf) == 1 &&
877 !strcmp(buf, "control_d"))
878 dom0 = 1;
879 fclose(fd);
880 }
881 desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
882 desc->hyper = HYPER_XEN;
883
884 /* Xen full-virt on non-x86_64 */
885 } else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
886 desc->hyper = HYPER_XEN;
887 desc->virtype = VIRT_FULL;
888 } else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
889 desc->hyper = HYPER_VMWARE;
890 desc->virtype = VIRT_FULL;
891 } else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
892 desc->hyper = HYPER_VBOX;
893 desc->virtype = VIRT_FULL;
894
895 /* IBM PR/SM */
896 } else if (path_exist(_PATH_PROC_SYSINFO)) {
897 FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
898 char buf[BUFSIZ];
899
900 if (!sysinfo_fd)
901 return;
902 desc->hyper = HYPER_IBM;
903 desc->hypervisor = "PR/SM";
904 desc->virtype = VIRT_FULL;
905 while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
906 char *str;
907
908 if (!strstr(buf, "Control Program:"))
909 continue;
910 if (!strstr(buf, "KVM"))
911 desc->hyper = HYPER_IBM;
912 else
913 desc->hyper = HYPER_KVM;
914 str = strchr(buf, ':');
915 if (!str)
916 continue;
917 xasprintf(&str, "%s", str + 1);
918
919 /* remove leading, trailing and repeating whitespace */
920 while (*str == ' ')
921 str++;
922 desc->hypervisor = str;
923 str += strlen(str) - 1;
924 while ((*str == '\n') || (*str == ' '))
925 *(str--) = '\0';
926 while ((str = strstr(desc->hypervisor, " ")))
927 memmove(str, str + 1, strlen(str));
928 }
929 fclose(sysinfo_fd);
930 }
931
932 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
933 * /proc/bc should not */
934 else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
935 desc->hyper = HYPER_PARALLELS;
936 desc->virtype = VIRT_CONT;
937
938 /* IBM */
939 } else if (desc->vendor &&
940 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
941 strcmp(desc->vendor, "IBM/S390") == 0)) {
942 desc->hyper = HYPER_IBM;
943 desc->virtype = VIRT_FULL;
944
945 /* User-mode-linux */
946 } else if (desc->modelname && strstr(desc->modelname, "UML")) {
947 desc->hyper = HYPER_UML;
948 desc->virtype = VIRT_PARA;
949
950 /* Linux-VServer */
951 } else if (path_exist(_PATH_PROC_STATUS)) {
952 char buf[BUFSIZ];
953 char *val = NULL;
954
955 fd = path_fopen("r", 1, _PATH_PROC_STATUS);
956 while (fgets(buf, sizeof(buf), fd) != NULL) {
957 if (lookup(buf, "VxID", &val))
958 break;
959 }
960 fclose(fd);
961
962 if (val) {
963 while (isdigit(*val))
964 ++val;
965 if (!*val) {
966 desc->hyper = HYPER_VSERVER;
967 desc->virtype = VIRT_CONT;
968 }
969 }
970 }
971}
972
973/* Add @set to @ary; if an equal set is already present, @set is freed
974 * instead. Returns 0 when added, 1 for a duplicate, -1 on error. */
974static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
975{
976 int i;
977 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
978
979 if (!ary)
980 return -1;
981
982 for (i = 0; i < *items; i++) {
983 if (CPU_EQUAL_S(setsize, set, ary[i]))
984 break;
985 }
986 if (i == *items) {
987 ary[*items] = set;
988 ++*items;
989 return 0;
990 }
991 CPU_FREE(set);
992 return 1;
993}
994
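/* Read the sysfs topology masks (thread/core/book/drawer siblings) for
 * the CPU at index @idx and register each unique mask in the per-level
 * map arrays; the first call also sizes those arrays. */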
995static void
996read_topology(struct lscpu_desc *desc, int idx)
997{
998 cpu_set_t *thread_siblings, *core_siblings;
999 cpu_set_t *book_siblings, *drawer_siblings;
1000 int num = real_cpu_num(desc, idx);
1001
1002 if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
1003 return;
1004
1005 thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1006 "/cpu%d/topology/thread_siblings", num);
1007 core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1008 "/cpu%d/topology/core_siblings", num);
1009 book_siblings = NULL;
1010 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
1011 book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1012 "/cpu%d/topology/book_siblings", num);
1013 drawer_siblings = NULL;
1014 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_siblings", num))
1015 drawer_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1016 "/cpu%d/topology/drawer_siblings", num);
1017
1018 if (!desc->coremaps) {
1019 int ndrawers, nbooks, nsockets, ncores, nthreads;
1020 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1021
1022 /* threads within one core */
1023 nthreads = CPU_COUNT_S(setsize, thread_siblings);
1024 if (!nthreads)
1025 nthreads = 1;
1026
1027 /* cores within one socket */
1028 ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
1029 if (!ncores)
1030 ncores = 1;
1031
1032 /* number of sockets within one book. Because of odd /
1033 * non-present cpu maps and to keep calculation easy we make
1034		 * sure that nsockets and nbooks are at least 1.
1035 */
1036 nsockets = desc->ncpus / nthreads / ncores;
1037 if (!nsockets)
1038 nsockets = 1;
1039
1040 /* number of books */
1041 nbooks = desc->ncpus / nthreads / ncores / nsockets;
1042 if (!nbooks)
1043 nbooks = 1;
1044
1045 /* number of drawers */
1046 ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
1047 if (!ndrawers)
1048 ndrawers = 1;
1049
1050 /* all threads, see also read_basicinfo()
1051 * -- fallback for kernels without
1052 * /sys/devices/system/cpu/online.
1053 */
1054 if (!desc->nthreads)
1055 desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
1056
1057 /* For each map we make sure that it can have up to ncpuspos
1058 * entries. This is because we cannot reliably calculate the
1059 * number of cores, sockets and books on all architectures.
1060 * E.g. completely virtualized architectures like s390 may
1061 * have multiple sockets of different sizes.
1062 */
1063 desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1064 desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1065 if (book_siblings)
1066 desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1067 if (drawer_siblings)
1068 desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1069 }
1070
1071 add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
1072 add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
1073 if (book_siblings)
1074 add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
1075 if (drawer_siblings)
1076 add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
1077}
1078
1079static void
1080read_polarization(struct lscpu_desc *desc, int idx)
1081{
1082 char mode[64];
1083 int num = real_cpu_num(desc, idx);
1084
1085 if (desc->dispatching < 0)
1086 return;
1087 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
1088 return;
1089 if (!desc->polarization)
1090 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1091 path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
1092 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1093 desc->polarization[idx] = POLAR_VLOW;
1094 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1095 desc->polarization[idx] = POLAR_VMEDIUM;
1096 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1097 desc->polarization[idx] = POLAR_VHIGH;
1098 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1099 desc->polarization[idx] = POLAR_HORIZONTAL;
1100 else
1101 desc->polarization[idx] = POLAR_UNKNOWN;
1102}
1103
1104static void
1105read_address(struct lscpu_desc *desc, int idx)
1106{
1107 int num = real_cpu_num(desc, idx);
1108
1109 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
1110 return;
1111 if (!desc->addresses)
1112 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1113 desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
1114}
1115
1116static void
1117read_configured(struct lscpu_desc *desc, int idx)
1118{
1119 int num = real_cpu_num(desc, idx);
1120
1121 if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
1122 return;
1123 if (!desc->configured)
1124 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1125 desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
1126}
1127
1128static void
1129read_max_mhz(struct lscpu_desc *desc, int idx)
1130{
1131 int num = real_cpu_num(desc, idx);
1132
1133 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
1134 return;
1135 if (!desc->maxmhz)
1136 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1137 xasprintf(&(desc->maxmhz[idx]), "%.4f",
1138 (float)path_read_s32(_PATH_SYS_CPU
1139 "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
1140}
1141
1142static void
1143read_min_mhz(struct lscpu_desc *desc, int idx)
1144{
1145 int num = real_cpu_num(desc, idx);
1146
1147 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
1148 return;
1149 if (!desc->minmhz)
1150 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1151 xasprintf(&(desc->minmhz[idx]), "%.4f",
1152 (float)path_read_s32(_PATH_SYS_CPU
1153 "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
1154}
1155
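/* qsort callback: order caches by name in descending order; the output
 * code walks the array backwards, so e.g. L1d ends up printed first. */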
1156static int
1157cachecmp(const void *a, const void *b)
1158{
1159 struct cpu_cache *c1 = (struct cpu_cache *) a;
1160 struct cpu_cache *c2 = (struct cpu_cache *) b;
1161
1162 return strcmp(c2->name, c1->name);
1163}
1164
1165static void
1166read_cache(struct lscpu_desc *desc, int idx)
1167{
1168 char buf[256];
1169 int i;
1170 int num = real_cpu_num(desc, idx);
1171
1172 if (!desc->ncaches) {
1173 while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
1174 num, desc->ncaches))
1175 desc->ncaches++;
1176
1177 if (!desc->ncaches)
1178 return;
1179
1180 desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
1181 }
1182 for (i = 0; i < desc->ncaches; i++) {
1183 struct cpu_cache *ca = &desc->caches[i];
1184 cpu_set_t *map;
1185
1186 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
1187 num, i))
1188 continue;
1189 if (!ca->name) {
1190 int type, level;
1191
1192 /* cache type */
1193 path_read_str(buf, sizeof(buf),
1194 _PATH_SYS_CPU "/cpu%d/cache/index%d/type",
1195 num, i);
1196 if (!strcmp(buf, "Data"))
1197 type = 'd';
1198 else if (!strcmp(buf, "Instruction"))
1199 type = 'i';
1200 else
1201 type = 0;
1202
1203 /* cache level */
1204 level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
1205 num, i);
1206 if (type)
1207 snprintf(buf, sizeof(buf), "L%d%c", level, type);
1208 else
1209 snprintf(buf, sizeof(buf), "L%d", level);
1210
1211 ca->name = xstrdup(buf);
1212
1213 /* cache size */
1214 if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
1215 path_read_str(buf, sizeof(buf),
1216 _PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
1217 ca->size = xstrdup(buf);
1218 } else {
1219 ca->size = xstrdup("unknown size");
1220 }
1221 }
1222
1223 /* information about how CPUs share different caches */
1224 map = path_read_cpuset(maxcpus,
1225 _PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
1226 num, i);
1227
1228 if (!ca->sharedmaps)
1229 ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1230 add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
1231 }
1232}
1233
1234static inline int is_node_dirent(struct dirent *d)
1235{
1236 return
1237 d &&
1238#ifdef _DIRENT_HAVE_D_TYPE
1239 (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
1240#endif
1241 strncmp(d->d_name, "node", 4) == 0 &&
1242 isdigit_string(d->d_name + 4);
1243}
1244
1245static int
1246nodecmp(const void *ap, const void *bp)
1247{
1248 int *a = (int *) ap, *b = (int *) bp;
1249 return *a - *b;
1250}
1251
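/* Enumerate the /sys/devices/system/node/node* directories, store the
 * (possibly discontiguous) node numbers in ascending order and read
 * each node's cpumap. */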
1252static void
1253read_nodes(struct lscpu_desc *desc)
1254{
1255 int i = 0;
1256 DIR *dir;
1257 struct dirent *d;
1258 char *path;
1259
1260	/* number of NUMA nodes */
1261 path = path_strdup(_PATH_SYS_NODE);
1262 dir = opendir(path);
1263 free(path);
1264
1265 while (dir && (d = readdir(dir))) {
1266 if (is_node_dirent(d))
1267 desc->nnodes++;
1268 }
1269
1270 if (!desc->nnodes) {
1271 if (dir)
1272 closedir(dir);
1273 return;
1274 }
1275
1276 desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
1277 desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));
1278
1279 if (dir) {
1280 rewinddir(dir);
1281 while ((d = readdir(dir)) && i < desc->nnodes) {
1282 if (is_node_dirent(d))
1283 desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
1284 _("Failed to extract the node number"));
1285 }
1286 closedir(dir);
1287 qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
1288 }
1289
1290 /* information about how nodes share different CPUs */
1291 for (i = 0; i < desc->nnodes; i++)
1292 desc->nodemaps[i] = path_read_cpuset(maxcpus,
1293 _PATH_SYS_NODE "/node%d/cpumap",
1294 desc->idx2nodenum[i]);
1295}
1296
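/* Render the value of column @col for the CPU at array index @idx into
 * @buf; returns @buf (possibly empty) or NULL when a CACHE string does
 * not fit into @bufsz. */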
1297static char *
1298get_cell_data(struct lscpu_desc *desc, int idx, int col,
1299 struct lscpu_modifier *mod,
1300 char *buf, size_t bufsz)
1301{
1302 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1303 size_t i;
1304 int cpu = real_cpu_num(desc, idx);
1305
1306 *buf = '\0';
1307
1308 switch (col) {
1309 case COL_CPU:
1310 snprintf(buf, bufsz, "%d", cpu);
1311 break;
1312 case COL_CORE:
1313 if (cpuset_ary_isset(cpu, desc->coremaps,
1314 desc->ncores, setsize, &i) == 0)
1315 snprintf(buf, bufsz, "%zu", i);
1316 break;
1317 case COL_SOCKET:
1318 if (cpuset_ary_isset(cpu, desc->socketmaps,
1319 desc->nsockets, setsize, &i) == 0)
1320 snprintf(buf, bufsz, "%zu", i);
1321 break;
1322 case COL_NODE:
1323 if (cpuset_ary_isset(cpu, desc->nodemaps,
1324 desc->nnodes, setsize, &i) == 0)
1325 snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
1326 break;
1327 case COL_DRAWER:
1328 if (cpuset_ary_isset(cpu, desc->drawermaps,
1329 desc->ndrawers, setsize, &i) == 0)
1330 snprintf(buf, bufsz, "%zu", i);
1331 break;
1332 case COL_BOOK:
1333 if (cpuset_ary_isset(cpu, desc->bookmaps,
1334 desc->nbooks, setsize, &i) == 0)
1335 snprintf(buf, bufsz, "%zu", i);
1336 break;
1337 case COL_CACHE:
1338 {
1339 char *p = buf;
1340 size_t sz = bufsz;
1341 int j;
1342
1343 for (j = desc->ncaches - 1; j >= 0; j--) {
1344 struct cpu_cache *ca = &desc->caches[j];
1345
1346 if (cpuset_ary_isset(cpu, ca->sharedmaps,
1347 ca->nsharedmaps, setsize, &i) == 0) {
1348 int x = snprintf(p, sz, "%zu", i);
1349 if (x < 0 || (size_t) x >= sz)
1350 return NULL;
1351 p += x;
1352 sz -= x;
1353 }
1354 if (j != 0) {
1355 if (sz < 2)
1356 return NULL;
1357 *p++ = mod->compat ? ',' : ':';
1358 *p = '\0';
1359 sz--;
1360 }
1361 }
1362 break;
1363 }
1364 case COL_POLARIZATION:
1365 if (desc->polarization) {
1366 int x = desc->polarization[idx];
1367
1368 snprintf(buf, bufsz, "%s",
1369 mod->mode == OUTPUT_PARSABLE ?
1370 polar_modes[x].parsable :
1371 polar_modes[x].readable);
1372 }
1373 break;
1374 case COL_ADDRESS:
1375 if (desc->addresses)
1376 snprintf(buf, bufsz, "%d", desc->addresses[idx]);
1377 break;
1378 case COL_CONFIGURED:
1379 if (!desc->configured)
1380 break;
1381 if (mod->mode == OUTPUT_PARSABLE)
1382 snprintf(buf, bufsz, "%s",
1383 desc->configured[idx] ? _("Y") : _("N"));
1384 else
1385 snprintf(buf, bufsz, "%s",
1386 desc->configured[idx] ? _("yes") : _("no"));
1387 break;
1388 case COL_ONLINE:
1389 if (!desc->online)
1390 break;
1391 if (mod->mode == OUTPUT_PARSABLE)
1392 snprintf(buf, bufsz, "%s",
1393 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
1394 else
1395 snprintf(buf, bufsz, "%s",
1396 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
1397 break;
1398 case COL_MAXMHZ:
1399 if (desc->maxmhz)
1400 xstrncpy(buf, desc->maxmhz[idx], bufsz);
1401 break;
1402 case COL_MINMHZ:
1403 if (desc->minmhz)
1404 xstrncpy(buf, desc->minmhz[idx], bufsz);
1405 break;
1406 }
1407 return buf;
1408}
1409
1410static char *
1411get_cell_header(struct lscpu_desc *desc, int col,
1412 struct lscpu_modifier *mod,
1413 char *buf, size_t bufsz)
1414{
1415 *buf = '\0';
1416
1417 if (col == COL_CACHE) {
1418 char *p = buf;
1419 size_t sz = bufsz;
1420 int i;
1421
1422 for (i = desc->ncaches - 1; i >= 0; i--) {
1423 int x = snprintf(p, sz, "%s", desc->caches[i].name);
1424 if (x < 0 || (size_t) x >= sz)
1425 return NULL;
1426 sz -= x;
1427 p += x;
1428 if (i > 0) {
1429 if (sz < 2)
1430 return NULL;
1431 *p++ = mod->compat ? ',' : ':';
1432 *p = '\0';
1433 sz--;
1434 }
1435 }
1436 if (desc->ncaches)
1437 return buf;
1438 }
1439 snprintf(buf, bufsz, "%s", coldescs[col].name);
1440 return buf;
1441}
1442
1443/*
1444 * [-p] backend, we support two parsable formats:
1445 *
1446 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1447 * output and it contains a fixed set of columns. The CACHE columns are at
1448 * the end of the line and are not printed at all if the number of caches
1449 * is zero. The CACHE columns are separated by two commas, for example:
1450 *
1451 * $ lscpu --parse
1452 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1453 * 0,0,0,0,,0,0,0
1454 * 1,1,0,0,,1,1,0
1455 *
1456 * 2) "user defined output" -- this format always prints all columns without
1457 * a special prefix for the CACHE column. If there are no caches then the column
1458 * is empty and the header "Cache" is printed rather than a real cache name.
1459 * The CACHE columns are separated by ':'.
1460 *
1461 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1462 * # CPU,Core,Socket,Node,L1d:L1i:L2
1463 * 0,0,0,0,0:0:0
1464 * 1,1,0,0,1:1:0
1465 */
1466static void
1467print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1468 struct lscpu_modifier *mod)
1469{
1470 char buf[BUFSIZ], *data;
1471 int i;
1472
1473 /*
1474 * Header
1475 */
1476 printf(_(
1477 "# The following is the parsable format, which can be fed to other\n"
1478	"# programs. Each different item in every column has a unique ID\n"
1479 "# starting from zero.\n"));
1480
1481 fputs("# ", stdout);
1482 for (i = 0; i < ncols; i++) {
1483 int col = cols[i];
1484
1485 if (col == COL_CACHE) {
1486 if (mod->compat && !desc->ncaches)
1487 continue;
1488 if (mod->compat && i != 0)
1489 putchar(',');
1490 }
1491 if (i > 0)
1492 putchar(',');
1493
1494 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1495
1496 if (data && * data && col != COL_CACHE &&
1497 !coldescs[col].is_abbr) {
1498 /*
1499 * For normal column names use mixed case (e.g. "Socket")
1500 */
1501 char *p = data + 1;
1502
1503 while (p && *p != '\0') {
1504 *p = tolower((unsigned int) *p);
1505 p++;
1506 }
1507 }
1508 fputs(data && *data ? data : "", stdout);
1509 }
1510 putchar('\n');
1511
1512 /*
1513 * Data
1514 */
1515 for (i = 0; i < desc->ncpuspos; i++) {
1516 int c;
1517 int cpu = real_cpu_num(desc, i);
1518
1519 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1520 continue;
1521 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1522 continue;
1523 if (desc->present && !is_cpu_present(desc, cpu))
1524 continue;
1525 for (c = 0; c < ncols; c++) {
1526 if (mod->compat && cols[c] == COL_CACHE) {
1527 if (!desc->ncaches)
1528 continue;
1529 if (c > 0)
1530 putchar(',');
1531 }
1532 if (c > 0)
1533 putchar(',');
1534
1535 data = get_cell_data(desc, i, cols[c], mod,
1536 buf, sizeof(buf));
1537 fputs(data && *data ? data : "", stdout);
1538 }
1539 putchar('\n');
1540 }
1541}
1542
1543/*
1544 * [-e] backend
1545 */
1546static void
1547print_readable(struct lscpu_desc *desc, int cols[], int ncols,
1548 struct lscpu_modifier *mod)
1549{
1550 int i;
1551 char buf[BUFSIZ];
1552 const char *data;
1553 struct libscols_table *table;
1554
1555 scols_init_debug(0);
1556
1557 table = scols_new_table();
1558 if (!table)
1559 err(EXIT_FAILURE, _("failed to initialize output table"));
1560
1561 for (i = 0; i < ncols; i++) {
1562 data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
1563 if (!scols_table_new_column(table, xstrdup(data), 0, 0))
1564 err(EXIT_FAILURE, _("failed to initialize output column"));
1565 }
1566
1567 for (i = 0; i < desc->ncpuspos; i++) {
1568 int c;
1569 struct libscols_line *line;
1570 int cpu = real_cpu_num(desc, i);
1571
1572 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1573 continue;
1574 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1575 continue;
1576 if (desc->present && !is_cpu_present(desc, cpu))
1577 continue;
1578
1579 line = scols_table_new_line(table, NULL);
1580 if (!line)
1581 err(EXIT_FAILURE, _("failed to initialize output line"));
1582
1583 for (c = 0; c < ncols; c++) {
1584 data = get_cell_data(desc, i, cols[c], mod,
1585 buf, sizeof(buf));
1586 if (!data || !*data)
1587 data = "-";
1588 scols_line_set_data(line, c, data);
1589 }
1590 }
1591
1592 scols_print_table(table);
1593 scols_unref_table(table);
1594}
1595
1596/* output formats "<key>  <value>" */
1597#define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
1598#define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
1599
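/* Print a "<key>  <set>" line, formatting @set either as a hexadecimal
 * mask (prefixed with 0x) or as a human-readable CPU list. */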
1600static void
1601print_cpuset(const char *key, cpu_set_t *set, int hex)
1602{
1603 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1604 size_t setbuflen = 7 * maxcpus;
1605 char setbuf[setbuflen], *p;
1606
1607 if (hex) {
1608 p = cpumask_create(setbuf, setbuflen, set, setsize);
1609 printf("%-23s0x%s\n", key, p);
1610 } else {
1611 p = cpulist_create(setbuf, setbuflen, set, setsize);
1612 print_s(key, p);
1613 }
1614
1615}
1616
1617/*
1618 * default output
1619 */
1620static void
1621print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
1622{
1623 char buf[512];
1624 int i;
1625 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1626
1627 print_s(_("Architecture:"), desc->arch);
1628
1629 if (desc->mode) {
1630 char mbuf[64], *p = mbuf;
1631
1632 if (desc->mode & MODE_32BIT) {
1633 strcpy(p, "32-bit, ");
1634 p += 8;
1635 }
1636 if (desc->mode & MODE_64BIT) {
1637 strcpy(p, "64-bit, ");
1638 p += 8;
1639 }
1640 *(p - 2) = '\0';
1641 print_s(_("CPU op-mode(s):"), mbuf);
1642 }
1643#if !defined(WORDS_BIGENDIAN)
1644 print_s(_("Byte Order:"), "Little Endian");
1645#else
1646 print_s(_("Byte Order:"), "Big Endian");
1647#endif
1648 print_n(_("CPU(s):"), desc->ncpus);
1649
1650 if (desc->online)
1651 print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
1652 _("On-line CPU(s) list:"),
1653 desc->online, mod->hex);
1654
1655 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
1656 cpu_set_t *set;
1657
1658 /* Linux kernel provides cpuset of off-line CPUs that contains
1659 * all configured CPUs (see /sys/devices/system/cpu/offline),
1660		 * but we want to print only the off-line CPUs really present in the system.
1661 */
1662 set = cpuset_alloc(maxcpus, NULL, NULL);
1663 if (!set)
1664 err(EXIT_FAILURE, _("failed to callocate cpu set"));
1665 CPU_ZERO_S(setsize, set);
1666 for (i = 0; i < desc->ncpuspos; i++) {
1667 int cpu = real_cpu_num(desc, i);
1668 if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
1669 CPU_SET_S(cpu, setsize, set);
1670 }
1671 print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
1672 _("Off-line CPU(s) list:"),
1673 set, mod->hex);
1674 cpuset_free(set);
1675 }
1676
1677 if (desc->nsockets) {
1678 int cores_per_socket, sockets_per_book, books_per_drawer, drawers;
1679
1680 cores_per_socket = sockets_per_book = books_per_drawer = drawers = 0;
1681 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1682 * Using simply the cpu topology masks in sysfs will not give
1683 * usable results since everything is virtualized. E.g.
1684		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1685		 * have five cpus.
1686 * If the cpu topology is not exported (e.g. 2nd level guest)
1687 * fall back to old calculation scheme.
1688 */
1689 if (path_exist(_PATH_PROC_SYSINFO)) {
1690 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
1691 char pbuf[BUFSIZ];
1692 int t0, t1;
1693
1694 while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
1695 if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
1696 &t0, &t1, &drawers, &books_per_drawer,
1697 &sockets_per_book,
1698 &cores_per_socket) == 6)
1699 break;
1700 }
1701 if (fd)
1702 fclose(fd);
1703 }
1704 print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
1705 print_n(_("Core(s) per socket:"),
1706 cores_per_socket ?: desc->ncores / desc->nsockets);
1707 if (desc->nbooks) {
1708 print_n(_("Socket(s) per book:"),
1709 sockets_per_book ?: desc->nsockets / desc->nbooks);
1710 if (desc->ndrawers) {
1711 print_n(_("Book(s) per drawer:"),
1712 books_per_drawer ?: desc->nbooks / desc->ndrawers);
1713 print_n(_("Drawer(s):"), drawers ?: desc->ndrawers);
1714 } else {
1715 print_n(_("Book(s):"), books_per_drawer ?: desc->nbooks);
1716 }
1717 } else {
1718 print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
1719 }
1720 }
1721 if (desc->nnodes)
1722 print_n(_("NUMA node(s):"), desc->nnodes);
1723 if (desc->vendor)
1724 print_s(_("Vendor ID:"), desc->vendor);
1725 if (desc->machinetype)
1726 print_s(_("Machine type:"), desc->machinetype);
1727 if (desc->family)
1728 print_s(_("CPU family:"), desc->family);
1729 if (desc->model || desc->revision)
1730 print_s(_("Model:"), desc->revision ? desc->revision : desc->model);
1731 if (desc->modelname || desc->cpu)
1732 print_s(_("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
1733 if (desc->stepping)
1734 print_s(_("Stepping:"), desc->stepping);
1735 if (desc->mhz)
1736 print_s(_("CPU MHz:"), desc->mhz);
1737 if (desc->dynamic_mhz)
1738 print_s(_("CPU dynamic MHz:"), desc->dynamic_mhz);
1739 if (desc->static_mhz)
1740 print_s(_("CPU static MHz:"), desc->static_mhz);
1741 if (desc->maxmhz)
1742 print_s(_("CPU max MHz:"), desc->maxmhz[0]);
1743 if (desc->minmhz)
1744 print_s(_("CPU min MHz:"), desc->minmhz[0]);
1745 if (desc->bogomips)
1746 print_s(_("BogoMIPS:"), desc->bogomips);
1747 if (desc->virtflag) {
1748 if (!strcmp(desc->virtflag, "svm"))
1749 print_s(_("Virtualization:"), "AMD-V");
1750 else if (!strcmp(desc->virtflag, "vmx"))
1751 print_s(_("Virtualization:"), "VT-x");
1752 }
1753 if (desc->hypervisor)
1754 print_s(_("Hypervisor:"), desc->hypervisor);
1755 if (desc->hyper) {
1756 print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
1757 print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
1758 }
1759 if (desc->dispatching >= 0)
1760 print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
1761 if (desc->ncaches) {
1762 char cbuf[512];
1763
1764 for (i = desc->ncaches - 1; i >= 0; i--) {
1765 snprintf(cbuf, sizeof(cbuf),
1766 _("%s cache:"), desc->caches[i].name);
1767 print_s(cbuf, desc->caches[i].size);
1768 }
1769 }
1770
1771 for (i = 0; i < desc->nnodes; i++) {
1772 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
1773 print_cpuset(buf, desc->nodemaps[i], mod->hex);
1774 }
1775
1776 if (desc->flags)
1777 print_s(_("Flags:"), desc->flags);
1778
1779 if (desc->physsockets) {
1780 print_n(_("Physical sockets:"), desc->physsockets);
1781 print_n(_("Physical chips:"), desc->physchips);
1782 print_n(_("Physical cores/chip:"), desc->physcoresperchip);
1783 }
1784}
1785
1786static void __attribute__((__noreturn__)) usage(FILE *out)
1787{
1788 size_t i;
1789
1790 fputs(USAGE_HEADER, out);
1791 fprintf(out, _(" %s [options]\n"), program_invocation_short_name);
1792
1793 fputs(USAGE_SEPARATOR, out);
1794 fputs(_("Display information about the CPU architecture.\n"), out);
1795
1796 fputs(USAGE_OPTIONS, out);
1797 fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
1798 fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
1799 fputs(_(" -c, --offline print offline CPUs only\n"), out);
1800 fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
1801 fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
1802 fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
1803 fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
1804 fputs(USAGE_SEPARATOR, out);
1805 fputs(USAGE_HELP, out);
1806 fputs(USAGE_VERSION, out);
1807
1808 fprintf(out, _("\nAvailable columns:\n"));
1809
1810 for (i = 0; i < ARRAY_SIZE(coldescs); i++)
1811 fprintf(out, " %13s %s\n", coldescs[i].name, _(coldescs[i].help));
1812
1813 fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));
1814
1815 exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
1816}
1817
1818int main(int argc, char *argv[])
1819{
1820 struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
1821 struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
1822 int c, i;
1823 int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
1824 int cpu_modifier_specified = 0;
1825
1826 static const struct option longopts[] = {
1827 { "all", no_argument, 0, 'a' },
1828 { "online", no_argument, 0, 'b' },
1829 { "offline", no_argument, 0, 'c' },
1830 { "help", no_argument, 0, 'h' },
1831 { "extended", optional_argument, 0, 'e' },
1832 { "parse", optional_argument, 0, 'p' },
1833 { "sysroot", required_argument, 0, 's' },
1834 { "hex", no_argument, 0, 'x' },
1835 { "version", no_argument, 0, 'V' },
1836 { NULL, 0, 0, 0 }
1837 };
1838
1839 static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
1840 { 'a','b','c' },
1841 { 'e','p' },
1842 { 0 }
1843 };
1844 int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;
1845
1846 setlocale(LC_ALL, "");
1847 bindtextdomain(PACKAGE, LOCALEDIR);
1848 textdomain(PACKAGE);
1849 atexit(close_stdout);
1850
1851 while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {
1852
1853 err_exclusive_options(c, longopts, excl, excl_st);
1854
1855 switch (c) {
1856 case 'a':
1857 mod->online = mod->offline = 1;
1858 cpu_modifier_specified = 1;
1859 break;
1860 case 'b':
1861 mod->online = 1;
1862 cpu_modifier_specified = 1;
1863 break;
1864 case 'c':
1865 mod->offline = 1;
1866 cpu_modifier_specified = 1;
1867 break;
1868 case 'h':
1869 usage(stdout);
1870 case 'p':
1871 case 'e':
1872 if (optarg) {
1873 if (*optarg == '=')
1874 optarg++;
1875 ncolumns = string_to_idarray(optarg,
1876 columns, ARRAY_SIZE(columns),
1877 column_name_to_id);
1878 if (ncolumns < 0)
1879 return EXIT_FAILURE;
1880 }
1881 mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
1882 break;
1883 case 's':
1884 path_set_prefix(optarg);
1885 mod->system = SYSTEM_SNAPSHOT;
1886 break;
1887 case 'x':
1888 mod->hex = 1;
1889 break;
1890 case 'V':
1891 printf(UTIL_LINUX_VERSION);
1892 return EXIT_SUCCESS;
1893 default:
1894 usage(stderr);
1895 }
1896 }
1897
1898 if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
1899 fprintf(stderr,
1900 _("%s: options --all, --online and --offline may only "
1901 "be used with options --extended or --parse.\n"),
1902 program_invocation_short_name);
1903 return EXIT_FAILURE;
1904 }
1905
1906 if (argc != optind)
1907 usage(stderr);
1908
1909 /* set default cpu display mode if none was specified */
1910 if (!mod->online && !mod->offline) {
1911 mod->online = 1;
1912 mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
1913 }
1914
1915 read_basicinfo(desc, mod);
1916
1917 for (i = 0; i < desc->ncpuspos; i++) {
1918 read_topology(desc, i);
1919 read_cache(desc, i);
1920 read_polarization(desc, i);
1921 read_address(desc, i);
1922 read_configured(desc, i);
1923 read_max_mhz(desc, i);
1924 read_min_mhz(desc, i);
1925 }
1926
1927 if (desc->caches)
1928 qsort(desc->caches, desc->ncaches,
1929 sizeof(struct cpu_cache), cachecmp);
1930
1931 read_nodes(desc);
1932 read_hypervisor(desc, mod);
1933
1934 switch(mod->mode) {
1935 case OUTPUT_SUMMARY:
1936 print_summary(desc, mod);
1937 break;
1938 case OUTPUT_PARSABLE:
1939 if (!ncolumns) {
1940 columns[ncolumns++] = COL_CPU;
1941 columns[ncolumns++] = COL_CORE;
1942 columns[ncolumns++] = COL_SOCKET;
1943 columns[ncolumns++] = COL_NODE;
1944 columns[ncolumns++] = COL_CACHE;
1945 mod->compat = 1;
1946 }
1947 print_parsable(desc, columns, ncolumns, mod);
1948 break;
1949 case OUTPUT_READABLE:
1950 if (!ncolumns) {
1951 /* No list was given. Just print whatever is there. */
1952 columns[ncolumns++] = COL_CPU;
1953 if (desc->nodemaps)
1954 columns[ncolumns++] = COL_NODE;
1955 if (desc->drawermaps)
1956 columns[ncolumns++] = COL_DRAWER;
1957 if (desc->bookmaps)
1958 columns[ncolumns++] = COL_BOOK;
1959 if (desc->socketmaps)
1960 columns[ncolumns++] = COL_SOCKET;
1961 if (desc->coremaps)
1962 columns[ncolumns++] = COL_CORE;
1963 if (desc->caches)
1964 columns[ncolumns++] = COL_CACHE;
1965 if (desc->online)
1966 columns[ncolumns++] = COL_ONLINE;
1967 if (desc->configured)
1968 columns[ncolumns++] = COL_CONFIGURED;
1969 if (desc->polarization)
1970 columns[ncolumns++] = COL_POLARIZATION;
1971 if (desc->addresses)
1972 columns[ncolumns++] = COL_ADDRESS;
1973 if (desc->maxmhz)
1974 columns[ncolumns++] = COL_MAXMHZ;
1975 if (desc->minmhz)
1976 columns[ncolumns++] = COL_MINMHZ;
1977 }
1978 print_readable(desc, columns, ncolumns, mod);
1979 break;
1980 }
1981
1982 return EXIT_SUCCESS;
1983}