/*
 * Retrieved from the git.ipfire.org mirror of util-linux: sys-utils/lscpu.c
 * (merge of branch '2016-03-02/cgns' of https://github.com/hallyn/util-linux)
 */
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36
37 #if (defined(__x86_64__) || defined(__i386__))
38 # if !defined( __SANITIZE_ADDRESS__)
39 # define INCLUDE_VMWARE_BDOOR
40 # else
41 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
42 # endif
43 #endif
44
45 #ifdef INCLUDE_VMWARE_BDOOR
46 # include <stdint.h>
47 # include <signal.h>
48 # include <strings.h>
49 # include <setjmp.h>
50 # ifdef HAVE_SYS_IO_H
51 # include <sys/io.h>
52 # endif
53 #endif
54
55 #if defined(HAVE_LIBRTAS)
56 #include <librtas.h>
57 #endif
58
59 #include <libsmartcols.h>
60
61 #include "cpuset.h"
62 #include "nls.h"
63 #include "xalloc.h"
64 #include "c.h"
65 #include "strutils.h"
66 #include "bitops.h"
67 #include "path.h"
68 #include "closestream.h"
69 #include "optutils.h"
70 #include "lscpu.h"
71
#define CACHE_MAX 100	/* upper bound when scanning /sys .../cache/index<N> */

/* /sys paths */
#define _PATH_SYS_SYSTEM "/sys/devices/system"
#define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
/* procfs paths used for hypervisor/virtualization detection */
#define _PATH_PROC_XEN "/proc/xen"
#define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
#define _PATH_PROC_CPUINFO "/proc/cpuinfo"
#define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
#define _PATH_PROC_SYSINFO "/proc/sysinfo"
#define _PATH_PROC_STATUS "/proc/self/status"
#define _PATH_PROC_VZ "/proc/vz"
#define _PATH_PROC_BC "/proc/bc"
#define _PATH_PROC_DEVICETREE "/proc/device-tree"
#define _PATH_DEV_MEM "/dev/mem"
88
/* virtualization types */
enum {
	VIRT_NONE = 0,		/* bare metal, no hypervisor detected */
	VIRT_PARA,		/* para-virtualized guest */
	VIRT_FULL,		/* fully virtualized guest */
	VIRT_CONT		/* container (shares the host kernel) */
};
/* printable names for the VIRT_* ids above */
const char *virt_types[] = {
	[VIRT_NONE] = N_("none"),
	[VIRT_PARA] = N_("para"),
	[VIRT_FULL] = N_("full"),
	[VIRT_CONT] = N_("container"),
};

/* hypervisor vendor names, indexed by the HYPER_* ids (see lscpu.h) */
const char *hv_vendors[] = {
	[HYPER_NONE] = NULL,
	[HYPER_XEN] = "Xen",
	[HYPER_KVM] = "KVM",
	[HYPER_MSHV] = "Microsoft",
	[HYPER_VMWARE] = "VMware",
	[HYPER_IBM] = "IBM",
	[HYPER_VSERVER] = "Linux-VServer",
	[HYPER_UML] = "User-mode Linux",
	[HYPER_INNOTEK] = "Innotek GmbH",
	[HYPER_HITACHI] = "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX] = "Oracle",
	[HYPER_OS400] = "OS/400",
	[HYPER_PHYP] = "pHyp",
	[HYPER_SPAR] = "Unisys s-Par"
};

/* PCI vendor ids of emulated devices; used by the has_pci_device()
 * fallback detection in read_hypervisor() */
const int hv_vendor_pci[] = {
	[HYPER_NONE] = 0x0000,
	[HYPER_XEN] = 0x5853,
	[HYPER_KVM] = 0x0000,
	[HYPER_MSHV] = 0x1414,
	[HYPER_VMWARE] = 0x15ad,
	[HYPER_VBOX] = 0x80ee,
};

/* PCI device ids of the hypervisors' emulated graphics adapters,
 * paired with hv_vendor_pci[] above */
const int hv_graphics_pci[] = {
	[HYPER_NONE] = 0x0000,
	[HYPER_XEN] = 0x0001,
	[HYPER_KVM] = 0x0000,
	[HYPER_MSHV] = 0x5353,
	[HYPER_VMWARE] = 0x0710,
	[HYPER_VBOX] = 0xbeef,
};
138
/* CPU modes */
enum {
	MODE_32BIT = (1 << 1),	/* CPU can run 32-bit code */
	MODE_64BIT = (1 << 2)	/* CPU can run 64-bit code */
};

/* cache(s) description */
struct cpu_cache {
	char *name;		/* e.g. "L1d", "L2" -- level + optional type letter */
	char *size;		/* size string as read from sysfs */

	int nsharedmaps;	/* number of entries in sharedmaps */
	cpu_set_t **sharedmaps;	/* unique shared_cpu_map sets */
};

/* dispatching modes */
enum {
	DISP_HORIZONTAL = 0,
	DISP_VERTICAL = 1
};

/* printable names for the DISP_* ids above */
const char *disp_modes[] = {
	[DISP_HORIZONTAL] = N_("horizontal"),
	[DISP_VERTICAL] = N_("vertical")
};

/* cpu polarization */
enum {
	POLAR_UNKNOWN = 0,
	POLAR_VLOW,
	POLAR_VMEDIUM,
	POLAR_VHIGH,
	POLAR_HORIZONTAL
};

/* short and long text forms for each POLAR_* value */
struct polarization_modes {
	char *parsable;
	char *readable;
};

struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN] = {"U", "-"},
	[POLAR_VLOW] = {"VL", "vert-low"},
	[POLAR_VMEDIUM] = {"VM", "vert-medium"},
	[POLAR_VHIGH] = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H", "horizontal"},
};
186
/* global description */
struct lscpu_desc {
	char *arch;		/* machine name from uname() */
	char *vendor;		/* /proc/cpuinfo "vendor" / "vendor_id" */
	char *family;		/* "family" / "cpu family" */
	char *model;		/* "model" */
	char *modelname;	/* "model name" */
	char *revision;	/* alternative for model (ppc) */
	char *cpu;	/* alternative for modelname (ppc, sparc) */
	char *virtflag;	/* virtualization flag (vmx, svm) */
	char *hypervisor;	/* hypervisor software */
	int hyper;	/* hypervisor vendor ID */
	int virtype;	/* VIRT_PARA|FULL|NONE ? */
	char *mhz;		/* "cpu MHz" from /proc/cpuinfo */
	char **maxmhz;	/* maximum mega hertz */
	char **minmhz;	/* minimum mega hertz */
	char *stepping;
	char *bogomips;
	char *flags;		/* CPU feature flags string */
	int dispatching;	/* none, horizontal or vertical */
	int mode;	/* rm, lm or/and tm */

	int ncpuspos;	/* maximal possible CPUs */
	int ncpus;	/* number of present CPUs */
	cpu_set_t *present;	/* mask with present CPUs */
	cpu_set_t *online;	/* mask with online CPUs */

	int nthreads;	/* number of online threads */

	int ncaches;		/* number of entries in caches[] */
	struct cpu_cache *caches;

	/*
	 * All maps are sequentially indexed (0..ncpuspos), the array index
	 * does not have match with cpuX number as presented by kernel. You
	 * have to use real_cpu_num() to get the real cpuX number.
	 *
	 * For example, the possible system CPUs are: 1,3,5, it means that
	 * ncpuspos=3, so all arrays are in range 0..3.
	 */
	int *idx2cpunum;	/* mapping index to CPU num */

	int nnodes;		/* number of NUMA modes */
	int *idx2nodenum;	/* Support for discontinuous nodes */
	cpu_set_t **nodemaps;	/* array with NUMA nodes */

	/* books -- based on book_siblings (internal kernel map of cpuX's
	 * hardware threads within the same book */
	int nbooks;		/* number of all online books */
	cpu_set_t **bookmaps;	/* unique book_siblings */

	/* sockets -- based on core_siblings (internal kernel map of cpuX's
	 * hardware threads within the same physical_package_id (socket)) */
	int nsockets;	/* number of all online sockets */
	cpu_set_t **socketmaps;	/* unique core_siblings */

	/* cores -- based on thread_siblings (internal kernel map of cpuX's
	 * hardware threads within the same core as cpuX) */
	int ncores;		/* number of all online cores */
	cpu_set_t **coremaps;	/* unique thread_siblings */

	int *polarization;	/* cpu polarization */
	int *addresses;	/* physical cpu addresses */
	int *configured;	/* cpu configured */
	int physsockets;	/* Physical sockets (modules) */
	int physchips;	/* Physical chips */
	int physcoresperchip;	/* Physical cores per chip */
};
255
/* output formats */
enum {
	OUTPUT_SUMMARY = 0,	/* default */
	OUTPUT_PARSABLE,	/* -p */
	OUTPUT_READABLE,	/* -e */
};

enum {
	SYSTEM_LIVE = 0,	/* analyzing a live system */
	SYSTEM_SNAPSHOT,	/* analyzing a snapshot of a different system */
};

/* parsed command-line modifiers */
struct lscpu_modifier {
	int mode;		/* OUTPUT_* */
	int system;		/* SYSTEM_* */
	unsigned int hex:1,	/* print CPU masks rather than CPU lists */
		     compat:1,	/* use backwardly compatible format */
		     online:1,	/* print online CPUs */
		     offline:1;	/* print offline CPUs */
};

static int maxcpus;		/* size in bits of kernel cpu mask */

/* test whether CPU number _cpu is in the online/present mask of _d
 * (0 when the mask was never read) */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* translate a sequential array index to the kernel's cpuX number
 * (see the map-indexing comment in struct lscpu_desc) */
#define real_cpu_num(_d, _i) ((_d)->idx2cpunum[(_i)])
286
/*
 * IDs
 */
/* column ids; must stay in sync with coldescs[] below */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};

/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header as typed by the user */
	const char *help;	/* description for --help output */

	unsigned int is_abbr:1;	/* name is abbreviation */
};

static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU] = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE] = { "CORE", N_("logical core number") },
	[COL_SOCKET] = { "SOCKET", N_("logical socket number") },
	[COL_NODE] = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK] = { "BOOK", N_("logical book number") },
	[COL_CACHE] = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS] = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED] = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE] = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ] = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ] = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};
329
330 static int
331 column_name_to_id(const char *name, size_t namesz)
332 {
333 size_t i;
334
335 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
336 const char *cn = coldescs[i].name;
337
338 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
339 return i;
340 }
341 warnx(_("unknown column: %s"), name);
342 return -1;
343 }
344
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <key>"
 *
 * Returns 1 and stores a newly allocated copy of the value in *@value
 * on the first match; returns 0 otherwise (and when *@value is already
 * set -- the first match wins).
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast to unsigned char -- passing a plain (possibly
	 * signed) char with a value >= 0x80 to isspace() is undefined */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value -- strip trailing whitespace (incl. the newline);
	 * cannot walk past @v because *v is known to be non-space */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
387
/* Don't init the mode for platforms where we are not able to
 * detect that CPU supports 64-bit mode.
 *
 * Returns a MODE_* bit mask for the architecture lscpu was compiled for,
 * or 0 when analyzing a snapshot of another system.
 */
static int
init_mode(struct lscpu_modifier *mod)
{
	int m = 0;

	if (mod->system == SYSTEM_SNAPSHOT)
		/* reading info from any /{sys,proc} dump, don't mix it with
		 * information about our real CPU */
		return 0;

#if defined(__alpha__) || defined(__ia64__)
	m |= MODE_64BIT;	/* 64bit platforms only */
#endif
	/* platforms with 64bit flag in /proc/cpuinfo, define
	 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
	m |= MODE_32BIT;
#endif
	return m;
}
412
413 #if defined(HAVE_LIBRTAS)
414 #define PROCESSOR_MODULE_INFO 43
/*
 * Decode a big-endian 16-bit value stored at @buf[@offset].
 *
 * The bytes must be read as unsigned char: plain char may be signed, and
 * sign extension of bytes >= 0x80 would produce negative/garbage results
 * (e.g. {0x80,0x80} would decode as -32896 instead of 32896).
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
419
/*
 * Query physical packaging counts via the RTAS "processor module info"
 * system parameter (PowerPC firmware). On any failure the phys* counters
 * are left at zero.
 */
static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	/* first big-endian 16-bit word: payload length in bytes */
	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	/* number of module types described; only a single type is handled */
	ntypes = strbe16toh(buf, 2);

	assert(ntypes <= 1);
	if (!ntypes)
		return;

	/* three big-endian 16-bit counters follow */
	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
445 #else
/* no librtas: physical packaging info is unavailable, counters stay 0 */
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
450 #endif
451
/*
 * Fill the static parts of @desc: architecture (uname), the key/value
 * pairs from /proc/cpuinfo, the CPU mode bits, maxcpus, and the
 * possible/present/online CPU masks from sysfs.
 * Exits on fatal errors (unreadable cpuinfo, unknown CPU count).
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- lookup() keeps the first match for each field only */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "flags", &desc->flags)) ;		/* x86 */
		else if (lookup(buf, "features", &desc->flags)) ;	/* s390 */
		else if (lookup(buf, "type", &desc->flags)) ;		/* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* surround with spaces so every flag matches as " flag " */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (path_exist(_PATH_SYS_CPU "/kernel_max"))
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	/* build the index -> cpuX translation table from the possible mask */
	if (path_exist(_PATH_SYS_CPU "/possible")) {
		cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET(num, tmp))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(tmp);
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (path_exist(_PATH_SYS_CPU "/present")) {
		desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);
	}

	/* get mask for online CPUs */
	if (path_exist(_PATH_SYS_CPU "/online")) {
		desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);
	}

	/* get dispatching mode (s390 sysfs attribute; -1 when absent) */
	if (path_exist(_PATH_SYS_CPU "/dispatching"))
		desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
	else
		desc->dispatching = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);
}
564
565 static int
566 has_pci_device(unsigned int vendor, unsigned int device)
567 {
568 FILE *f;
569 unsigned int num, fn, ven, dev;
570 int res = 1;
571
572 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
573 if (!f)
574 return 0;
575
576 /* for more details about bus/pci/devices format see
577 * drivers/pci/proc.c in linux kernel
578 */
579 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
580 &num, &fn, &ven, &dev) == 4) {
581
582 if (ven == vendor && dev == device)
583 goto found;
584 }
585
586 res = 0;
587 found:
588 fclose(f);
589 return res;
590 }
591
592 #if defined(__x86_64__) || defined(__i386__)
593
/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/* Execute CPUID for leaf @op (ECX preset to 0) and return all four
 * result registers through the pointer arguments. */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
				 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}
621
622 static void
623 read_hypervisor_cpuid(struct lscpu_desc *desc)
624 {
625 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
626 char hyper_vendor_id[13];
627
628 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
629
630 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
631 memcpy(hyper_vendor_id + 0, &ebx, 4);
632 memcpy(hyper_vendor_id + 4, &ecx, 4);
633 memcpy(hyper_vendor_id + 8, &edx, 4);
634 hyper_vendor_id[12] = '\0';
635
636 if (!hyper_vendor_id[0])
637 return;
638
639 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
640 desc->hyper = HYPER_XEN;
641 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
642 desc->hyper = HYPER_KVM;
643 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
644 desc->hyper = HYPER_MSHV;
645 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
646 desc->hyper = HYPER_VMWARE;
647 else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
648 desc->hyper = HYPER_SPAR;
649 }
650
651 #else /* ! (__x86_64__ || __i386__) */
/* CPUID is x86-only; on other architectures this detection is a no-op */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
656 #endif
657
/*
 * Test whether the device-tree "compatible" style file at @path (a
 * NUL-separated list of strings) contains the exact string @str.
 * Returns 1 on match, 0 otherwise (including unreadable file).
 */
static int is_compatible(const char *path, const char *str)
{
	size_t off, len;
	char buf[256];
	FILE *fd = path_fopen("r", 0, "%s", path);

	if (!fd)
		return 0;

	memset(buf, 0, sizeof(buf));	/* guarantees NUL termination */
	len = fread(buf, 1, sizeof(buf) - 1, fd);
	fclose(fd);

	/* walk the NUL-separated entries */
	for (off = 0; off < len; off += strlen(&buf[off]) + 1) {
		if (strcmp(&buf[off], str) == 0)
			return 1;
	}

	return 0;
}
680
/*
 * PowerPC hypervisor detection from procfs/device-tree hints.
 * Sets desc->hyper and desc->virtype; returns the HYPER_* id
 * (HYPER_NONE/0 when nothing matched).
 */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (path_exist("/proc/iSeries")) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;
		/* a partition named "full" means a non-virtualized machine */
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
718
719 #ifdef INCLUDE_VMWARE_BDOOR
720
#define VMWARE_BDOOR_MAGIC 0x564D5868	/* "VMXh" */
#define VMWARE_BDOOR_PORT 0x5658	/* the backdoor I/O port */
#define VMWARE_BDOOR_CMD_GETVERSION 10

/*
 * Issue the VMware "backdoor" GETVERSION command: an IN instruction on
 * the magic port with the magic value in EAX. Under VMware the call is
 * intercepted; on real hardware it typically faults -- see
 * is_vmware_platform() for the SIGSEGV handling around this call.
 */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}
748
static jmp_buf segv_handler_env;	/* longjmp target for the backdoor probe */

/* SIGSEGV handler: jump back into is_vmware_platform() with value 1 */
static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}
758
/* Probe the VMware backdoor port; returns 1 when running under VMware. */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		/* we get here via segv_handler(): the probe faulted */
		return 0;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("error: can not set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("error: can not restore signal handler"));

	/* success: VMware echoes the magic back in EBX */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
790
791 #else /* ! INCLUDE_VMWARE_BDOOR */
792
/* backdoor probe not compiled in (non-x86 or ASAN) -- assume not VMware */
static int
is_vmware_platform(void)
{
	return 0;
}
798
799 #endif /* INCLUDE_VMWARE_BDOOR */
800
801 static void
802 read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
803 {
804 FILE *fd;
805
806 if (mod->system != SYSTEM_SNAPSHOT) {
807 read_hypervisor_cpuid(desc);
808 if (!desc->hyper)
809 desc->hyper = read_hypervisor_dmi();
810 if (!desc->hyper && is_vmware_platform())
811 desc->hyper = HYPER_VMWARE;
812 }
813
814 if (desc->hyper)
815 desc->virtype = VIRT_FULL;
816
817 else if (read_hypervisor_powerpc(desc) > 0) {}
818
819 /* Xen para-virt or dom0 */
820 else if (path_exist(_PATH_PROC_XEN)) {
821 int dom0 = 0;
822 fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
823
824 if (fd) {
825 char buf[256];
826
827 if (fscanf(fd, "%255s", buf) == 1 &&
828 !strcmp(buf, "control_d"))
829 dom0 = 1;
830 fclose(fd);
831 }
832 desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
833 desc->hyper = HYPER_XEN;
834
835 /* Xen full-virt on non-x86_64 */
836 } else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
837 desc->hyper = HYPER_XEN;
838 desc->virtype = VIRT_FULL;
839 } else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
840 desc->hyper = HYPER_VMWARE;
841 desc->virtype = VIRT_FULL;
842 } else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
843 desc->hyper = HYPER_VBOX;
844 desc->virtype = VIRT_FULL;
845
846 /* IBM PR/SM */
847 } else if (path_exist(_PATH_PROC_SYSINFO)) {
848 FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
849 char buf[BUFSIZ];
850
851 if (!sysinfo_fd)
852 return;
853 desc->hyper = HYPER_IBM;
854 desc->hypervisor = "PR/SM";
855 desc->virtype = VIRT_FULL;
856 while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
857 char *str;
858
859 if (!strstr(buf, "Control Program:"))
860 continue;
861 if (!strstr(buf, "KVM"))
862 desc->hyper = HYPER_IBM;
863 else
864 desc->hyper = HYPER_KVM;
865 str = strchr(buf, ':');
866 if (!str)
867 continue;
868 xasprintf(&str, "%s", str + 1);
869
870 /* remove leading, trailing and repeating whitespace */
871 while (*str == ' ')
872 str++;
873 desc->hypervisor = str;
874 str += strlen(str) - 1;
875 while ((*str == '\n') || (*str == ' '))
876 *(str--) = '\0';
877 while ((str = strstr(desc->hypervisor, " ")))
878 memmove(str, str + 1, strlen(str));
879 }
880 fclose(sysinfo_fd);
881 }
882
883 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
884 * /proc/bc should not */
885 else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
886 desc->hyper = HYPER_PARALLELS;
887 desc->virtype = VIRT_CONT;
888
889 /* IBM */
890 } else if (desc->vendor &&
891 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
892 strcmp(desc->vendor, "IBM/S390") == 0)) {
893 desc->hyper = HYPER_IBM;
894 desc->virtype = VIRT_FULL;
895
896 /* User-mode-linux */
897 } else if (desc->modelname && strstr(desc->modelname, "UML")) {
898 desc->hyper = HYPER_UML;
899 desc->virtype = VIRT_PARA;
900
901 /* Linux-VServer */
902 } else if (path_exist(_PATH_PROC_STATUS)) {
903 char buf[BUFSIZ];
904 char *val = NULL;
905
906 fd = path_fopen("r", 1, _PATH_PROC_STATUS);
907 while (fgets(buf, sizeof(buf), fd) != NULL) {
908 if (lookup(buf, "VxID", &val))
909 break;
910 }
911 fclose(fd);
912
913 if (val) {
914 while (isdigit(*val))
915 ++val;
916 if (!*val) {
917 desc->hyper = HYPER_VSERVER;
918 desc->virtype = VIRT_CONT;
919 }
920 }
921 }
922 }
923
924 /* add @set to the @ary, unnecessary set is deallocated. */
925 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
926 {
927 int i;
928 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
929
930 if (!ary)
931 return -1;
932
933 for (i = 0; i < *items; i++) {
934 if (CPU_EQUAL_S(setsize, set, ary[i]))
935 break;
936 }
937 if (i == *items) {
938 ary[*items] = set;
939 ++*items;
940 return 0;
941 }
942 CPU_FREE(set);
943 return 1;
944 }
945
/*
 * Read sysfs topology masks (thread/core/book siblings) for the CPU at
 * array index @idx and merge them into the unique-map arrays in @desc.
 * The first call also estimates per-level counts and allocates the maps.
 */
static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
		return;

	thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/thread_siblings", num);
	core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/core_siblings", num);
	book_siblings = NULL;
	/* book_siblings exists on s390 only */
	if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
		book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					    "/cpu%d/topology/book_siblings", num);

	/* first call: derive counts and allocate the per-level map arrays */
	if (!desc->coremaps) {
		int nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book.  Because of odd /
		 * non-present cpu maps and to keep calculation easy we make
		 * sure that nsockets and nbooks is at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		if (book_siblings)
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
	}

	/* duplicates are dropped (and freed) by add_cpuset_to_array() */
	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	if (book_siblings)
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
}
1015
1016 static void
1017 read_polarization(struct lscpu_desc *desc, int idx)
1018 {
1019 char mode[64];
1020 int num = real_cpu_num(desc, idx);
1021
1022 if (desc->dispatching < 0)
1023 return;
1024 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
1025 return;
1026 if (!desc->polarization)
1027 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1028 path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
1029 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1030 desc->polarization[idx] = POLAR_VLOW;
1031 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1032 desc->polarization[idx] = POLAR_VMEDIUM;
1033 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1034 desc->polarization[idx] = POLAR_VHIGH;
1035 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1036 desc->polarization[idx] = POLAR_HORIZONTAL;
1037 else
1038 desc->polarization[idx] = POLAR_UNKNOWN;
1039 }
1040
1041 static void
1042 read_address(struct lscpu_desc *desc, int idx)
1043 {
1044 int num = real_cpu_num(desc, idx);
1045
1046 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
1047 return;
1048 if (!desc->addresses)
1049 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1050 desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
1051 }
1052
1053 static void
1054 read_configured(struct lscpu_desc *desc, int idx)
1055 {
1056 int num = real_cpu_num(desc, idx);
1057
1058 if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
1059 return;
1060 if (!desc->configured)
1061 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1062 desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
1063 }
1064
1065 static void
1066 read_max_mhz(struct lscpu_desc *desc, int idx)
1067 {
1068 int num = real_cpu_num(desc, idx);
1069
1070 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
1071 return;
1072 if (!desc->maxmhz)
1073 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1074 xasprintf(&(desc->maxmhz[idx]), "%.4f",
1075 (float)path_read_s32(_PATH_SYS_CPU
1076 "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
1077 }
1078
1079 static void
1080 read_min_mhz(struct lscpu_desc *desc, int idx)
1081 {
1082 int num = real_cpu_num(desc, idx);
1083
1084 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
1085 return;
1086 if (!desc->minmhz)
1087 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1088 xasprintf(&(desc->minmhz[idx]), "%.4f",
1089 (float)path_read_s32(_PATH_SYS_CPU
1090 "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
1091 }
1092
1093 static int
1094 cachecmp(const void *a, const void *b)
1095 {
1096 struct cpu_cache *c1 = (struct cpu_cache *) a;
1097 struct cpu_cache *c2 = (struct cpu_cache *) b;
1098
1099 return strcmp(c2->name, c1->name);
1100 }
1101
/*
 * Read cache descriptions (name, size, shared_cpu_map) for the CPU at
 * array index @idx from /sys .../cache/index<N>. The first call counts
 * the cache levels and allocates desc->caches; later calls only merge
 * each cache's shared_cpu_map into the per-cache unique-map array.
 */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* first call: count the available index<N> directories */
		while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, desc->ncaches))
			desc->ncaches++;

		if (!desc->ncaches)
			return;

		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
				num, i))
			continue;
		if (!ca->name) {
			int type, level;

			/* cache type */
			path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/type",
					num, i);
			if (!strcmp(buf, "Data"))
				type = 'd';
			else if (!strcmp(buf, "Instruction"))
				type = 'i';
			else
				type = 0;	/* anything else (e.g. unified): no suffix */

			/* cache level */
			level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
					num, i);
			/* compose names like "L1d", "L1i", "L2", ... */
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", level);

			ca->name = xstrdup(buf);

			/* cache size */
			if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
				path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
				ca->size = xstrdup(buf);
			} else {
				ca->size = xstrdup("unknown size");
			}
		}

		/* information about how CPUs share different caches */
		map = path_read_cpuset(maxcpus,
				  _PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
				  num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		/* duplicates are dropped (and freed) by add_cpuset_to_array() */
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1170
1171 static inline int is_node_dirent(struct dirent *d)
1172 {
1173 return
1174 d &&
1175 #ifdef _DIRENT_HAVE_D_TYPE
1176 (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
1177 #endif
1178 strncmp(d->d_name, "node", 4) == 0 &&
1179 isdigit_string(d->d_name + 4);
1180 }
1181
/*
 * qsort() comparator for arrays of int (node numbers), ascending.
 *
 * Uses the overflow-safe (a > b) - (a < b) idiom instead of "*a - *b":
 * plain subtraction is undefined behavior when the difference overflows
 * (e.g. INT_MAX - INT_MIN) and can report the wrong order.
 */
static int
nodecmp(const void *ap, const void *bp)
{
	int a = *(const int *) ap;
	int b = *(const int *) bp;

	return (a > b) - (a < b);
}
1188
/*
 * Discovers NUMA nodes under /sys/devices/system/node and fills in
 * desc->nnodes, desc->idx2nodenum (node numbers, sorted ascending) and
 * desc->nodemaps (per-node CPU masks). A missing directory leaves the
 * node information empty.
 */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	char *path;

	/* number of NUMA node */
	path = path_strdup(_PATH_SYS_NODE);
	dir = opendir(path);
	free(path);

	/* first pass: count "node<N>" entries */
	while (dir && (d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		if (dir)
			closedir(dir);
		return;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	/* second pass: collect the node numbers, then sort them */
	if (dir) {
		rewinddir(dir);
		while ((d = readdir(dir)) && i < desc->nnodes) {
			if (is_node_dirent(d))
				desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
							_("Failed to extract the node number"));
		}
		closedir(dir);
		qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
	}

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		desc->nodemaps[i] = path_read_cpuset(maxcpus,
					_PATH_SYS_NODE "/node%d/cpumap",
					desc->idx2nodenum[i]);
}
1233
/*
 * Formats the value of column COL for the logical CPU index IDX into BUF
 * (bufsz bytes) and returns BUF. BUF is left empty ("") when the value is
 * unknown for this CPU; NULL is returned only when a CACHE string would
 * not fit into the buffer.
 */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CORE:
		/* the ID is the index of the core cpuset containing this CPU */
		if (cpuset_ary_isset(cpu, desc->coremaps,
				     desc->ncores, setsize, &i) == 0)
			snprintf(buf, bufsz, "%zu", i);
		break;
	case COL_SOCKET:
		if (cpuset_ary_isset(cpu, desc->socketmaps,
				     desc->nsockets, setsize, &i) == 0)
			snprintf(buf, bufsz, "%zu", i);
		break;
	case COL_NODE:
		/* nodes print the real node number, not the array index */
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_BOOK:
		if (cpuset_ary_isset(cpu, desc->bookmaps,
				     desc->nbooks, setsize, &i) == 0)
			snprintf(buf, bufsz, "%zu", i);
		break;
	case COL_CACHE:
	{
		char *p = buf;
		size_t sz = bufsz;
		int j;

		/* caches[] is sorted descending by name (see cachecmp()), so
		 * iterate backwards; IDs are joined by ',' in compat mode and
		 * by ':' otherwise */
		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				if (x < 0 || (size_t) x >= sz)
					return NULL;	/* would truncate */
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;	/* no room for separator */
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CONFIGURED:
		if (!desc->configured)
			break;
		/* parsable output uses short Y/N, readable uses yes/no */
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_MAXMHZ:
		if (desc->maxmhz)
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_MINMHZ:
		if (desc->minmhz)
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}
1341
/*
 * Returns the header text for column COL in BUF (bufsz bytes). For
 * COL_CACHE with at least one known cache, the header is built from the
 * real cache names joined by ',' (compat) or ':'; with no caches it falls
 * through to the generic name from coldescs[]. Returns NULL when the
 * cache names do not fit into the buffer.
 */
static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
		char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		/* caches[] is sorted descending by name; iterate backwards */
		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;	/* would truncate */
			sz -= x;
			p += x;
			if (i > 0) {
				if (sz < 2)
					return NULL;	/* no room for separator */
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
		/* no caches known -- fall through to the generic header */
	}
	snprintf(buf, bufsz, "%s", coldescs[col].name);
	return buf;
}
1374
1375 /*
1376 * [-p] backend, we support two parsable formats:
1377 *
1378 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1379 * output and it contains fixed set of the columns. The CACHE columns are at
1380 * the end of the line and the CACHE is not printed if the number of the caches
1381 * is zero. The CACHE columns are separated by two commas, for example:
1382 *
1383 * $ lscpu --parse
1384 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1385 * 0,0,0,0,,0,0,0
1386 * 1,1,0,0,,1,1,0
1387 *
1388 * 2) "user defined output" -- this format prints always all columns without
1389 * special prefix for CACHE column. If there are not CACHEs then the column is
1390 * empty and the header "Cache" is printed rather than a real name of the cache.
1391 * The CACHE columns are separated by ':'.
1392 *
1393 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1394 * # CPU,Core,Socket,Node,L1d:L1i:L2
1395 * 0,0,0,0,0:0:0
1396 * 1,1,0,0,1:1:0
1397 */
1398 static void
1399 print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1400 struct lscpu_modifier *mod)
1401 {
1402 char buf[BUFSIZ], *data;
1403 int i;
1404
1405 /*
1406 * Header
1407 */
1408 printf(_(
1409 "# The following is the parsable format, which can be fed to other\n"
1410 "# programs. Each different item in every column has an unique ID\n"
1411 "# starting from zero.\n"));
1412
1413 fputs("# ", stdout);
1414 for (i = 0; i < ncols; i++) {
1415 int col = cols[i];
1416
1417 if (col == COL_CACHE) {
1418 if (mod->compat && !desc->ncaches)
1419 continue;
1420 if (mod->compat && i != 0)
1421 putchar(',');
1422 }
1423 if (i > 0)
1424 putchar(',');
1425
1426 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1427
1428 if (data && * data && col != COL_CACHE &&
1429 !coldescs[col].is_abbr) {
1430 /*
1431 * For normal column names use mixed case (e.g. "Socket")
1432 */
1433 char *p = data + 1;
1434
1435 while (p && *p != '\0') {
1436 *p = tolower((unsigned int) *p);
1437 p++;
1438 }
1439 }
1440 fputs(data && *data ? data : "", stdout);
1441 }
1442 putchar('\n');
1443
1444 /*
1445 * Data
1446 */
1447 for (i = 0; i < desc->ncpuspos; i++) {
1448 int c;
1449 int cpu = real_cpu_num(desc, i);
1450
1451 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1452 continue;
1453 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1454 continue;
1455 if (desc->present && !is_cpu_present(desc, cpu))
1456 continue;
1457 for (c = 0; c < ncols; c++) {
1458 if (mod->compat && cols[c] == COL_CACHE) {
1459 if (!desc->ncaches)
1460 continue;
1461 if (c > 0)
1462 putchar(',');
1463 }
1464 if (c > 0)
1465 putchar(',');
1466
1467 data = get_cell_data(desc, i, cols[c], mod,
1468 buf, sizeof(buf));
1469 fputs(data && *data ? data : "", stdout);
1470 }
1471 putchar('\n');
1472 }
1473 }
1474
1475 /*
1476 * [-e] backend
1477 */
1478 static void
1479 print_readable(struct lscpu_desc *desc, int cols[], int ncols,
1480 struct lscpu_modifier *mod)
1481 {
1482 int i;
1483 char buf[BUFSIZ];
1484 const char *data;
1485 struct libscols_table *table;
1486
1487 scols_init_debug(0);
1488
1489 table = scols_new_table();
1490 if (!table)
1491 err(EXIT_FAILURE, _("failed to initialize output table"));
1492
1493 for (i = 0; i < ncols; i++) {
1494 data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
1495 if (!scols_table_new_column(table, xstrdup(data), 0, 0))
1496 err(EXIT_FAILURE, _("failed to initialize output column"));
1497 }
1498
1499 for (i = 0; i < desc->ncpuspos; i++) {
1500 int c;
1501 struct libscols_line *line;
1502 int cpu = real_cpu_num(desc, i);
1503
1504 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1505 continue;
1506 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1507 continue;
1508 if (desc->present && !is_cpu_present(desc, cpu))
1509 continue;
1510
1511 line = scols_table_new_line(table, NULL);
1512 if (!line)
1513 err(EXIT_FAILURE, _("failed to initialize output line"));
1514
1515 for (c = 0; c < ncols; c++) {
1516 data = get_cell_data(desc, i, cols[c], mod,
1517 buf, sizeof(buf));
1518 if (!data || !*data)
1519 data = "-";
1520 scols_line_set_data(line, c, data);
1521 }
1522 }
1523
1524 scols_print_table(table);
1525 scols_unref_table(table);
1526 }
1527
/* summary output helpers: "<key><padding><value>", key left-aligned in a
 * 23-character field; print_s for strings, print_n for ints */
#define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
#define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
1531
/*
 * Prints KEY followed by the cpuset SET -- as a hexadecimal mask when
 * HEX is non-zero, otherwise as a human-readable list (e.g. "0-3,8").
 */
static void
print_cpuset(const char *key, cpu_set_t *set, int hex)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	/* 7 bytes per CPU -- presumably an upper bound for both the mask and
	 * the list representation; TODO confirm the bound */
	size_t setbuflen = 7 * maxcpus;
	char setbuf[setbuflen], *p;

	if (hex) {
		p = cpumask_create(setbuf, setbuflen, set, setsize);
		printf("%-23s0x%s\n", key, p);
	} else {
		p = cpulist_create(setbuf, setbuflen, set, setsize);
		print_s(key, p);
	}

}
1548
1549 /*
1550 * default output
1551 */
1552 static void
1553 print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
1554 {
1555 char buf[512];
1556 int i;
1557 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1558
1559 print_s(_("Architecture:"), desc->arch);
1560
1561 if (desc->mode) {
1562 char mbuf[64], *p = mbuf;
1563
1564 if (desc->mode & MODE_32BIT) {
1565 strcpy(p, "32-bit, ");
1566 p += 8;
1567 }
1568 if (desc->mode & MODE_64BIT) {
1569 strcpy(p, "64-bit, ");
1570 p += 8;
1571 }
1572 *(p - 2) = '\0';
1573 print_s(_("CPU op-mode(s):"), mbuf);
1574 }
1575 #if !defined(WORDS_BIGENDIAN)
1576 print_s(_("Byte Order:"), "Little Endian");
1577 #else
1578 print_s(_("Byte Order:"), "Big Endian");
1579 #endif
1580 print_n(_("CPU(s):"), desc->ncpus);
1581
1582 if (desc->online)
1583 print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
1584 _("On-line CPU(s) list:"),
1585 desc->online, mod->hex);
1586
1587 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
1588 cpu_set_t *set;
1589
1590 /* Linux kernel provides cpuset of off-line CPUs that contains
1591 * all configured CPUs (see /sys/devices/system/cpu/offline),
1592 * but want to print real (present in system) off-line CPUs only.
1593 */
1594 set = cpuset_alloc(maxcpus, NULL, NULL);
1595 if (!set)
1596 err(EXIT_FAILURE, _("failed to callocate cpu set"));
1597 CPU_ZERO_S(setsize, set);
1598 for (i = 0; i < desc->ncpuspos; i++) {
1599 int cpu = real_cpu_num(desc, i);
1600 if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
1601 CPU_SET_S(cpu, setsize, set);
1602 }
1603 print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
1604 _("Off-line CPU(s) list:"),
1605 set, mod->hex);
1606 cpuset_free(set);
1607 }
1608
1609 if (desc->nsockets) {
1610 int cores_per_socket, sockets_per_book, books;
1611
1612 cores_per_socket = sockets_per_book = books = 0;
1613 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1614 * Using simply the cpu topology masks in sysfs will not give
1615 * usable results since everything is virtualized. E.g.
1616 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1617 * five cpus.
1618 * If the cpu topology is not exported (e.g. 2nd level guest)
1619 * fall back to old calculation scheme.
1620 */
1621 if (path_exist(_PATH_PROC_SYSINFO)) {
1622 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
1623 char pbuf[BUFSIZ];
1624 int t0, t1, t2;
1625
1626 while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
1627 if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
1628 &t0, &t1, &t2, &books, &sockets_per_book,
1629 &cores_per_socket) == 6)
1630 break;
1631 }
1632 if (fd)
1633 fclose(fd);
1634 }
1635 print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
1636 print_n(_("Core(s) per socket:"),
1637 cores_per_socket ?: desc->ncores / desc->nsockets);
1638 if (desc->nbooks) {
1639 print_n(_("Socket(s) per book:"),
1640 sockets_per_book ?: desc->nsockets / desc->nbooks);
1641 print_n(_("Book(s):"), books ?: desc->nbooks);
1642 } else {
1643 print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
1644 }
1645 }
1646 if (desc->nnodes)
1647 print_n(_("NUMA node(s):"), desc->nnodes);
1648 if (desc->vendor)
1649 print_s(_("Vendor ID:"), desc->vendor);
1650 if (desc->family)
1651 print_s(_("CPU family:"), desc->family);
1652 if (desc->model || desc->revision)
1653 print_s(_("Model:"), desc->revision ? desc->revision : desc->model);
1654 if (desc->modelname || desc->cpu)
1655 print_s(_("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
1656 if (desc->stepping)
1657 print_s(_("Stepping:"), desc->stepping);
1658 if (desc->mhz)
1659 print_s(_("CPU MHz:"), desc->mhz);
1660 if (desc->maxmhz)
1661 print_s(_("CPU max MHz:"), desc->maxmhz[0]);
1662 if (desc->minmhz)
1663 print_s(_("CPU min MHz:"), desc->minmhz[0]);
1664 if (desc->bogomips)
1665 print_s(_("BogoMIPS:"), desc->bogomips);
1666 if (desc->virtflag) {
1667 if (!strcmp(desc->virtflag, "svm"))
1668 print_s(_("Virtualization:"), "AMD-V");
1669 else if (!strcmp(desc->virtflag, "vmx"))
1670 print_s(_("Virtualization:"), "VT-x");
1671 }
1672 if (desc->hypervisor)
1673 print_s(_("Hypervisor:"), desc->hypervisor);
1674 if (desc->hyper) {
1675 print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
1676 print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
1677 }
1678 if (desc->dispatching >= 0)
1679 print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
1680 if (desc->ncaches) {
1681 char cbuf[512];
1682
1683 for (i = desc->ncaches - 1; i >= 0; i--) {
1684 snprintf(cbuf, sizeof(cbuf),
1685 _("%s cache:"), desc->caches[i].name);
1686 print_s(cbuf, desc->caches[i].size);
1687 }
1688 }
1689
1690 for (i = 0; i < desc->nnodes; i++) {
1691 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
1692 print_cpuset(buf, desc->nodemaps[i], mod->hex);
1693 }
1694
1695 if (desc->flags)
1696 print_s(_("Flags:"), desc->flags);
1697
1698 if (desc->physsockets) {
1699 print_n(_("Physical sockets:"), desc->physsockets);
1700 print_n(_("Physical chips:"), desc->physchips);
1701 print_n(_("Physical cores/chip:"), desc->physcoresperchip);
1702 }
1703 }
1704
/*
 * Prints the help text to OUT and terminates the program; exit status is
 * success when OUT is stdout (-h/--help), failure otherwise.
 */
static void __attribute__((__noreturn__)) usage(FILE *out)
{
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_SEPARATOR, out);
	fputs(_("Display information about the CPU architecture.\n"), out);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline print offline CPUs only\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
	fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	fputs(USAGE_HELP, out);
	fputs(USAGE_VERSION, out);

	/* list every column usable with -e/-p =<list> */
	fprintf(out, _("\nAvailable columns:\n"));

	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s %s\n", coldescs[i].name, _(coldescs[i].help));

	fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));

	exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}
1736
/*
 * Entry point: parses options, gathers CPU topology/attributes and
 * dispatches to the summary (-default), parsable (-p) or readable (-e)
 * output backend.
 */
int main(int argc, char *argv[])
{
	struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
	struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
	int c, i;
	int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
	int cpu_modifier_specified = 0;

	static const struct option longopts[] = {
		{ "all", no_argument, 0, 'a' },
		{ "online", no_argument, 0, 'b' },
		{ "offline", no_argument, 0, 'c' },
		{ "help", no_argument, 0, 'h' },
		{ "extended", optional_argument, 0, 'e' },
		{ "parse", optional_argument, 0, 'p' },
		{ "sysroot", required_argument, 0, 's' },
		{ "hex", no_argument, 0, 'x' },
		{ "version", no_argument, 0, 'V' },
		{ NULL, 0, 0, 0 }
	};

	static const ul_excl_t excl[] = {	/* rows and cols in ASCII order */
		{ 'a','b','c' },
		{ 'e','p' },
		{ 0 }
	};
	int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);
	atexit(close_stdout);

	while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {

		/* reject mutually exclusive combinations (see excl[] above) */
		err_exclusive_options(c, longopts, excl, excl_st);

		switch (c) {
		case 'a':
			/* -a implies both -b and -c */
			mod->online = mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'b':
			mod->online = 1;
			cpu_modifier_specified = 1;
			break;
		case 'c':
			mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'h':
			usage(stdout);	/* usage() never returns */
		case 'p':
		case 'e':
			if (optarg) {
				/* accept both "--parse=LIST" and "-p=LIST" */
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
			break;
		case 's':
			/* read a dumped snapshot instead of the live system */
			path_set_prefix(optarg);
			mod->system = SYSTEM_SNAPSHOT;
			break;
		case 'x':
			mod->hex = 1;
			break;
		case 'V':
			printf(UTIL_LINUX_VERSION);
			return EXIT_SUCCESS;
		default:
			usage(stderr);
		}
	}

	/* -a/-b/-c make no sense for the summary output */
	if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
		fprintf(stderr,
			_("%s: options --all, --online and --offline may only "
			  "be used with options --extended or --parse.\n"),
			program_invocation_short_name);
		return EXIT_FAILURE;
	}

	if (argc != optind)
		usage(stderr);

	/* set default cpu display mode if none was specified */
	if (!mod->online && !mod->offline) {
		mod->online = 1;
		mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
	}

	read_basicinfo(desc, mod);

	/* gather per-CPU attributes */
	for (i = 0; i < desc->ncpuspos; i++) {
		read_topology(desc, i);
		read_cache(desc, i);
		read_polarization(desc, i);
		read_address(desc, i);
		read_configured(desc, i);
		read_max_mhz(desc, i);
		read_min_mhz(desc, i);
	}

	/* sort caches descending by name; the printers iterate backwards */
	if (desc->caches)
		qsort(desc->caches, desc->ncaches,
				sizeof(struct cpu_cache), cachecmp);

	read_nodes(desc);
	read_hypervisor(desc, mod);

	switch(mod->mode) {
	case OUTPUT_SUMMARY:
		print_summary(desc, mod);
		break;
	case OUTPUT_PARSABLE:
		if (!ncolumns) {
			/* no list given: use the fixed compat column set */
			columns[ncolumns++] = COL_CPU;
			columns[ncolumns++] = COL_CORE;
			columns[ncolumns++] = COL_SOCKET;
			columns[ncolumns++] = COL_NODE;
			columns[ncolumns++] = COL_CACHE;
			mod->compat = 1;
		}
		print_parsable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_READABLE:
		if (!ncolumns) {
			/* No list was given. Just print whatever is there. */
			columns[ncolumns++] = COL_CPU;
			if (desc->nodemaps)
				columns[ncolumns++] = COL_NODE;
			if (desc->bookmaps)
				columns[ncolumns++] = COL_BOOK;
			if (desc->socketmaps)
				columns[ncolumns++] = COL_SOCKET;
			if (desc->coremaps)
				columns[ncolumns++] = COL_CORE;
			if (desc->caches)
				columns[ncolumns++] = COL_CACHE;
			if (desc->online)
				columns[ncolumns++] = COL_ONLINE;
			if (desc->configured)
				columns[ncolumns++] = COL_CONFIGURED;
			if (desc->polarization)
				columns[ncolumns++] = COL_POLARIZATION;
			if (desc->addresses)
				columns[ncolumns++] = COL_ADDRESS;
			if (desc->maxmhz)
				columns[ncolumns++] = COL_MAXMHZ;
			if (desc->minmhz)
				columns[ncolumns++] = COL_MINMHZ;
		}
		print_readable(desc, columns, ncolumns, mod);
		break;
	}

	return EXIT_SUCCESS;
}