]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu.c
lscpu: Print physical cpu information
[thirdparty/util-linux.git] / sys-utils / lscpu.c
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36
37 #if (defined(__x86_64__) || defined(__i386__))
38 # if !defined( __SANITIZE_ADDRESS__)
39 # define INCLUDE_VMWARE_BDOOR
40 # else
41 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
42 # endif
43 #endif
44
45 #ifdef INCLUDE_VMWARE_BDOOR
46 # include <stdint.h>
47 # include <signal.h>
48 # include <strings.h>
49 # include <setjmp.h>
50 # ifdef HAVE_SYS_IO_H
51 # include <sys/io.h>
52 # endif
53 #endif
54
55 #if defined(HAVE_LIBRTAS)
56 #include <librtas.h>
57 #endif
58
59 #include <libsmartcols.h>
60
61 #include "cpuset.h"
62 #include "nls.h"
63 #include "xalloc.h"
64 #include "c.h"
65 #include "strutils.h"
66 #include "bitops.h"
67 #include "path.h"
68 #include "closestream.h"
69 #include "optutils.h"
70 #include "lscpu.h"
71
72 #define CACHE_MAX 100
73
74 /* /sys paths */
75 #define _PATH_SYS_SYSTEM "/sys/devices/system"
76 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
77 #define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
78 #define _PATH_PROC_XEN "/proc/xen"
79 #define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
80 #define _PATH_PROC_CPUINFO "/proc/cpuinfo"
81 #define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
82 #define _PATH_PROC_SYSINFO "/proc/sysinfo"
83 #define _PATH_PROC_STATUS "/proc/self/status"
84 #define _PATH_PROC_VZ "/proc/vz"
85 #define _PATH_PROC_BC "/proc/bc"
86 #define _PATH_PROC_DEVICETREE "/proc/device-tree"
87 #define _PATH_DEV_MEM "/dev/mem"
88
/* virtualization types (stored in desc->virtype) */
enum {
	VIRT_NONE	= 0,
	VIRT_PARA,
	VIRT_FULL,
	VIRT_CONT
};

/* human-readable names for the VIRT_* constants above */
const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

/* hypervisor vendor names, indexed by the HYPER_* IDs (see lscpu.h) */
const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]  = "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER] = "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK] = "Innotek GmbH",
	[HYPER_HITACHI] = "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par"
};

/* PCI vendor IDs of emulated devices, used by read_hypervisor() together
 * with hv_graphics_pci[] to detect full virtualization via /proc/bus/pci */
const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device IDs of the hypervisors' emulated graphics adapters */
const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};
138
/* CPU modes -- bit flags combined in desc->mode */
enum {
	MODE_32BIT	= (1 << 1),
	MODE_64BIT	= (1 << 2)
};

/* cache(s) description */
struct cpu_cache {
	char		*name;		/* e.g. "L1d", "L2" */
	char		*size;		/* size string as read from sysfs */

	int		nsharedmaps;	/* number of entries in sharedmaps */
	cpu_set_t	**sharedmaps;	/* unique shared_cpu_map sets */
};

/* dispatching modes (s390; read from /sys/devices/system/cpu/dispatching) */
enum {
	DISP_HORIZONTAL = 0,
	DISP_VERTICAL	= 1
};

const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* cpu polarization (s390; see read_polarization()) */
enum {
	POLAR_UNKNOWN	= 0,
	POLAR_VLOW,
	POLAR_VMEDIUM,
	POLAR_VHIGH,
	POLAR_HORIZONTAL
};

/* two spellings per polarization: short for -p output, long for -e output */
struct polarization_modes {
	char *parsable;
	char *readable;
};

struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};
186
/* global description -- everything lscpu knows about the system;
 * filled in by the read_*() helpers below and consumed by the printers */
struct lscpu_desc {
	char	*arch;		/* from uname(2) machine field */
	char	*vendor;
	char	*family;
	char	*model;
	char	*modelname;
	char	*virtflag;	/* virtualization flag (vmx, svm) */
	char	*hypervisor;	/* hypervisor software */
	int	hyper;		/* hypervisor vendor ID */
	int	virtype;	/* VIRT_PARA|FULL|NONE ? */
	char	*mhz;
	char	**maxmhz;	/* maximum mega hertz */
	char	**minmhz;	/* minimum mega hertz */
	char	*stepping;
	char    *bogomips;
	char	*flags;
	int	dispatching;	/* none, horizontal or vertical */
	int	mode;		/* rm, lm or/and tm */

	int		ncpuspos;	/* maximal possible CPUs */
	int		ncpus;		/* number of present CPUs */
	cpu_set_t	*present;	/* mask with present CPUs */
	cpu_set_t	*online;	/* mask with online CPUs */

	int		nthreads;	/* number of online threads */

	int		ncaches;
	struct cpu_cache *caches;

	/*
	 * All maps are sequentially indexed (0..ncpuspos), the array index
	 * does not have match with cpuX number as presented by kernel. You
	 * have to use real_cpu_num() to get the real cpuX number.
	 *
	 * For example, the possible system CPUs are: 1,3,5, it means that
	 * ncpuspos=3, so all arrays are in range 0..3.
	 */
	int		*idx2cpunum;	/* mapping index to CPU num */

	int		nnodes;		/* number of NUMA modes */
	int		*idx2nodenum;	/* Support for discontinuous nodes */
	cpu_set_t	**nodemaps;	/* array with NUMA nodes */

	/* books -- based on book_siblings (internal kernel map of cpuX's
	 * hardware threads within the same book */
	int		nbooks;		/* number of all online books */
	cpu_set_t	**bookmaps;	/* unique book_siblings */

	/* sockets -- based on core_siblings (internal kernel map of cpuX's
	 * hardware threads within the same physical_package_id (socket)) */
	int		nsockets;	/* number of all online sockets */
	cpu_set_t	**socketmaps;	/* unique core_siblings */

	/* cores -- based on thread_siblings (internel kernel map of cpuX's
	 * hardware threads within the same core as cpuX) */
	int		ncores;		/* number of all online cores */
	cpu_set_t	**coremaps;	/* unique thread_siblings */

	int		*polarization;	/* cpu polarization */
	int		*addresses;	/* physical cpu addresses */
	int		*configured;	/* cpu configured */
	int		physsockets;	/* Physical sockets (modules) */
	int		physchips;	/* Physical chips */
	int		physcoresperchip;	/* Physical cores per chip */
};
253
/* output formats */
enum {
	OUTPUT_SUMMARY	= 0,	/* default */
	OUTPUT_PARSABLE,	/* -p */
	OUTPUT_READABLE,	/* -e */
};

/* data source */
enum {
	SYSTEM_LIVE = 0,	/* analyzing a live system */
	SYSTEM_SNAPSHOT,	/* analyzing a snapshot of a different system */
};

/* command-line modifiers that control the output */
struct lscpu_modifier {
	int		mode;		/* OUTPUT_* */
	int		system;		/* SYSTEM_* */
	unsigned int	hex:1,		/* print CPU masks rather than CPU lists */
			compat:1,	/* use backwardly compatible format */
			online:1,	/* print online CPUs */
			offline:1;	/* print offline CPUs */
};

static int maxcpus;		/* size in bits of kernel cpu mask */

/* test CPU @_cpu against the online/present mask of @_d (0 when no mask) */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* translate a sequential array index to the kernel's cpuX number */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])
284
/*
 * IDs -- columns for the -e/-p output; indexes into coldescs[]
 */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};

/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header as printed/parsed */
	const char *help;

	unsigned int  is_abbr:1;	/* name is abbreviation */
};

static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU]          = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE]         = { "CORE", N_("logical core number") },
	[COL_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ]	   = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ]	   = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};
327
328 static int
329 column_name_to_id(const char *name, size_t namesz)
330 {
331 size_t i;
332
333 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
334 const char *cn = coldescs[i].name;
335
336 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
337 return i;
338 }
339 warnx(_("unknown column: %s"), name);
340 return -1;
341 }
342
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *      "<pattern> : <key>"
 *
 * Returns 1 and stores a newly allocated copy of the value in *@value on
 * a match, 0 otherwise. The first match wins: if *@value is already set
 * the line is ignored, which prevents leaking the previous string when a
 * tag repeats (e.g. once per CPU in /proc/cpuinfo). All callers pass a
 * NULL-initialized *@value on the first call.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast to unsigned char -- isspace() on a plain
	 * (possibly signed) char is undefined for bytes >= 0x80 */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value -- strip trailing whitespace (incl. the newline) */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
384
385 /* Don't init the mode for platforms where we are not able to
386 * detect that CPU supports 64-bit mode.
387 */
388 static int
389 init_mode(struct lscpu_modifier *mod)
390 {
391 int m = 0;
392
393 if (mod->system == SYSTEM_SNAPSHOT)
394 /* reading info from any /{sys,proc} dump, don't mix it with
395 * information about our real CPU */
396 return 0;
397
398 #if defined(__alpha__) || defined(__ia64__)
399 m |= MODE_64BIT; /* 64bit platforms only */
400 #endif
401 /* platforms with 64bit flag in /proc/cpuinfo, define
402 * 32bit default here */
403 #if defined(__i386__) || defined(__x86_64__) || \
404 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
405 m |= MODE_32BIT;
406 #endif
407 return m;
408 }
409
#if defined(HAVE_LIBRTAS)
#define PROCESSOR_MODULE_INFO	43
/* Decode a big-endian 16-bit field at @offset in a raw RTAS buffer.
 * The bytes go through unsigned char: plain char may be signed, and a
 * byte >= 0x80 would otherwise sign-extend and corrupt the result. */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char)buf[offset] << 8) + (unsigned char)buf[offset+1];
}

/* Query the PowerPC RTAS firmware for the physical socket/chip/core
 * counts and store them in @desc; leaves the counts at 0 on any error. */
static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	ntypes = strbe16toh(buf, 2);

	assert(ntypes <= 1);
	if (!ntypes)
		return;

	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
#else
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
448
/*
 * Fill in the architecture-independent parts of @desc: vendor/model
 * strings and flags from /proc/cpuinfo, the 32/64-bit mode bits, the
 * possible/present/online CPU masks from sysfs and the index->cpuX
 * mapping. Also sets the file-scope maxcpus.
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- the alternative tags cover x86, s390 and sparc
	 * /proc/cpuinfo formats; order matters for prefix-sharing tags */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
		else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
		else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* pad with spaces so each flag can be matched as " flag " */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (path_exist(_PATH_SYS_CPU "/kernel_max"))
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	if (path_exist(_PATH_SYS_CPU "/possible")) {
		cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		/* build the sequential-index -> cpuX mapping */
		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET(num, tmp))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(tmp);
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (path_exist(_PATH_SYS_CPU "/present")) {
		desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);
	}

	/* get mask for online CPUs */
	if (path_exist(_PATH_SYS_CPU "/online")) {
		desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);
	}

	/* get dispatching mode (s390 only; -1 means not available) */
	if (path_exist(_PATH_SYS_CPU "/dispatching"))
		desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
	else
		desc->dispatching = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);
}
559
560 static int
561 has_pci_device(unsigned int vendor, unsigned int device)
562 {
563 FILE *f;
564 unsigned int num, fn, ven, dev;
565 int res = 1;
566
567 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
568 if (!f)
569 return 0;
570
571 /* for more details about bus/pci/devices format see
572 * drivers/pci/proc.c in linux kernel
573 */
574 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
575 &num, &fn, &ven, &dev) == 4) {
576
577 if (ven == vendor && dev == device)
578 goto found;
579 }
580
581 res = 0;
582 found:
583 fclose(f);
584 return res;
585 }
586
#if defined(__x86_64__) || defined(__i386__)

/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/* Execute the CPUID instruction for leaf @op and return the four
 * result registers through the pointer arguments. */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
				 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}

/* Identify the hypervisor from the 12-byte vendor ID signature
 * returned in EBX:ECX:EDX by the hypervisor CPUID leaf; leaves
 * desc->hyper untouched when no signature is present. */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char hyper_vendor_id[13];

	memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));

	cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
	memcpy(hyper_vendor_id + 0, &ebx, 4);
	memcpy(hyper_vendor_id + 4, &ecx, 4);
	memcpy(hyper_vendor_id + 8, &edx, 4);
	hyper_vendor_id[12] = '\0';

	if (!hyper_vendor_id[0])
		return;

	if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
		desc->hyper = HYPER_XEN;
	else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
		desc->hyper = HYPER_KVM;
	else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
		desc->hyper = HYPER_MSHV;
	else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
		desc->hyper = HYPER_VMWARE;
	else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
		desc->hyper = HYPER_SPAR;
}

#else	/* ! (__x86_64__ || __i386__) */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
652
/*
 * Check whether @str occurs as one of the NUL-separated strings in the
 * file at @path (device-tree "compatible" format). Returns 1 on a match,
 * 0 otherwise or when the file cannot be read. Only the first 255 bytes
 * of the file are examined.
 */
static int is_compatible(const char *path, const char *str)
{
	FILE *fd = path_fopen("r", 0, "%s", path);
	char buf[256];
	size_t off, len;

	if (!fd)
		return 0;

	memset(buf, 0, sizeof(buf));
	len = fread(buf, 1, sizeof(buf) - 1, fd);
	fclose(fd);

	/* walk the sequence of NUL-terminated strings */
	off = 0;
	while (off < len) {
		if (strcmp(&buf[off], str) == 0)
			return 1;
		off += strlen(&buf[off]) + 1;
	}

	return 0;
}
675
/*
 * PowerPC-specific hypervisor detection via /proc/iSeries and the
 * device tree. Sets desc->hyper and desc->virtype when a platform is
 * recognized; returns desc->hyper (non-zero on success).
 */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (path_exist("/proc/iSeries")) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;
		/* a partition named "full" means no virtualization */
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
713
#ifdef INCLUDE_VMWARE_BDOOR

#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

/* Issue the VMware "backdoor" GETVERSION request via an I/O-port read.
 * On real hardware or other hypervisors this typically faults (SIGSEGV),
 * which is why the caller installs a signal handler around it. */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}

static jmp_buf segv_handler_env;

/* SIGSEGV handler: non-locally return to the sigsetjmp() point in
 * is_vmware_platform() when the backdoor probe faults. */
static void
segv_handler(__attribute__((__unused__)) int sig,
             __attribute__((__unused__)) siginfo_t *info,
             __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}

static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user.  But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected).  So we temporarily install SIGSEGV handler to catch
	 * the signal.  All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("error: can not set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("error: can not restore signal handler"));

	/* a successful backdoor call echoes the magic back in EBX */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}

#else /* ! INCLUDE_VMWARE_BDOOR */

/* fallback for builds without the VMware backdoor probe */
static int
is_vmware_platform(void)
{
	return 0;
}

#endif /* INCLUDE_VMWARE_BDOOR */
795
/*
 * Hypervisor/container detection cascade. Tries, in order: CPUID leaf,
 * DMI, the VMware backdoor, PowerPC device tree, /proc/xen, PCI device
 * probing, /proc/sysinfo (s390 PR/SM), OpenVZ, vendor strings, UML and
 * Linux-VServer. Fills desc->hyper, desc->virtype and, on s390,
 * desc->hypervisor. The ordering is significant -- only the first
 * matching method wins.
 */
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* CPU-instruction-based probes only make sense on a live system */
	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper)
		desc->virtype = VIRT_FULL;

	else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (path_exist(_PATH_PROC_XEN)) {
		int dom0 = 0;
		fd = path_fopen("r", 0, _PATH_PROC_XENCAP);

		if (fd) {
			char buf[256];

			/* "control_d" capability marks the dom0 host */
			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if (path_exist(_PATH_PROC_SYSINFO)) {
		FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
		char buf[BUFSIZ];

		if (!sysinfo_fd)
			return;
		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		/* refine from the "Control Program:" line: it names either
		 * z/VM (IBM) or KVM; the value becomes desc->hypervisor */
		while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
			char *str;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			str = strchr(buf, ':');
			if (!str)
				continue;
			xasprintf(&str, "%s",  str + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			while ((str = strstr(desc->hypervisor, "  ")))
				memmove(str, str + 1, strlen(str));
		}
		fclose(sysinfo_fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 *		      /proc/bc should not */
	else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if (path_exist(_PATH_PROC_STATUS)) {
		char buf[BUFSIZ];
		char *val = NULL;

		fd = path_fopen("r", 1, _PATH_PROC_STATUS);
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID marks a VServer guest */
		if (val) {
			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
		}
	}
}
918
919 /* add @set to the @ary, unnecessary set is deallocated. */
920 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
921 {
922 int i;
923 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
924
925 if (!ary)
926 return -1;
927
928 for (i = 0; i < *items; i++) {
929 if (CPU_EQUAL_S(setsize, set, ary[i]))
930 break;
931 }
932 if (i == *items) {
933 ary[*items] = set;
934 ++*items;
935 return 0;
936 }
937 CPU_FREE(set);
938 return 1;
939 }
940
/*
 * Read the sysfs topology masks (thread/core/book siblings) for CPU
 * @idx and collect the unique sets in desc->coremaps, desc->socketmaps
 * and desc->bookmaps. On the first call it also derives the per-level
 * counts used as fallbacks and allocates the map arrays.
 */
static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
		return;

	thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/thread_siblings", num);
	core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/core_siblings", num);
	/* book topology is optional (s390 only) */
	book_siblings = NULL;
	if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
		book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					    "/cpu%d/topology/book_siblings", num);

	/* first pass: derive counts and allocate the map arrays */
	if (!desc->coremaps) {
		int nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book.  Because of odd /
		 * non-present cpu maps and to keep calculation easy we make
		 * sure that nsockets and nbooks is at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		if (book_siblings)
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
	}

	/* add_cpuset_to_array() de-duplicates and takes ownership */
	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	if (book_siblings)
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
}
1010
1011 static void
1012 read_polarization(struct lscpu_desc *desc, int idx)
1013 {
1014 char mode[64];
1015 int num = real_cpu_num(desc, idx);
1016
1017 if (desc->dispatching < 0)
1018 return;
1019 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
1020 return;
1021 if (!desc->polarization)
1022 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1023 path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
1024 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1025 desc->polarization[idx] = POLAR_VLOW;
1026 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1027 desc->polarization[idx] = POLAR_VMEDIUM;
1028 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1029 desc->polarization[idx] = POLAR_VHIGH;
1030 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1031 desc->polarization[idx] = POLAR_HORIZONTAL;
1032 else
1033 desc->polarization[idx] = POLAR_UNKNOWN;
1034 }
1035
1036 static void
1037 read_address(struct lscpu_desc *desc, int idx)
1038 {
1039 int num = real_cpu_num(desc, idx);
1040
1041 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
1042 return;
1043 if (!desc->addresses)
1044 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1045 desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
1046 }
1047
1048 static void
1049 read_configured(struct lscpu_desc *desc, int idx)
1050 {
1051 int num = real_cpu_num(desc, idx);
1052
1053 if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
1054 return;
1055 if (!desc->configured)
1056 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1057 desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
1058 }
1059
1060 static void
1061 read_max_mhz(struct lscpu_desc *desc, int idx)
1062 {
1063 int num = real_cpu_num(desc, idx);
1064
1065 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
1066 return;
1067 if (!desc->maxmhz)
1068 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1069 xasprintf(&(desc->maxmhz[idx]), "%.4f",
1070 (float)path_read_s32(_PATH_SYS_CPU
1071 "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
1072 }
1073
1074 static void
1075 read_min_mhz(struct lscpu_desc *desc, int idx)
1076 {
1077 int num = real_cpu_num(desc, idx);
1078
1079 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
1080 return;
1081 if (!desc->minmhz)
1082 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1083 xasprintf(&(desc->minmhz[idx]), "%.4f",
1084 (float)path_read_s32(_PATH_SYS_CPU
1085 "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
1086 }
1087
1088 static int
1089 cachecmp(const void *a, const void *b)
1090 {
1091 struct cpu_cache *c1 = (struct cpu_cache *) a;
1092 struct cpu_cache *c2 = (struct cpu_cache *) b;
1093
1094 return strcmp(c2->name, c1->name);
1095 }
1096
/*
 * Read the cache hierarchy of CPU @idx from
 * /sys/devices/system/cpu/cpuN/cache/indexM: on the first call it
 * counts the index directories and allocates desc->caches; for every
 * cache it fills in name ("L<level><d|i>") and size once, and always
 * collects the unique shared_cpu_map sets.
 */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* count indexN directories to size the caches array;
		 * assumes every CPU exposes the same cache levels */
		while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, desc->ncaches))
			desc->ncaches++;

		if (!desc->ncaches)
			return;

		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
				num, i))
			continue;
		/* name and size only need to be read once per cache */
		if (!ca->name) {
			int type, level;

			/* cache type */
			path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/type",
					num, i);
			if (!strcmp(buf, "Data"))
				type = 'd';
			else if (!strcmp(buf, "Instruction"))
				type = 'i';
			else
				type = 0;	/* unified cache -- no suffix */

			/* cache level */
			level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
					num, i);
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", level);

			ca->name = xstrdup(buf);

			/* cache size */
			if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
				path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
				ca->size = xstrdup(buf);
			} else {
				ca->size = xstrdup("unknown size");
			}
		}

		/* information about how CPUs share different caches */
		map = path_read_cpuset(maxcpus,
				  _PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
				  num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1165
/*
 * Returns non-zero when @d names a NUMA node directory, i.e. "node<N>"
 * where <N> consists of digits only.
 */
static inline int is_node_dirent(struct dirent *d)
{
	return
		d &&
#ifdef _DIRENT_HAVE_D_TYPE
		/* DT_UNKNOWN must be accepted: some filesystems do not
		 * fill in d_type */
		(d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
		strncmp(d->d_name, "node", 4) == 0 &&
		isdigit_string(d->d_name + 4);
}
1176
/*
 * qsort() comparator for node numbers (ints), ascending order.
 *
 * Uses explicit comparisons rather than "*a - *b": the subtraction can
 * overflow when the operands differ by more than INT_MAX, and signed
 * overflow is undefined behavior in C.
 */
static int
nodecmp(const void *ap, const void *bp)
{
	const int *a = ap, *b = bp;

	return (*a > *b) - (*a < *b);
}
1183
/*
 * Detects NUMA topology: counts node<N> directories under
 * _PATH_SYS_NODE, records the node numbers (sorted ascending) in
 * desc->idx2nodenum, and reads each node's CPU map into desc->nodemaps.
 * Leaves desc->nnodes at 0 when the directory is missing or empty.
 */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	char *path;

	/* number of NUMA node */
	path = path_strdup(_PATH_SYS_NODE);
	dir = opendir(path);
	free(path);

	/* first pass: count node directories */
	while (dir && (d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		if (dir)
			closedir(dir);
		return;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	/* second pass: collect the node numbers, then sort them --
	 * readdir() order is not guaranteed */
	if (dir) {
		rewinddir(dir);
		while ((d = readdir(dir)) && i < desc->nnodes) {
			if (is_node_dirent(d))
				desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
							_("Failed to extract the node number"));
		}
		closedir(dir);
		qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
	}

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		desc->nodemaps[i] = path_read_cpuset(maxcpus,
					_PATH_SYS_NODE "/node%d/cpumap",
					desc->idx2nodenum[i]);
}
1228
1229 static char *
1230 get_cell_data(struct lscpu_desc *desc, int idx, int col,
1231 struct lscpu_modifier *mod,
1232 char *buf, size_t bufsz)
1233 {
1234 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1235 size_t i;
1236 int cpu = real_cpu_num(desc, idx);
1237
1238 *buf = '\0';
1239
1240 switch (col) {
1241 case COL_CPU:
1242 snprintf(buf, bufsz, "%d", cpu);
1243 break;
1244 case COL_CORE:
1245 if (cpuset_ary_isset(cpu, desc->coremaps,
1246 desc->ncores, setsize, &i) == 0)
1247 snprintf(buf, bufsz, "%zu", i);
1248 break;
1249 case COL_SOCKET:
1250 if (cpuset_ary_isset(cpu, desc->socketmaps,
1251 desc->nsockets, setsize, &i) == 0)
1252 snprintf(buf, bufsz, "%zu", i);
1253 break;
1254 case COL_NODE:
1255 if (cpuset_ary_isset(cpu, desc->nodemaps,
1256 desc->nnodes, setsize, &i) == 0)
1257 snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
1258 break;
1259 case COL_BOOK:
1260 if (cpuset_ary_isset(cpu, desc->bookmaps,
1261 desc->nbooks, setsize, &i) == 0)
1262 snprintf(buf, bufsz, "%zu", i);
1263 break;
1264 case COL_CACHE:
1265 {
1266 char *p = buf;
1267 size_t sz = bufsz;
1268 int j;
1269
1270 for (j = desc->ncaches - 1; j >= 0; j--) {
1271 struct cpu_cache *ca = &desc->caches[j];
1272
1273 if (cpuset_ary_isset(cpu, ca->sharedmaps,
1274 ca->nsharedmaps, setsize, &i) == 0) {
1275 int x = snprintf(p, sz, "%zu", i);
1276 if (x <= 0 || (size_t) x + 2 >= sz)
1277 return NULL;
1278 p += x;
1279 sz -= x;
1280 }
1281 if (j != 0) {
1282 *p++ = mod->compat ? ',' : ':';
1283 *p = '\0';
1284 sz++;
1285 }
1286 }
1287 break;
1288 }
1289 case COL_POLARIZATION:
1290 if (desc->polarization) {
1291 int x = desc->polarization[idx];
1292
1293 snprintf(buf, bufsz, "%s",
1294 mod->mode == OUTPUT_PARSABLE ?
1295 polar_modes[x].parsable :
1296 polar_modes[x].readable);
1297 }
1298 break;
1299 case COL_ADDRESS:
1300 if (desc->addresses)
1301 snprintf(buf, bufsz, "%d", desc->addresses[idx]);
1302 break;
1303 case COL_CONFIGURED:
1304 if (!desc->configured)
1305 break;
1306 if (mod->mode == OUTPUT_PARSABLE)
1307 snprintf(buf, bufsz, "%s",
1308 desc->configured[idx] ? _("Y") : _("N"));
1309 else
1310 snprintf(buf, bufsz, "%s",
1311 desc->configured[idx] ? _("yes") : _("no"));
1312 break;
1313 case COL_ONLINE:
1314 if (!desc->online)
1315 break;
1316 if (mod->mode == OUTPUT_PARSABLE)
1317 snprintf(buf, bufsz, "%s",
1318 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
1319 else
1320 snprintf(buf, bufsz, "%s",
1321 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
1322 break;
1323 case COL_MAXMHZ:
1324 if (desc->maxmhz)
1325 xstrncpy(buf, desc->maxmhz[idx], bufsz);
1326 break;
1327 case COL_MINMHZ:
1328 if (desc->minmhz)
1329 xstrncpy(buf, desc->minmhz[idx], bufsz);
1330 break;
1331 }
1332 return buf;
1333 }
1334
1335 static char *
1336 get_cell_header(struct lscpu_desc *desc, int col,
1337 struct lscpu_modifier *mod,
1338 char *buf, size_t bufsz)
1339 {
1340 *buf = '\0';
1341
1342 if (col == COL_CACHE) {
1343 char *p = buf;
1344 size_t sz = bufsz;
1345 int i;
1346
1347 for (i = desc->ncaches - 1; i >= 0; i--) {
1348 int x = snprintf(p, sz, "%s", desc->caches[i].name);
1349 if (x <= 0 || (size_t) x + 2 > sz)
1350 return NULL;
1351 sz -= x;
1352 p += x;
1353 if (i > 0) {
1354 *p++ = mod->compat ? ',' : ':';
1355 *p = '\0';
1356 sz++;
1357 }
1358 }
1359 if (desc->ncaches)
1360 return buf;
1361 }
1362 snprintf(buf, bufsz, "%s", coldescs[col].name);
1363 return buf;
1364 }
1365
1366 /*
1367 * [-p] backend, we support two parsable formats:
1368 *
1369 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1370 * output and it contains fixed set of the columns. The CACHE columns are at
1371 * the end of the line and the CACHE is not printed if the number of the caches
1372 * is zero. The CACHE columns are separated by two commas, for example:
1373 *
1374 * $ lscpu --parse
1375 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1376 * 0,0,0,0,,0,0,0
1377 * 1,1,0,0,,1,1,0
1378 *
 * 2) "user defined output" -- this format always prints all columns without a
 * special prefix for the CACHE column. If there are no CACHEs then the column
 * is empty and the header "Cache" is printed rather than a real cache name.
1382 * The CACHE columns are separated by ':'.
1383 *
1384 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1385 * # CPU,Core,Socket,Node,L1d:L1i:L2
1386 * 0,0,0,0,0:0:0
1387 * 1,1,0,0,1:1:0
1388 */
1389 static void
1390 print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1391 struct lscpu_modifier *mod)
1392 {
1393 char buf[BUFSIZ], *data;
1394 int i;
1395
1396 /*
1397 * Header
1398 */
1399 printf(_(
1400 "# The following is the parsable format, which can be fed to other\n"
1401 "# programs. Each different item in every column has an unique ID\n"
1402 "# starting from zero.\n"));
1403
1404 fputs("# ", stdout);
1405 for (i = 0; i < ncols; i++) {
1406 int col = cols[i];
1407
1408 if (col == COL_CACHE) {
1409 if (mod->compat && !desc->ncaches)
1410 continue;
1411 if (mod->compat && i != 0)
1412 putchar(',');
1413 }
1414 if (i > 0)
1415 putchar(',');
1416
1417 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1418
1419 if (data && * data && col != COL_CACHE &&
1420 !coldescs[col].is_abbr) {
1421 /*
1422 * For normal column names use mixed case (e.g. "Socket")
1423 */
1424 char *p = data + 1;
1425
1426 while (p && *p != '\0') {
1427 *p = tolower((unsigned int) *p);
1428 p++;
1429 }
1430 }
1431 fputs(data && *data ? data : "", stdout);
1432 }
1433 putchar('\n');
1434
1435 /*
1436 * Data
1437 */
1438 for (i = 0; i < desc->ncpuspos; i++) {
1439 int c;
1440 int cpu = real_cpu_num(desc, i);
1441
1442 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1443 continue;
1444 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1445 continue;
1446 if (desc->present && !is_cpu_present(desc, cpu))
1447 continue;
1448 for (c = 0; c < ncols; c++) {
1449 if (mod->compat && cols[c] == COL_CACHE) {
1450 if (!desc->ncaches)
1451 continue;
1452 if (c > 0)
1453 putchar(',');
1454 }
1455 if (c > 0)
1456 putchar(',');
1457
1458 data = get_cell_data(desc, i, cols[c], mod,
1459 buf, sizeof(buf));
1460 fputs(data && *data ? data : "", stdout);
1461 }
1462 putchar('\n');
1463 }
1464 }
1465
/*
 * [-e] backend
 *
 * Prints the extended readable output: one libsmartcols table line per
 * selected CPU, with the same online/offline/present filtering as the
 * parsable backend.
 */
static void
print_readable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to initialize output table"));

	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		/* NOTE(review): presumably libsmartcols takes ownership of
		 * the xstrdup()ed name -- verify, otherwise this leaks */
		if (!scols_table_new_column(table, xstrdup(data), 0, 0))
			err(EXIT_FAILURE, _("failed to initialize output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		/* apply the -a/-b/-c online/offline filters */
		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to initialize output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";	/* empty cells render as "-" */
			scols_line_set_data(line, c, data);
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1518
/* output formats "<key> <value>" -- the key is left-justified in a
 * 23-character field so all values in the summary output line up */
#define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
#define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
1522
/*
 * Prints "<key> <set>", rendering @set either as a hexadecimal mask
 * (prefixed with "0x") when @hex is non-zero, or as a human-readable
 * CPU list otherwise.
 */
static void
print_cpuset(const char *key, cpu_set_t *set, int hex)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t setbuflen = 7 * maxcpus;
	/* NOTE(review): VLA sized by the CPU count -- with a very large
	 * maxcpus this could use considerable stack space; confirm the
	 * upper bound or consider heap allocation */
	char setbuf[setbuflen], *p;

	if (hex) {
		p = cpumask_create(setbuf, setbuflen, set, setsize);
		printf("%-23s0x%s\n", key, p);
	} else {
		p = cpulist_create(setbuf, setbuflen, set, setsize);
		print_s(key, p);
	}

}
1539
/*
 * default output
 *
 * Prints the summary view: one "<key>: <value>" line per attribute.
 * Optional attributes (vendor, model, caches, NUMA nodes, ...) appear
 * only when the corresponding desc field was filled in earlier.
 */
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	char buf[512];
	int i;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	print_s(_("Architecture:"), desc->arch);

	if (desc->mode) {
		char mbuf[64], *p = mbuf;

		/* build e.g. "32-bit, 64-bit" from the mode bits */
		if (desc->mode & MODE_32BIT) {
			strcpy(p, "32-bit, ");
			p += 8;
		}
		if (desc->mode & MODE_64BIT) {
			strcpy(p, "64-bit, ");
			p += 8;
		}
		*(p - 2) = '\0';	/* chop the trailing ", " */
		print_s(_("CPU op-mode(s):"), mbuf);
	}
#if !defined(WORDS_BIGENDIAN)
	print_s(_("Byte Order:"), "Little Endian");
#else
	print_s(_("Byte Order:"), "Big Endian");
#endif
	print_n(_("CPU(s):"), desc->ncpus);

	if (desc->online)
		print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
					_("On-line CPU(s) list:"),
				desc->online, mod->hex);

	if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
		cpu_set_t *set;

		/* Linux kernel provides cpuset of off-line CPUs that contains
		 * all configured CPUs (see /sys/devices/system/cpu/offline),
		 * but want to print real (present in system) off-line CPUs only.
		 */
		set = cpuset_alloc(maxcpus, NULL, NULL);
		if (!set)
			err(EXIT_FAILURE, _("failed to callocate cpu set"));
		CPU_ZERO_S(setsize, set);
		for (i = 0; i < desc->ncpuspos; i++) {
			int cpu = real_cpu_num(desc, i);
			if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
				CPU_SET_S(cpu, setsize, set);
		}
		print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
					_("Off-line CPU(s) list:"),
			     set, mod->hex);
		cpuset_free(set);
	}

	if (desc->nsockets) {
		int cores_per_socket, sockets_per_book, books;

		cores_per_socket = sockets_per_book = books = 0;
		/* s390 detects its cpu topology via /proc/sysinfo, if present.
		 * Using simply the cpu topology masks in sysfs will not give
		 * usable results since everything is virtualized. E.g.
		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
		 * five cpus.
		 * If the cpu topology is not exported (e.g. 2nd level guest)
		 * fall back to old calculation scheme.
		 */
		if (path_exist(_PATH_PROC_SYSINFO)) {
			FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
			char pbuf[BUFSIZ];
			int t0, t1, t2;

			while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
				if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
					   &t0, &t1, &t2, &books, &sockets_per_book,
					   &cores_per_socket) == 6)
					break;
			}
			if (fd)
				fclose(fd);
		}
		print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
		/* "?:" -- prefer the /proc/sysinfo numbers when found,
		 * otherwise fall back to the sysfs-derived ratios */
		print_n(_("Core(s) per socket:"),
			cores_per_socket ?: desc->ncores / desc->nsockets);
		if (desc->nbooks) {
			print_n(_("Socket(s) per book:"),
				sockets_per_book ?: desc->nsockets / desc->nbooks);
			print_n(_("Book(s):"), books ?: desc->nbooks);
		} else {
			print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
		}
	}
	if (desc->nnodes)
		print_n(_("NUMA node(s):"), desc->nnodes);
	if (desc->vendor)
		print_s(_("Vendor ID:"), desc->vendor);
	if (desc->family)
		print_s(_("CPU family:"), desc->family);
	if (desc->model)
		print_s(_("Model:"), desc->model);
	if (desc->modelname)
		print_s(_("Model name:"), desc->modelname);
	if (desc->stepping)
		print_s(_("Stepping:"), desc->stepping);
	if (desc->mhz)
		print_s(_("CPU MHz:"), desc->mhz);
	if (desc->maxmhz)
		print_s(_("CPU max MHz:"), desc->maxmhz[0]);
	if (desc->minmhz)
		print_s(_("CPU min MHz:"), desc->minmhz[0]);
	if (desc->bogomips)
		print_s(_("BogoMIPS:"), desc->bogomips);
	if (desc->virtflag) {
		/* the "svm"/"vmx" CPU flags indicate hardware
		 * virtualization support */
		if (!strcmp(desc->virtflag, "svm"))
			print_s(_("Virtualization:"), "AMD-V");
		else if (!strcmp(desc->virtflag, "vmx"))
			print_s(_("Virtualization:"), "VT-x");
	}
	if (desc->hypervisor)
		print_s(_("Hypervisor:"), desc->hypervisor);
	if (desc->hyper) {
		print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
		print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
	}
	if (desc->dispatching >= 0)
		print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
	if (desc->ncaches) {
		char cbuf[512];

		/* highest cache index first, matching the other backends */
		for (i = desc->ncaches - 1; i >= 0; i--) {
			snprintf(cbuf, sizeof(cbuf),
					_("%s cache:"), desc->caches[i].name);
			print_s(cbuf, desc->caches[i].size);
		}
	}

	for (i = 0; i < desc->nnodes; i++) {
		snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
		print_cpuset(buf, desc->nodemaps[i], mod->hex);
	}

	if (desc->flags)
		print_s(_("Flags:"), desc->flags);

	if (desc->physsockets) {
		print_n(_("Physical sockets:"), desc->physsockets);
		print_n(_("Physical chips:"), desc->physchips);
		print_n(_("Physical cores/chip:"), desc->physcoresperchip);
	}
}
1695
/*
 * Prints the usage text (options plus the list of available columns)
 * to @out and exits -- success when writing to stdout (-h), failure
 * otherwise (bad usage).
 */
static void __attribute__((__noreturn__)) usage(FILE *out)
{
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_SEPARATOR, out);
	fputs(_("Display information about the CPU architecture.\n"), out);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all               print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online            print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline           print offline CPUs only\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>]    print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir>     use specified directory as system root\n"), out);
	fputs(_(" -x, --hex               print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	fputs(USAGE_HELP, out);
	fputs(USAGE_VERSION, out);

	fprintf(out, _("\nAvailable columns:\n"));

	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s  %s\n", coldescs[i].name, _(coldescs[i].help));

	fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));

	exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}
1727
int main(int argc, char *argv[])
{
	struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
	struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
	int c, i;
	int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
	/* -a/-b/-c are only valid together with -e/-p; remember whether
	 * one of them was given so we can reject -a/-b/-c alone */
	int cpu_modifier_specified = 0;

	static const struct option longopts[] = {
		{ "all",        no_argument,       0, 'a' },
		{ "online",     no_argument,       0, 'b' },
		{ "offline",    no_argument,       0, 'c' },
		{ "help",	no_argument,       0, 'h' },
		{ "extended",	optional_argument, 0, 'e' },
		{ "parse",	optional_argument, 0, 'p' },
		{ "sysroot",	required_argument, 0, 's' },
		{ "hex",	no_argument,	   0, 'x' },
		{ "version",	no_argument,	   0, 'V' },
		{ NULL,		0, 0, 0 }
	};

	static const ul_excl_t excl[] = {	/* rows and cols in ASCII order */
		{ 'a','b','c' },
		{ 'e','p' },
		{ 0 }
	};
	int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);
	atexit(close_stdout);

	while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {

		err_exclusive_options(c, longopts, excl, excl_st);

		switch (c) {
		case 'a':
			mod->online = mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'b':
			mod->online = 1;
			cpu_modifier_specified = 1;
			break;
		case 'c':
			mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'h':
			usage(stdout);
		case 'p':
		case 'e':
			if (optarg) {
				/* accept both --parse=LIST and --parse LIST */
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
			break;
		case 's':
			/* read all /sys and /proc paths relative to <dir> */
			path_set_prefix(optarg);
			mod->system = SYSTEM_SNAPSHOT;
			break;
		case 'x':
			mod->hex = 1;
			break;
		case 'V':
			printf(UTIL_LINUX_VERSION);
			return EXIT_SUCCESS;
		default:
			usage(stderr);
		}
	}

	if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
		fprintf(stderr,
			_("%s: options --all, --online and --offline may only "
			  "be used with options --extended or --parse.\n"),
			program_invocation_short_name);
		return EXIT_FAILURE;
	}

	if (argc != optind)
		usage(stderr);

	/* set default cpu display mode if none was specified */
	if (!mod->online && !mod->offline) {
		mod->online = 1;
		mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
	}

	read_basicinfo(desc, mod);

	/* gather per-CPU attributes */
	for (i = 0; i < desc->ncpuspos; i++) {
		read_topology(desc, i);
		read_cache(desc, i);
		read_polarization(desc, i);
		read_address(desc, i);
		read_configured(desc, i);
		read_max_mhz(desc, i);
		read_min_mhz(desc, i);
	}

	/* order caches by name (descending) for consistent output */
	if (desc->caches)
		qsort(desc->caches, desc->ncaches,
				sizeof(struct cpu_cache), cachecmp);

	read_nodes(desc);
	read_hypervisor(desc, mod);

	switch(mod->mode) {
	case OUTPUT_SUMMARY:
		print_summary(desc, mod);
		break;
	case OUTPUT_PARSABLE:
		if (!ncolumns) {
			/* default -p columns: the original lscpu(1)
			 * compatible set */
			columns[ncolumns++] = COL_CPU;
			columns[ncolumns++] = COL_CORE;
			columns[ncolumns++] = COL_SOCKET;
			columns[ncolumns++] = COL_NODE;
			columns[ncolumns++] = COL_CACHE;
			mod->compat = 1;
		}
		print_parsable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_READABLE:
		if (!ncolumns) {
			/* No list was given. Just print whatever is there. */
			columns[ncolumns++] = COL_CPU;
			if (desc->nodemaps)
				columns[ncolumns++] = COL_NODE;
			if (desc->bookmaps)
				columns[ncolumns++] = COL_BOOK;
			if (desc->socketmaps)
				columns[ncolumns++] = COL_SOCKET;
			if (desc->coremaps)
				columns[ncolumns++] = COL_CORE;
			if (desc->caches)
				columns[ncolumns++] = COL_CACHE;
			if (desc->online)
				columns[ncolumns++] = COL_ONLINE;
			if (desc->configured)
				columns[ncolumns++] = COL_CONFIGURED;
			if (desc->polarization)
				columns[ncolumns++] = COL_POLARIZATION;
			if (desc->addresses)
				columns[ncolumns++] = COL_ADDRESS;
			if (desc->maxmhz)
				columns[ncolumns++] = COL_MAXMHZ;
			if (desc->minmhz)
				columns[ncolumns++] = COL_MINMHZ;
		}
		print_readable(desc, columns, ncolumns, mod);
		break;
	}

	return EXIT_SUCCESS;
}