1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 #include <ctype.h>
23 #include <dirent.h>
24 #include <errno.h>
25 #include <fcntl.h>
26 #include <getopt.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/utsname.h>
31 #include <unistd.h>
32 #include <stdarg.h>
33 #include <sys/types.h>
34 #include <sys/stat.h>
35
36 #include "cpuset.h"
37 #include "nls.h"
38 #include "xalloc.h"
39 #include "c.h"
40 #include "strutils.h"
41 #include "bitops.h"
42
43
44 #define CACHE_MAX 100
45
46 /* /sys paths */
47 #define _PATH_SYS_SYSTEM "/sys/devices/system"
48 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
49 #define _PATH_PROC_XEN "/proc/xen"
50 #define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
51 #define _PATH_PROC_CPUINFO "/proc/cpuinfo"
52 #define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
53 #define _PATH_PROC_SYSINFO "/proc/sysinfo"
54
55 /* virtualization types */
56 enum {
57 VIRT_NONE = 0,
58 VIRT_PARA,
59 VIRT_FULL
60 };
61 const char *virt_types[] = {
62 [VIRT_NONE] = N_("none"),
63 [VIRT_PARA] = N_("para"),
64 [VIRT_FULL] = N_("full")
65 };
66
67 /* hypervisor vendors */
68 enum {
69 HYPER_NONE = 0,
70 HYPER_XEN,
71 HYPER_KVM,
72 HYPER_MSHV,
73 HYPER_VMWARE,
74 HYPER_IBM
75 };
76 const char *hv_vendors[] = {
77 [HYPER_NONE] = NULL,
78 [HYPER_XEN] = "Xen",
79 [HYPER_KVM] = "KVM",
80 [HYPER_MSHV] = "Microsoft",
81 [HYPER_VMWARE] = "VMware",
82 [HYPER_IBM] = "IBM"
83 };
84
85 /* CPU modes */
86 enum {
87 MODE_32BIT = (1 << 1),
88 MODE_64BIT = (1 << 2)
89 };
90
91 /* cache(s) description */
92 struct cpu_cache {
93 char *name;
94 char *size;
95
96 int nsharedmaps;
97 cpu_set_t **sharedmaps;
98 };
99
100 /* dispatching modes */
101 enum {
102 DISP_HORIZONTAL = 0,
103 DISP_VERTICAL = 1
104 };
105
106 const char *disp_modes[] = {
107 [DISP_HORIZONTAL] = N_("horizontal"),
108 [DISP_VERTICAL] = N_("vertical")
109 };
110
111 /* cpu polarization */
112 enum {
113 POLAR_UNKNOWN = 0,
114 POLAR_VLOW,
115 POLAR_VMEDIUM,
116 POLAR_VHIGH,
117 POLAR_HORIZONTAL
118 };
119
120 const char *polar_modes[] = {
121 [POLAR_UNKNOWN] = "U",
122 [POLAR_VLOW] = "VL",
123 [POLAR_VMEDIUM] = "VM",
124 [POLAR_VHIGH] = "VH",
125 [POLAR_HORIZONTAL] = "H"
126 };
127
128 /* global description */
129 struct lscpu_desc {
130 char *arch;
131 char *vendor;
132 char *family;
133 char *model;
134 char *virtflag; /* virtualization flag (vmx, svm) */
135 int hyper; /* hypervisor vendor ID */
136 int virtype; /* VIRT_PARA|FULL|NONE ? */
137 char *mhz;
138 char *stepping;
139 char *bogomips;
140 char *flags;
141 int dispatching; /* none, horizontal or vertical */
142 int mode; /* rm, lm and/or tm */
143
144 int ncpus; /* number of CPUs */
145 cpu_set_t *online; /* mask with online CPUs */
146
147 int nnodes; /* number of NUMA nodes */
148 cpu_set_t **nodemaps; /* array with NUMA nodes */
149
150 /* books -- based on book_siblings (internal kernel map of cpuX's
151 * hardware threads within the same book) */
152 int nbooks; /* number of all online books */
153 cpu_set_t **bookmaps; /* unique book_siblings */
154
155 /* sockets -- based on core_siblings (internal kernel map of cpuX's
156 * hardware threads within the same physical_package_id (socket)) */
157 int nsockets; /* number of all online sockets */
158 cpu_set_t **socketmaps; /* unique core_siblings */
159
160 /* cores -- based on thread_siblings (internal kernel map of cpuX's
161 * hardware threads within the same core as cpuX) */
162 int ncores; /* number of all online cores */
163 cpu_set_t **coremaps; /* unique thread_siblings */
164
165 int nthreads; /* number of online threads */
166
167 int ncaches;
168 struct cpu_cache *caches;
169
170 int *polarization; /* cpu polarization */
171 int *addresses; /* physical cpu addresses */
172 };
173
174 static size_t sysrootlen;
175 static char pathbuf[PATH_MAX];
176 static int maxcpus; /* size in bits of kernel cpu mask */
177
178 #define is_cpu_online(_d, _cpu) \
179 ((_d) && (_d)->online ? \
180 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
181
182 static FILE *path_fopen(const char *mode, int exit_on_err, const char *path, ...)
183 __attribute__ ((__format__ (__printf__, 3, 4)));
184 static void path_getstr(char *result, size_t len, const char *path, ...)
185 __attribute__ ((__format__ (__printf__, 3, 4)));
186 static int path_getnum(const char *path, ...)
187 __attribute__ ((__format__ (__printf__, 1, 2)));
188 static int path_exist(const char *path, ...)
189 __attribute__ ((__format__ (__printf__, 1, 2)));
190 static cpu_set_t *path_cpuset(const char *path, ...)
191 __attribute__ ((__format__ (__printf__, 1, 2)));
192
193 /*
194 * Parsable output
195 */
196 enum {
197 COL_CPU,
198 COL_CORE,
199 COL_SOCKET,
200 COL_NODE,
201 COL_BOOK,
202 COL_CACHE,
203 COL_POLARIZATION,
204 COL_ADDRESS
205 };
206
207 static const char *colnames[] =
208 {
209 [COL_CPU] = "CPU",
210 [COL_CORE] = "Core",
211 [COL_SOCKET] = "Socket",
212 [COL_NODE] = "Node",
213 [COL_BOOK] = "Book",
214 [COL_CACHE] = "Cache",
215 [COL_POLARIZATION] = "Polarization",
216 [COL_ADDRESS] = "Address"
217 };
218
219
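/*
 * Translate a column name given with --parse= (case-insensitive) to its
 * COL_* index; warns and returns -1 for an unknown name.
 */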
220 static int column_name_to_id(const char *name, size_t namesz)
221 {
222 size_t i;
223
224 for (i = 0; i < ARRAY_SIZE(colnames); i++) {
225 const char *cn = colnames[i];
226
227 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
228 return i;
229 }
230 warnx(_("unknown column: %s"), name);
231 return -1;
232 }
233
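/*
 * Compose a path in the global pathbuf[]; when --sysroot is in use the
 * formatted path is appended after the sysroot prefix already stored there.
 */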
234 static const char *
235 path_vcreate(const char *path, va_list ap)
236 {
237 if (sysrootlen)
238 vsnprintf(pathbuf + sysrootlen,
239 sizeof(pathbuf) - sysrootlen, path, ap);
240 else
241 vsnprintf(pathbuf, sizeof(pathbuf), path, ap);
242 return pathbuf;
243 }
244
245 static FILE *
246 path_vfopen(const char *mode, int exit_on_error, const char *path, va_list ap)
247 {
248 FILE *f;
249 const char *p = path_vcreate(path, ap);
250
251 f = fopen(p, mode);
252 if (!f && exit_on_error)
253 err(EXIT_FAILURE, _("error: cannot open %s"), p);
254 return f;
255 }
256
257 static FILE *
258 path_fopen(const char *mode, int exit_on_error, const char *path, ...)
259 {
260 FILE *fd;
261 va_list ap;
262
263 va_start(ap, path);
264 fd = path_vfopen(mode, exit_on_error, path, ap);
265 va_end(ap);
266
267 return fd;
268 }
269
270 static void
271 path_getstr(char *result, size_t len, const char *path, ...)
272 {
273 FILE *fd;
274 va_list ap;
275
276 va_start(ap, path);
277 fd = path_vfopen("r", 1, path, ap);
278 va_end(ap);
279
280 if (!fgets(result, len, fd))
281 err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
282 fclose(fd);
283
284 len = strlen(result);
285 if (result[len - 1] == '\n')
286 result[len - 1] = '\0';
287 }
288
289 static int
290 path_getnum(const char *path, ...)
291 {
292 FILE *fd;
293 va_list ap;
294 int result;
295
296 va_start(ap, path);
297 fd = path_vfopen("r", 1, path, ap);
298 va_end(ap);
299
300 if (fscanf(fd, "%d", &result) != 1) {
301 if (ferror(fd))
302 err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
303 else
304 errx(EXIT_FAILURE, _("parse error: %s"), pathbuf);
305 }
306 fclose(fd);
307 return result;
308 }
309
310 static int
311 path_exist(const char *path, ...)
312 {
313 va_list ap;
314 const char *p;
315
316 va_start(ap, path);
317 p = path_vcreate(path, ap);
318 va_end(ap);
319
320 return access(p, F_OK) == 0;
321 }
322
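/*
 * Read a CPU mask (hex) or CPU list from a sysfs file and return it as a
 * newly allocated cpu_set_t sized for maxcpus.
 */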
323 static cpu_set_t *
324 path_cpuparse(int islist, const char *path, va_list ap)
325 {
326 FILE *fd;
327 cpu_set_t *set;
328 size_t setsize, len = maxcpus * 7;
329 char buf[len];
330
331 fd = path_vfopen("r", 1, path, ap);
332
333 if (!fgets(buf, len, fd))
334 err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
335 fclose(fd);
336
337 len = strlen(buf);
338 if (buf[len - 1] == '\n')
339 buf[len - 1] = '\0';
340
341 set = cpuset_alloc(maxcpus, &setsize, NULL);
342 if (!set)
343 err(EXIT_FAILURE, _("failed to allocate cpu set"));
344
345 if (islist) {
346 if (cpulist_parse(buf, set, setsize, 0))
347 errx(EXIT_FAILURE, _("failed to parse CPU list %s"), buf);
348 } else {
349 if (cpumask_parse(buf, set, setsize))
350 errx(EXIT_FAILURE, _("failed to parse CPU mask %s"), buf);
351 }
352 return set;
353 }
354
355 static cpu_set_t *
356 path_cpuset(const char *path, ...)
357 {
358 va_list ap;
359 cpu_set_t *set;
360
361 va_start(ap, path);
362 set = path_cpuparse(0, path, ap);
363 va_end(ap);
364
365 return set;
366 }
367
368 static cpu_set_t *
369 path_cpulist(const char *path, ...)
370 {
371 va_list ap;
372 cpu_set_t *set;
373
374 va_start(ap, path);
375 set = path_cpuparse(1, path, ap);
376 va_end(ap);
377
378 return set;
379 }
380
381 /* Lookup a pattern and get the value from cpuinfo.
382 * Format is:
383 *
384 * "<pattern> : <key>"
385 */
386 int lookup(char *line, char *pattern, char **value)
387 {
388 char *p, *v;
389 int len = strlen(pattern);
390
391 if (!*line)
392 return 0;
393
394 /* pattern */
395 if (strncmp(line, pattern, len))
396 return 0;
397
398 /* white spaces */
399 for (p = line + len; isspace(*p); p++);
400
401 /* separator */
402 if (*p != ':')
403 return 0;
404
405 /* white spaces */
406 for (++p; isspace(*p); p++);
407
408 /* value */
409 if (!*p)
410 return 0;
411 v = p;
412
413 /* end of value */
414 len = strlen(line) - 1;
415 for (p = line + len; isspace(*(p-1)); p--);
416 *p = '\0';
417
418 *value = xstrdup(v);
419 return 1;
420 }
421
422 /* Don't init the mode for platforms where we are not able to
423 * detect whether the CPU supports 64-bit mode.
424 */
425 static int
426 init_mode(void)
427 {
428 int m = 0;
429
430 if (sysrootlen)
431 /* reading info from any /{sys,proc} dump, don't mix it with
432 * information about our real CPU */
433 return 0;
434
435 #if defined(__alpha__) || defined(__ia64__)
436 m |= MODE_64BIT; /* 64bit platforms only */
437 #endif
438 /* platforms with 64bit flag in /proc/cpuinfo, define
439 * 32bit default here */
440 #if defined(__i386__) || defined(__x86_64__) || \
441 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
442 m |= MODE_32BIT;
443 #endif
444 return m;
445 }
446
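/*
 * Fill in architecture, CPU count and /proc/cpuinfo details, then determine
 * maxcpus, the online CPU mask and the dispatching mode.
 */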
447 static void
448 read_basicinfo(struct lscpu_desc *desc)
449 {
450 FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
451 char buf[BUFSIZ];
452 struct utsname utsbuf;
453
454 /* architecture */
455 if (uname(&utsbuf) == -1)
456 err(EXIT_FAILURE, _("error: uname failed"));
457 desc->arch = xstrdup(utsbuf.machine);
458
459 /* count CPU(s) */
460 while(path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d", desc->ncpus))
461 desc->ncpus++;
462
463 /* details */
464 while (fgets(buf, sizeof(buf), fp) != NULL) {
465 if (lookup(buf, "vendor", &desc->vendor)) ;
466 else if (lookup(buf, "vendor_id", &desc->vendor)) ;
467 else if (lookup(buf, "family", &desc->family)) ;
468 else if (lookup(buf, "cpu family", &desc->family)) ;
469 else if (lookup(buf, "model", &desc->model)) ;
470 else if (lookup(buf, "stepping", &desc->stepping)) ;
471 else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
472 else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
473 else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
474 else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
475 else if (lookup(buf, "bogomips", &desc->bogomips)) ;
476 else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
477 else
478 continue;
479 }
480
481 desc->mode = init_mode();
482
483 if (desc->flags) {
484 snprintf(buf, sizeof(buf), " %s ", desc->flags);
485 if (strstr(buf, " svm "))
486 desc->virtflag = xstrdup("svm");
487 else if (strstr(buf, " vmx "))
488 desc->virtflag = xstrdup("vmx");
489 if (strstr(buf, " lm "))
490 desc->mode |= MODE_32BIT | MODE_64BIT; /* x86_64 */
491 if (strstr(buf, " zarch "))
492 desc->mode |= MODE_32BIT | MODE_64BIT; /* s390x */
493 if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
494 desc->mode |= MODE_32BIT | MODE_64BIT; /* sparc64 */
495 }
496
497 fclose(fp);
498
499 if (path_exist(_PATH_SYS_SYSTEM "/cpu/kernel_max"))
500 /* note that kernel_max is maximum index [NR_CPUS-1] */
501 maxcpus = path_getnum(_PATH_SYS_SYSTEM "/cpu/kernel_max") + 1;
502
503 else if (!sysrootlen)
504 /* the root is '/' so we are working with data from the current kernel */
505 maxcpus = get_max_number_of_cpus();
506 else
507 /* we are reading some /sys snapshot instead of the real /sys,
508 * let's use any crazy number... */
509 maxcpus = desc->ncpus > 2048 ? desc->ncpus : 2048;
510
511 /* get mask for online CPUs */
512 if (path_exist(_PATH_SYS_SYSTEM "/cpu/online")) {
513 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
514 desc->online = path_cpulist(_PATH_SYS_SYSTEM "/cpu/online");
515 desc->nthreads = CPU_COUNT_S(setsize, desc->online);
516 }
517
518 /* get dispatching mode */
519 if (path_exist(_PATH_SYS_SYSTEM "/cpu/dispatching"))
520 desc->dispatching = path_getnum(_PATH_SYS_SYSTEM "/cpu/dispatching");
521 else
522 desc->dispatching = -1;
523 }
524
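/*
 * Return 1 if a PCI device with the given vendor and device ID is listed in
 * /proc/bus/pci/devices, 0 otherwise.
 */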
525 static int
526 has_pci_device(int vendor, int device)
527 {
528 FILE *f;
529 int num, fn, ven, dev;
530 int res = 1;
531
532 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
533 if (!f)
534 return 0;
535
536 /* for more details about bus/pci/devices format see
537 * drivers/pci/proc.c in linux kernel
538 */
539 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
540 &num, &fn, &ven, &dev) == 4) {
541
542 if (ven == vendor && dev == device)
543 goto found;
544 }
545
546 res = 0;
547 found:
548 fclose(f);
549 return res;
550 }
551
552 #if defined(__x86_64__) || defined(__i386__)
553
554 /*
555 * This CPUID leaf returns the information about the hypervisor.
556 * EAX : maximum input value for CPUID supported by the hypervisor.
557 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
558 */
559 #define HYPERVISOR_INFO_LEAF 0x40000000
560
561 static inline void
562 cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
563 unsigned int *ecx, unsigned int *edx)
564 {
565 __asm__(
566 #if defined(__PIC__) && defined(__i386__)
567 /* x86 PIC cannot clobber ebx -- gcc complains about it */
568 "pushl %%ebx;"
569 "cpuid;"
570 "movl %%ebx, %%esi;"
571 "popl %%ebx;"
572 : "=S" (*ebx),
573 #else
574 "cpuid;"
575 : "=b" (*ebx),
576 #endif
577 "=a" (*eax),
578 "=c" (*ecx),
579 "=d" (*edx)
580 : "1" (op), "c"(0));
581 }
582
583 static void
584 read_hypervisor_cpuid(struct lscpu_desc *desc)
585 {
586 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
587 char hyper_vendor_id[13];
588
589 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
590
591 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
592 memcpy(hyper_vendor_id + 0, &ebx, 4);
593 memcpy(hyper_vendor_id + 4, &ecx, 4);
594 memcpy(hyper_vendor_id + 8, &edx, 4);
595 hyper_vendor_id[12] = '\0';
596
597 if (!hyper_vendor_id[0])
598 return;
599
600 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
601 desc->hyper = HYPER_XEN;
602 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
603 desc->hyper = HYPER_KVM;
604 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
605 desc->hyper = HYPER_MSHV;
606 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
607 desc->hyper = HYPER_VMWARE;
608 }
609
610 #else /* ! (__x86_64__ || __i386__) */
611 static void
612 read_hypervisor_cpuid(struct lscpu_desc *desc)
613 {
614 }
615 #endif
616
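/*
 * Detect the hypervisor vendor and the virtualization type: CPUID first,
 * then /proc/xen, a Xen PCI device and /proc/sysinfo (s390).
 */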
617 static void
618 read_hypervisor(struct lscpu_desc *desc)
619 {
620 read_hypervisor_cpuid(desc);
621
622 if (desc->hyper)
623 /* hvm */
624 desc->virtype = VIRT_FULL;
625
626 else if (path_exist(_PATH_PROC_XEN)) {
627 /* Xen para-virt or dom0 */
628 FILE *fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
629 int dom0 = 0;
630
631 if (fd) {
632 char buf[256];
633
634 if (fscanf(fd, "%s", buf) == 1 &&
635 !strcmp(buf, "control_d"))
636 dom0 = 1;
637 fclose(fd);
638 }
639 desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
640 desc->hyper = HYPER_XEN;
641
642 } else if (has_pci_device(0x5853, 0x0001)) {
643 /* Xen full-virt on non-x86_64 */
644 desc->hyper = HYPER_XEN;
645 desc->virtype = VIRT_FULL;
646 } else if (path_exist(_PATH_PROC_SYSINFO)) {
647 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
648 char buf[BUFSIZ];
649
650 desc->hyper = HYPER_IBM;
651 desc->virtype = VIRT_FULL;
652 while (fgets(buf, sizeof(buf), fd) != NULL) {
653 if (!strstr(buf, "Control Program:"))
654 continue;
655 if (!strstr(buf, "KVM"))
656 desc->hyper = HYPER_IBM;
657 else
658 desc->hyper = HYPER_KVM;
659 }
660 fclose(fd);
661 }
662 }
663
664 /* add @set to @ary; an unnecessary (duplicate) set is deallocated. */
665 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
666 {
667 int i;
668 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
669
670 if (!ary)
671 return -1;
672
673 for (i = 0; i < *items; i++) {
674 if (CPU_EQUAL_S(setsize, set, ary[i]))
675 break;
676 }
677 if (i == *items) {
678 ary[*items] = set;
679 ++*items;
680 return 0;
681 }
682 CPU_FREE(set);
683 return 1;
684 }
685
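/*
 * Read the thread/core/book sibling masks of CPU @num from sysfs and store
 * each previously unseen map in the per-desc arrays.
 */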
686 static void
687 read_topology(struct lscpu_desc *desc, int num)
688 {
689 cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
690
691 if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
692 return;
693
694 thread_siblings = path_cpuset(_PATH_SYS_CPU
695 "/cpu%d/topology/thread_siblings", num);
696 core_siblings = path_cpuset(_PATH_SYS_CPU
697 "/cpu%d/topology/core_siblings", num);
698 book_siblings = NULL;
699 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num)) {
700 book_siblings = path_cpuset(_PATH_SYS_CPU
701 "/cpu%d/topology/book_siblings", num);
702 }
703
704 if (!desc->coremaps) {
705 int nbooks, nsockets, ncores, nthreads;
706 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
707
708 /* threads within one core */
709 nthreads = CPU_COUNT_S(setsize, thread_siblings);
710 /* cores within one socket */
711 ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
712 /* number of sockets within one book.
713 * Because of odd / non-present cpu maps and to keep
714 * calculation easy we make sure that nsockets and
715 * nbooks are at least 1.
716 */
717 nsockets = desc->ncpus / nthreads / ncores ?: 1;
718 /* number of books */
719 nbooks = desc->ncpus / nthreads / ncores / nsockets ?: 1;
720
721 /* all threads, see also read_basicinfo()
722 * -- fallback for kernels without
723 * /sys/devices/system/cpu/online.
724 */
725 if (!desc->nthreads)
726 desc->nthreads = nbooks * nsockets * ncores * nthreads;
727 /* For each map we make sure that it can have up to ncpus
728 * entries. This is because we cannot reliably calculate the
729 * number of cores, sockets and books on all architectures.
730 * E.g. completely virtualized architectures like s390 may
731 * have multiple sockets of different sizes.
732 */
733 desc->coremaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
734 desc->socketmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
735 if (book_siblings)
736 desc->bookmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
737 }
738
739 add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
740 add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
741 if (book_siblings)
742 add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
743 }
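/*
 * Read the s390 CPU polarization of CPU @num, if the dispatching mode is
 * available.
 */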
744 static void
745 read_polarization(struct lscpu_desc *desc, int num)
746 {
747 char mode[64];
748
749 if (desc->dispatching < 0)
750 return;
751 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
752 return;
753 if (!desc->polarization)
754 desc->polarization = xcalloc(desc->ncpus, sizeof(int));
755 path_getstr(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
756 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
757 desc->polarization[num] = POLAR_VLOW;
758 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
759 desc->polarization[num] = POLAR_VMEDIUM;
760 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
761 desc->polarization[num] = POLAR_VHIGH;
762 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
763 desc->polarization[num] = POLAR_HORIZONTAL;
764 else
765 desc->polarization[num] = POLAR_UNKNOWN;
766 }
767
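/*
 * Read the physical address of CPU @num from sysfs, if exported by the
 * kernel.
 */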
768 static void
769 read_address(struct lscpu_desc *desc, int num)
770 {
771 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
772 return;
773 if (!desc->addresses)
774 desc->addresses = xcalloc(desc->ncpus, sizeof(int));
775 desc->addresses[num] = path_getnum(_PATH_SYS_CPU "/cpu%d/address", num);
776 }
777
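/*
 * qsort() helper; the arguments are deliberately swapped so caches are
 * sorted by name in descending order (e.g. L3, L2, L1i, L1d).
 */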
778 static int
779 cachecmp(const void *a, const void *b)
780 {
781 struct cpu_cache *c1 = (struct cpu_cache *) a;
782 struct cpu_cache *c2 = (struct cpu_cache *) b;
783
784 return strcmp(c2->name, c1->name);
785 }
786
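/*
 * Collect cache names, sizes and shared CPU maps for CPU @num from
 * /sys/devices/system/cpu/cpuN/cache/indexM.
 */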
787 static void
788 read_cache(struct lscpu_desc *desc, int num)
789 {
790 char buf[256];
791 int i;
792
793 if (!desc->ncaches) {
794 while(path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d/cache/index%d",
795 num, desc->ncaches))
796 desc->ncaches++;
797
798 if (!desc->ncaches)
799 return;
800
801 desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
802 }
803 for (i = 0; i < desc->ncaches; i++) {
804 struct cpu_cache *ca = &desc->caches[i];
805 cpu_set_t *map;
806
807 if (!path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d/cache/index%d",
808 num, i))
809 continue;
810 if (!ca->name) {
811 int type, level;
812
813 /* cache type */
814 path_getstr(buf, sizeof(buf),
815 _PATH_SYS_CPU "/cpu%d/cache/index%d/type",
816 num, i);
817 if (!strcmp(buf, "Data"))
818 type = 'd';
819 else if (!strcmp(buf, "Instruction"))
820 type = 'i';
821 else
822 type = 0;
823
824 /* cache level */
825 level = path_getnum(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
826 num, i);
827 if (type)
828 snprintf(buf, sizeof(buf), "L%d%c", level, type);
829 else
830 snprintf(buf, sizeof(buf), "L%d", level);
831
832 ca->name = xstrdup(buf);
833
834 /* cache size */
835 path_getstr(buf, sizeof(buf),
836 _PATH_SYS_CPU "/cpu%d/cache/index%d/size",
837 num, i);
838 ca->size = xstrdup(buf);
839 }
840
841 /* information about how CPUs share different caches */
842 map = path_cpuset(_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
843 num, i);
844
845 if (!ca->sharedmaps)
846 ca->sharedmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
847 add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
848 }
849 }
850
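/*
 * Count the NUMA nodes and read the CPU map of each node from sysfs.
 */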
851 static void
852 read_nodes(struct lscpu_desc *desc)
853 {
854 int i;
855
856 /* number of NUMA nodes */
857 while (path_exist(_PATH_SYS_SYSTEM "/node/node%d", desc->nnodes))
858 desc->nnodes++;
859
860 if (!desc->nnodes)
861 return;
862
863 desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
864
865 /* information about how nodes share different CPUs */
866 for (i = 0; i < desc->nnodes; i++)
867 desc->nodemaps[i] = path_cpuset(
868 _PATH_SYS_SYSTEM "/node/node%d/cpumap",
869 i);
870 }
871
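/*
 * Print one cell of the parsable output for CPU @i: the index of the
 * core/socket/node/book map (or of the shared-cache map) that contains it.
 */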
872 static void
873 print_parsable_cell(struct lscpu_desc *desc, int i, int col, int compatible)
874 {
875 int j;
876 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
877
878 switch (col) {
879 case COL_CPU:
880 printf("%d", i);
881 break;
882 case COL_CORE:
883 for (j = 0; j < desc->ncores; j++) {
884 if (CPU_ISSET_S(i, setsize, desc->coremaps[j])) {
885 printf("%d", j);
886 break;
887 }
888 }
889 break;
890 case COL_SOCKET:
891 for (j = 0; j < desc->nsockets; j++) {
892 if (CPU_ISSET_S(i, setsize, desc->socketmaps[j])) {
893 printf("%d", j);
894 break;
895 }
896 }
897 break;
898 case COL_NODE:
899 for (j = 0; j < desc->nnodes; j++) {
900 if (CPU_ISSET_S(i, setsize, desc->nodemaps[j])) {
901 printf("%d", j);
902 break;
903 }
904 }
905 break;
906 case COL_BOOK:
907 for (j = 0; j < desc->nbooks; j++) {
908 if (CPU_ISSET_S(i, setsize, desc->bookmaps[j])) {
909 printf("%d", j);
910 break;
911 }
912 }
913 break;
914 case COL_CACHE:
915 for (j = desc->ncaches - 1; j >= 0; j--) {
916 struct cpu_cache *ca = &desc->caches[j];
917 int x;
918
919 for (x = 0; x < ca->nsharedmaps; x++) {
920 if (CPU_ISSET_S(i, setsize, ca->sharedmaps[x])) {
921 printf("%d", x);
922 break;
923 }
924 }
925 if (j != 0)
926 putchar(compatible ? ',' : ':');
927 }
928 break;
929 case COL_POLARIZATION:
930 if (desc->polarization)
931 printf("%s", polar_modes[desc->polarization[i]]);
932 break;
933 case COL_ADDRESS:
934 if (desc->addresses)
935 printf("%d", desc->addresses[i]);
936 break;
937 }
938 }
939
940 /*
941 * We support two formats:
942 *
943 * 1) "compatible" -- this format is compatible with the original lscpu(1)
944 * output and it contains a fixed set of columns. The CACHE columns are at
945 * the end of the line and are not printed at all if the number of caches
946 * is zero. The CACHE columns are separated by two commas, for example:
947 *
948 * $ lscpu --parse
949 * # CPU,Core,Socket,Node,,L1d,L1i,L2
950 * 0,0,0,0,,0,0,0
951 * 1,1,0,0,,1,1,0
952 *
953 * 2) "user defined output" -- this format always prints all columns without a
954 * special prefix for the CACHE column. If there are no CACHEs then the column is
955 * empty and the generic header "Cache" is printed rather than a real cache name.
956 * The CACHE columns are separated by ':'.
957 *
958 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
959 * # CPU,Core,Socket,Node,L1d:L1i:L2
960 * 0,0,0,0,0:0:0
961 * 1,1,0,0,1:1:0
962 */
963 static void
964 print_parsable(struct lscpu_desc *desc, int cols[], int ncols, int compatible)
965 {
966 int i, c;
967
968 printf(_(
969 "# The following is the parsable format, which can be fed to other\n"
970 "# programs. Each different item in every column has an unique ID\n"
971 "# starting from zero.\n"));
972
973 fputs("# ", stdout);
974 for (i = 0; i < ncols; i++) {
975 if (cols[i] == COL_CACHE) {
976 if (compatible && !desc->ncaches)
977 continue;
978 if (i > 0)
979 putchar(',');
980 if (compatible && i != 0)
981 putchar(',');
982 for (c = desc->ncaches - 1; c >= 0; c--) {
983 printf("%s", desc->caches[c].name);
984 if (c > 0)
985 putchar(compatible ? ',' : ':');
986 }
987 if (!desc->ncaches)
988 fputs(colnames[cols[i]], stdout);
989 } else {
990 if (i > 0)
991 putchar(',');
992 fputs(colnames[cols[i]], stdout);
993 }
994 }
995 putchar('\n');
996
997 for (i = 0; i < desc->ncpus; i++) {
998 if (desc->online && !is_cpu_online(desc, i))
999 continue;
1000 for (c = 0; c < ncols; c++) {
1001 if (compatible && cols[c] == COL_CACHE) {
1002 if (!desc->ncaches)
1003 continue;
1004 if (c > 0)
1005 putchar(',');
1006 }
1007 if (c > 0)
1008 putchar(',');
1009 print_parsable_cell(desc, i, cols[c], compatible);
1010 }
1011 putchar('\n');
1012 }
1013 }
1014
1015
1016 /* output formats "<key> <value>" */
1017 #define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
1018 #define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
1019
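/*
 * Print a CPU set either as a hexadecimal mask or as a human readable CPU
 * list, depending on @hex.
 */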
1020 static void
1021 print_cpuset(const char *key, cpu_set_t *set, int hex)
1022 {
1023 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1024 size_t setbuflen = 7 * maxcpus;
1025 char setbuf[setbuflen], *p;
1026
1027 if (hex) {
1028 p = cpumask_create(setbuf, setbuflen, set, setsize);
1029 printf("%-23s0x%s\n", key, p);
1030 } else {
1031 p = cpulist_create(setbuf, setbuflen, set, setsize);
1032 print_s(key, p);
1033 }
1034
1035 }
1036
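/*
 * Print the default human readable summary (everything except the --parse
 * output).
 */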
1037 static void
1038 print_readable(struct lscpu_desc *desc, int hex)
1039 {
1040 char buf[512];
1041 int i;
1042 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1043
1044 print_s(_("Architecture:"), desc->arch);
1045
1046 if (desc->mode) {
1047 char buf[64], *p = buf;
1048
1049 if (desc->mode & MODE_32BIT) {
1050 strcpy(p, "32-bit, ");
1051 p += 8;
1052 }
1053 if (desc->mode & MODE_64BIT) {
1054 strcpy(p, "64-bit, ");
1055 p += 8;
1056 }
1057 *(p - 2) = '\0';
1058 print_s(_("CPU op-mode(s):"), buf);
1059 }
1060 #if !defined(WORDS_BIGENDIAN)
1061 print_s(_("Byte Order:"), "Little Endian");
1062 #else
1063 print_s(_("Byte Order:"), "Big Endian");
1064 #endif
1065 print_n(_("CPU(s):"), desc->ncpus);
1066
1067 if (desc->online)
1068 print_cpuset(hex ? _("On-line CPU(s) mask:") :
1069 _("On-line CPU(s) list:"),
1070 desc->online, hex);
1071
1072 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
1073 cpu_set_t *set;
1074
1075 /* Linux kernel provides cpuset of off-line CPUs that contains
1076 * all configured CPUs (see /sys/devices/system/cpu/offline),
1077 * but we want to print only the real (present in the system) off-line CPUs.
1078 */
1079 set = cpuset_alloc(maxcpus, NULL, NULL);
1080 if (!set)
1081 err(EXIT_FAILURE, _("failed to allocate cpu set"));
1082 CPU_ZERO_S(setsize, set);
1083 for (i = 0; i < desc->ncpus; i++) {
1084 if (!is_cpu_online(desc, i))
1085 CPU_SET_S(i, setsize, set);
1086 }
1087 print_cpuset(hex ? _("Off-line CPU(s) mask:") :
1088 _("Off-line CPU(s) list:"),
1089 set, hex);
1090 cpuset_free(set);
1091 }
1092
1093 if (desc->nsockets) {
1094 int cores_per_socket, sockets_per_book, books;
1095
1096 cores_per_socket = sockets_per_book = books = 0;
1097 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1098 * Simply using the cpu topology masks in sysfs will not give
1099 * usable results since everything is virtualized. E.g.
1100 * virtual core 0 may have only 1 cpu, but virtual core 2 may have
1101 * five cpus.
1102 * If the cpu topology is not exported (e.g. 2nd level guest)
1103 * fall back to old calculation scheme.
1104 */
1105 if (path_exist(_PATH_PROC_SYSINFO)) {
1106 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
1107 char buf[BUFSIZ];
1108 int t0, t1, t2;
1109
1110 while (fgets(buf, sizeof(buf), fd) != NULL) {
1111 if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
1112 &t0, &t1, &t2, &books, &sockets_per_book,
1113 &cores_per_socket) == 6)
1114 break;
1115 }
1116 }
1117 print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
1118 print_n(_("Core(s) per socket:"),
1119 cores_per_socket ?: desc->ncores / desc->nsockets);
1120 if (desc->nbooks) {
1121 print_n(_("Socket(s) per book:"),
1122 sockets_per_book ?: desc->nsockets / desc->nbooks);
1123 print_n(_("Book(s):"), books ?: desc->nbooks);
1124 } else {
1125 print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
1126 }
1127 }
1128 if (desc->nnodes)
1129 print_n(_("NUMA node(s):"), desc->nnodes);
1130 if (desc->vendor)
1131 print_s(_("Vendor ID:"), desc->vendor);
1132 if (desc->family)
1133 print_s(_("CPU family:"), desc->family);
1134 if (desc->model)
1135 print_s(_("Model:"), desc->model);
1136 if (desc->stepping)
1137 print_s(_("Stepping:"), desc->stepping);
1138 if (desc->mhz)
1139 print_s(_("CPU MHz:"), desc->mhz);
1140 if (desc->bogomips)
1141 print_s(_("BogoMIPS:"), desc->bogomips);
1142 if (desc->virtflag) {
1143 if (!strcmp(desc->virtflag, "svm"))
1144 print_s(_("Virtualization:"), "AMD-V");
1145 else if (!strcmp(desc->virtflag, "vmx"))
1146 print_s(_("Virtualization:"), "VT-x");
1147 }
1148 if (desc->hyper) {
1149 print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
1150 print_s(_("Virtualization type:"), virt_types[desc->virtype]);
1151 }
1152 if (desc->dispatching >= 0)
1153 print_s(_("Dispatching mode:"), disp_modes[desc->dispatching]);
1154 if (desc->ncaches) {
1155 char buf[512];
1156 int i;
1157
1158 for (i = desc->ncaches - 1; i >= 0; i--) {
1159 snprintf(buf, sizeof(buf),
1160 _("%s cache:"), desc->caches[i].name);
1161 print_s(buf, desc->caches[i].size);
1162 }
1163 }
1164
1165 for (i = 0; i < desc->nnodes; i++) {
1166 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), i);
1167 print_cpuset(buf, desc->nodemaps[i], hex);
1168 }
1169 }
1170
1171 static void __attribute__((__noreturn__)) usage(FILE *out)
1172 {
1173 fputs(_("\nUsage:\n"), out);
1174 fprintf(out,
1175 _(" %s [options]\n"), program_invocation_short_name);
1176
1177 fputs(_("\nOptions:\n"), out);
1178 fputs(_(" -h, --help print this help\n"
1179 " -p, --parse <list> print out a parsable instead of a readable format\n"
1180 " -s, --sysroot <dir> use directory DIR as system root\n"
1181 " -x, --hex print hexadecimal masks rather than lists of CPUs\n"
1182 " -V, --version print version information and exit\n\n"), out);
1183
1184 exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
1185 }
1186
1187 int main(int argc, char *argv[])
1188 {
1189 struct lscpu_desc _desc, *desc = &_desc;
1190 int parsable = 0, c, i, hex = 0;
1191 int columns[ARRAY_SIZE(colnames)], ncolumns = 0;
1192 int compatible = 0;
1193
1194 static const struct option longopts[] = {
1195 { "help", no_argument, 0, 'h' },
1196 { "parse", optional_argument, 0, 'p' },
1197 { "sysroot", required_argument, 0, 's' },
1198 { "hex", no_argument, 0, 'x' },
1199 { "version", no_argument, 0, 'V' },
1200 { NULL, 0, 0, 0 }
1201 };
1202
1203 setlocale(LC_ALL, "");
1204 bindtextdomain(PACKAGE, LOCALEDIR);
1205 textdomain(PACKAGE);
1206
1207 while ((c = getopt_long(argc, argv, "hp::s:xV", longopts, NULL)) != -1) {
1208 switch (c) {
1209 case 'h':
1210 usage(stdout);
1211 case 'p':
1212 parsable = 1;
1213 if (optarg) {
1214 if (*optarg == '=')
1215 optarg++;
1216 ncolumns = string_to_idarray(optarg,
1217 columns, ARRAY_SIZE(columns),
1218 column_name_to_id);
1219 if (ncolumns < 0)
1220 return EXIT_FAILURE;
1221 } else {
1222 columns[ncolumns++] = COL_CPU;
1223 columns[ncolumns++] = COL_CORE;
1224 columns[ncolumns++] = COL_SOCKET;
1225 columns[ncolumns++] = COL_NODE;
1226 columns[ncolumns++] = COL_CACHE;
1227 compatible = 1;
1228 }
1229 break;
1230 case 's':
1231 sysrootlen = strlen(optarg);
1232 strncpy(pathbuf, optarg, sizeof(pathbuf));
1233 pathbuf[sizeof(pathbuf) - 1] = '\0';
1234 break;
1235 case 'x':
1236 hex = 1;
1237 break;
1238 case 'V':
1239 printf(_("%s from %s\n"), program_invocation_short_name,
1240 PACKAGE_STRING);
1241 return EXIT_SUCCESS;
1242 default:
1243 usage(stderr);
1244 }
1245 }
1246
1247 memset(desc, 0, sizeof(*desc));
1248
1249 read_basicinfo(desc);
1250
1251 for (i = 0; i < desc->ncpus; i++) {
1252 if (desc->online && !is_cpu_online(desc, i))
1253 continue;
1254 read_topology(desc, i);
1255 read_cache(desc, i);
1256 read_polarization(desc, i);
1257 read_address(desc, i);
1258 }
1259
1260 qsort(desc->caches, desc->ncaches, sizeof(struct cpu_cache), cachecmp);
1261
1262 read_nodes(desc);
1263
1264 read_hypervisor(desc);
1265
1266 /* Show time! */
1267 if (parsable)
1268 print_parsable(desc, columns, ncolumns, compatible);
1269 else
1270 print_readable(desc, hex);
1271
1272 return EXIT_SUCCESS;
1273 }