/*
 * lscpu - CPU architecture information helper
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "cpuset.h"
#include "nls.h"
#include "xalloc.h"
#include "c.h"
#include "strutils.h"
#include "bitops.h"


#define CACHE_MAX 100

/* /sys paths */
#define _PATH_SYS_SYSTEM        "/sys/devices/system"
#define _PATH_SYS_CPU           _PATH_SYS_SYSTEM "/cpu"
#define _PATH_PROC_XEN          "/proc/xen"
#define _PATH_PROC_XENCAP       _PATH_PROC_XEN "/capabilities"
#define _PATH_PROC_CPUINFO      "/proc/cpuinfo"
#define _PATH_PROC_PCIDEVS      "/proc/bus/pci/devices"
#define _PATH_PROC_SYSINFO      "/proc/sysinfo"

/* virtualization types */
enum {
        VIRT_NONE = 0,
        VIRT_PARA,
        VIRT_FULL
};
const char *virt_types[] = {
        [VIRT_NONE]   = N_("none"),
        [VIRT_PARA]   = N_("para"),
        [VIRT_FULL]   = N_("full")
};

/* hypervisor vendors */
enum {
        HYPER_NONE = 0,
        HYPER_XEN,
        HYPER_KVM,
        HYPER_MSHV,
        HYPER_VMWARE,
        HYPER_IBM
};
const char *hv_vendors[] = {
        [HYPER_NONE]   = NULL,
        [HYPER_XEN]    = "Xen",
        [HYPER_KVM]    = "KVM",
        [HYPER_MSHV]   = "Microsoft",
        [HYPER_VMWARE] = "VMware",
        [HYPER_IBM]    = "IBM"
};

/* CPU modes */
enum {
        MODE_32BIT = (1 << 1),
        MODE_64BIT = (1 << 2)
};

/* cache(s) description */
struct cpu_cache {
        char *name;
        char *size;

        int nsharedmaps;
        cpu_set_t **sharedmaps;
};

/* global description */
struct lscpu_desc {
        char *arch;
        char *vendor;
        char *family;
        char *model;
        char *virtflag;         /* virtualization flag (vmx, svm) */
        int hyper;              /* hypervisor vendor ID */
        int virtype;            /* VIRT_PARA|FULL|NONE ? */
        char *mhz;
        char *stepping;
        char *bogomips;
        char *flags;
        int mode;               /* rm, lm or/and tm */

        int ncpus;              /* number of CPUs */
        cpu_set_t *online;      /* mask with online CPUs */

        int nnodes;             /* number of NUMA nodes */
        cpu_set_t **nodemaps;   /* array with NUMA nodes */

        /* books -- based on book_siblings (internal kernel map of cpuX's
         * hardware threads within the same book) */
        int nbooks;             /* number of all online books */
        cpu_set_t **bookmaps;   /* unique book_siblings */

        /* sockets -- based on core_siblings (internal kernel map of cpuX's
         * hardware threads within the same physical_package_id (socket)) */
        int nsockets;           /* number of all online sockets */
        cpu_set_t **socketmaps; /* unique core_siblings */

        /* cores -- based on thread_siblings (internal kernel map of cpuX's
         * hardware threads within the same core as cpuX) */
        int ncores;             /* number of all online cores */
        cpu_set_t **coremaps;   /* unique thread_siblings */

        int nthreads;           /* number of online threads */

        int ncaches;
        struct cpu_cache *caches;
};

static size_t sysrootlen;
static char pathbuf[PATH_MAX];
static int maxcpus;             /* size in bits of kernel cpu mask */

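/*
 * A CPU is reported as on-line only when an on-line mask was read from sysfs
 * and the CPU's bit is set in it; without the mask the macro returns 0.
 */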
#define is_cpu_online(_d, _cpu) \
        ((_d) && (_d)->online ? \
                CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)

static FILE *path_fopen(const char *mode, int exit_on_err, const char *path, ...)
                __attribute__ ((__format__ (__printf__, 3, 4)));
static void path_getstr(char *result, size_t len, const char *path, ...)
                __attribute__ ((__format__ (__printf__, 3, 4)));
static int path_getnum(const char *path, ...)
                __attribute__ ((__format__ (__printf__, 1, 2)));
static int path_exist(const char *path, ...)
                __attribute__ ((__format__ (__printf__, 1, 2)));
static cpu_set_t *path_cpuset(const char *path, ...)
                __attribute__ ((__format__ (__printf__, 1, 2)));
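
/*
 * The path_*() helpers take printf-like patterns that are resolved relative
 * to the optional --sysroot prefix kept at the start of pathbuf.  Typical
 * calls look like this (illustrative only):
 *
 *      int lvl = path_getnum(_PATH_SYS_CPU "/cpu%d/cache/index%d/level", cpu, idx);
 *      cpu_set_t *set = path_cpulist(_PATH_SYS_SYSTEM "/cpu/online");
 */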

/*
 * Parsable output
 */
enum {
        COL_CPU,
        COL_CORE,
        COL_SOCKET,
        COL_NODE,
        COL_BOOK,
        COL_CACHE
};

static const char *colnames[] =
{
        [COL_CPU] = "CPU",
        [COL_CORE] = "Core",
        [COL_SOCKET] = "Socket",
        [COL_NODE] = "Node",
        [COL_BOOK] = "Book",
        [COL_CACHE] = "Cache"
};


static int column_name_to_id(const char *name, size_t namesz)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(colnames); i++) {
                const char *cn = colnames[i];

                if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
                        return i;
        }
        warnx(_("unknown column: %s"), name);
        return -1;
}

static const char *
path_vcreate(const char *path, va_list ap)
{
        if (sysrootlen)
                vsnprintf(pathbuf + sysrootlen,
                          sizeof(pathbuf) - sysrootlen, path, ap);
        else
                vsnprintf(pathbuf, sizeof(pathbuf), path, ap);
        return pathbuf;
}

static FILE *
path_vfopen(const char *mode, int exit_on_error, const char *path, va_list ap)
{
        FILE *f;
        const char *p = path_vcreate(path, ap);

        f = fopen(p, mode);
        if (!f && exit_on_error)
                err(EXIT_FAILURE, _("error: cannot open %s"), p);
        return f;
}

static FILE *
path_fopen(const char *mode, int exit_on_error, const char *path, ...)
{
        FILE *fd;
        va_list ap;

        va_start(ap, path);
        fd = path_vfopen(mode, exit_on_error, path, ap);
        va_end(ap);

        return fd;
}

static void
path_getstr(char *result, size_t len, const char *path, ...)
{
        FILE *fd;
        va_list ap;

        va_start(ap, path);
        fd = path_vfopen("r", 1, path, ap);
        va_end(ap);

        if (!fgets(result, len, fd))
                err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
        fclose(fd);

        len = strlen(result);
        if (result[len - 1] == '\n')
                result[len - 1] = '\0';
}

static int
path_getnum(const char *path, ...)
{
        FILE *fd;
        va_list ap;
        int result;

        va_start(ap, path);
        fd = path_vfopen("r", 1, path, ap);
        va_end(ap);

        if (fscanf(fd, "%d", &result) != 1) {
                if (ferror(fd))
                        err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
                else
                        errx(EXIT_FAILURE, _("parse error: %s"), pathbuf);
        }
        fclose(fd);
        return result;
}

static int
path_exist(const char *path, ...)
{
        va_list ap;
        const char *p;

        va_start(ap, path);
        p = path_vcreate(path, ap);
        va_end(ap);

        return access(p, F_OK) == 0;
}

static cpu_set_t *
path_cpuparse(int islist, const char *path, va_list ap)
{
        FILE *fd;
        cpu_set_t *set;
        size_t setsize, len = maxcpus * 7;
        char buf[len];

        fd = path_vfopen("r", 1, path, ap);

        if (!fgets(buf, len, fd))
                err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
        fclose(fd);

        len = strlen(buf);
        if (buf[len - 1] == '\n')
                buf[len - 1] = '\0';

        set = cpuset_alloc(maxcpus, &setsize, NULL);
        if (!set)
                err(EXIT_FAILURE, _("failed to allocate cpu set"));

        if (islist) {
                if (cpulist_parse(buf, set, setsize))
                        errx(EXIT_FAILURE, _("failed to parse CPU list %s"), buf);
        } else {
                if (cpumask_parse(buf, set, setsize))
                        errx(EXIT_FAILURE, _("failed to parse CPU mask %s"), buf);
        }
        return set;
}

static cpu_set_t *
path_cpuset(const char *path, ...)
{
        va_list ap;
        cpu_set_t *set;

        va_start(ap, path);
        set = path_cpuparse(0, path, ap);
        va_end(ap);

        return set;
}

static cpu_set_t *
path_cpulist(const char *path, ...)
{
        va_list ap;
        cpu_set_t *set;

        va_start(ap, path);
        set = path_cpuparse(1, path, ap);
        va_end(ap);

        return set;
}

/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *      "<pattern>   : <value>"
 */
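
/*
 * Example (illustrative): for the x86 /proc/cpuinfo line
 *      "cpu MHz         : 2394.000"
 * lookup(line, "cpu MHz", &desc->mhz) stores "2394.000" in desc->mhz and
 * returns 1.
 */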
int lookup(char *line, char *pattern, char **value)
{
        char *p, *v;
        int len = strlen(pattern);

        if (!*line)
                return 0;

        /* pattern */
        if (strncmp(line, pattern, len))
                return 0;

        /* white spaces */
        for (p = line + len; isspace(*p); p++);

        /* separator */
        if (*p != ':')
                return 0;

        /* white spaces */
        for (++p; isspace(*p); p++);

        /* value */
        if (!*p)
                return 0;
        v = p;

        /* end of value */
        len = strlen(line) - 1;
        for (p = line + len; isspace(*(p-1)); p--);
        *p = '\0';

        *value = xstrdup(v);
        return 1;
}

/* Don't initialize the mode on platforms where we cannot detect
 * whether the CPU supports 64-bit mode.
 */
static int
init_mode(void)
{
        int m = 0;

        if (sysrootlen)
                /* reading info from any /{sys,proc} dump, don't mix it with
                 * information about our real CPU */
                return 0;

#if defined(__alpha__) || defined(__ia64__)
        m |= MODE_64BIT;        /* 64bit platforms only */
#endif
        /* platforms with 64bit flag in /proc/cpuinfo, define
         * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
        m |= MODE_32BIT;
#endif
        return m;
}

static void
read_basicinfo(struct lscpu_desc *desc)
{
        FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
        char buf[BUFSIZ];
        struct utsname utsbuf;

        /* architecture */
        if (uname(&utsbuf) == -1)
                err(EXIT_FAILURE, _("error: uname failed"));
        desc->arch = xstrdup(utsbuf.machine);

        /* count CPU(s) */
        while (path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d", desc->ncpus))
                desc->ncpus++;

        /* details */
        while (fgets(buf, sizeof(buf), fp) != NULL) {
                if (lookup(buf, "vendor", &desc->vendor)) ;
                else if (lookup(buf, "vendor_id", &desc->vendor)) ;
                else if (lookup(buf, "family", &desc->family)) ;
                else if (lookup(buf, "cpu family", &desc->family)) ;
                else if (lookup(buf, "model", &desc->model)) ;
                else if (lookup(buf, "stepping", &desc->stepping)) ;
                else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
                else if (lookup(buf, "flags", &desc->flags)) ;          /* x86 */
                else if (lookup(buf, "features", &desc->flags)) ;       /* s390 */
                else if (lookup(buf, "type", &desc->flags)) ;           /* sparc64 */
                else if (lookup(buf, "bogomips", &desc->bogomips)) ;
                else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
                else
                        continue;
        }

        desc->mode = init_mode();

        if (desc->flags) {
                snprintf(buf, sizeof(buf), " %s ", desc->flags);
                if (strstr(buf, " svm "))
                        desc->virtflag = strdup("svm");
                else if (strstr(buf, " vmx "))
                        desc->virtflag = strdup("vmx");
                if (strstr(buf, " lm "))
                        desc->mode |= MODE_32BIT | MODE_64BIT;          /* x86_64 */
                if (strstr(buf, " zarch "))
                        desc->mode |= MODE_32BIT | MODE_64BIT;          /* s390x */
                if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
                        desc->mode |= MODE_32BIT | MODE_64BIT;          /* sparc64 */
        }

        fclose(fp);

        if (path_exist(_PATH_SYS_SYSTEM "/cpu/kernel_max"))
                /* note that kernel_max is maximum index [NR_CPUS-1] */
                maxcpus = path_getnum(_PATH_SYS_SYSTEM "/cpu/kernel_max") + 1;

        else if (!sysrootlen)
                /* the root is '/' so we are working with data from the current kernel */
                maxcpus = get_max_number_of_cpus();
        else
                /* we are reading some /sys snapshot instead of the real /sys,
                 * let's use any crazy number... */
                maxcpus = desc->ncpus > 2048 ? desc->ncpus : 2048;

        /* get mask for online CPUs */
        if (path_exist(_PATH_SYS_SYSTEM "/cpu/online")) {
                size_t setsize = CPU_ALLOC_SIZE(maxcpus);
                desc->online = path_cpulist(_PATH_SYS_SYSTEM "/cpu/online");
                desc->nthreads = CPU_COUNT_S(setsize, desc->online);
        }
}

static int
has_pci_device(int vendor, int device)
{
        FILE *f;
        int num, fn, ven, dev;
        int res = 1;

        f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
        if (!f)
                return 0;

        /* for more details about bus/pci/devices format see
         * drivers/pci/proc.c in linux kernel
         */
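        /* Each line starts with "<bus><devfn>\t<vendor><device>" in hex, so
         * e.g. the Xen platform device 0x5853:0x0001 shows up with a second
         * field of "58530001" (illustrative). */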
        while (fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
                        &num, &fn, &ven, &dev) == 4) {

                if (ven == vendor && dev == device)
                        goto found;
        }

        res = 0;
found:
        fclose(f);
        return res;
}

#if defined(__x86_64__) || defined(__i386__)

/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                unsigned int *ecx, unsigned int *edx)
{
        __asm__(
#if defined(__PIC__) && defined(__i386__)
                /* x86 PIC cannot clobber ebx -- gcc complains */
526 "pushl %%ebx;"
527 "cpuid;"
528 "movl %%ebx, %%esi;"
529 "popl %%ebx;"
530 : "=S" (*ebx),
531 #else
532 "cpuid;"
533 : "=b" (*ebx),
534 #endif
535 "=a" (*eax),
536 "=c" (*ecx),
537 "=d" (*edx)
538 : "1" (op), "c"(0));
539 }

static void
read_hypervisor_cpuid(struct lscpu_desc *desc)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
        char hyper_vendor_id[13];

        memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));

        cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
        memcpy(hyper_vendor_id + 0, &ebx, 4);
        memcpy(hyper_vendor_id + 4, &ecx, 4);
        memcpy(hyper_vendor_id + 8, &edx, 4);
        hyper_vendor_id[12] = '\0';

        if (!hyper_vendor_id[0])
                return;

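        /* Compare against the known vendor ID signatures.  KVM's signature
         * is only 9 bytes ("KVMKVMKVM") and is padded with zero bytes, hence
         * the shorter length. */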
        if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
                desc->hyper = HYPER_XEN;
        else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
                desc->hyper = HYPER_KVM;
        else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
                desc->hyper = HYPER_MSHV;
        else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
                desc->hyper = HYPER_VMWARE;
}

#else /* !(__x86_64__ || __i386__) */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc)
{
}
#endif

static void
read_hypervisor(struct lscpu_desc *desc)
{
        read_hypervisor_cpuid(desc);

        if (desc->hyper)
                /* hvm */
                desc->virtype = VIRT_FULL;

        else if (path_exist(_PATH_PROC_XEN)) {
                /* Xen para-virt or dom0 */
                FILE *fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
                int dom0 = 0;

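                /* /proc/xen/capabilities contains "control_d" only in the
                 * privileged control domain (dom0), which runs on bare metal
                 * rather than as a para-virtualized guest. */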
                if (fd) {
                        char buf[256];

                        if (fscanf(fd, "%s", buf) == 1 &&
                            !strcmp(buf, "control_d"))
                                dom0 = 1;
                        fclose(fd);
                }
                desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
                desc->hyper = HYPER_XEN;

        } else if (has_pci_device(0x5853, 0x0001)) {
                /* Xen full-virt on non-x86_64 */
                desc->hyper = HYPER_XEN;
                desc->virtype = VIRT_FULL;
        } else if (path_exist(_PATH_PROC_SYSINFO)) {
                FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
                char buf[BUFSIZ];

                desc->hyper = HYPER_IBM;
                desc->virtype = VIRT_FULL;
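                /* The "Control Program:" line in /proc/sysinfo names the
                 * hypervisor this guest runs under (e.g. z/VM or KVM);
                 * default to IBM unless KVM is reported. */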
                while (fgets(buf, sizeof(buf), fd) != NULL) {
                        if (!strstr(buf, "Control Program:"))
                                continue;
                        if (!strstr(buf, "KVM"))
                                desc->hyper = HYPER_IBM;
                        else
                                desc->hyper = HYPER_KVM;
                }
                fclose(fd);
        }
}

/* add @set to the @ary; an unnecessary (duplicate) set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
{
        int i;
        size_t setsize = CPU_ALLOC_SIZE(maxcpus);

        if (!ary)
                return -1;

        for (i = 0; i < *items; i++) {
                if (CPU_EQUAL_S(setsize, set, ary[i]))
                        break;
        }
        if (i == *items) {
                ary[*items] = set;
                ++*items;
                return 0;
        }
        CPU_FREE(set);
        return 1;
}

static void
read_topology(struct lscpu_desc *desc, int num)
{
        cpu_set_t *thread_siblings, *core_siblings, *book_siblings;

        if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
                return;

        thread_siblings = path_cpuset(_PATH_SYS_CPU
                                "/cpu%d/topology/thread_siblings", num);
        core_siblings = path_cpuset(_PATH_SYS_CPU
                                "/cpu%d/topology/core_siblings", num);
        book_siblings = NULL;
        if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num)) {
                book_siblings = path_cpuset(_PATH_SYS_CPU
                                "/cpu%d/topology/book_siblings", num);
        }

        if (!desc->coremaps) {
                int nbooks, nsockets, ncores, nthreads;
                size_t setsize = CPU_ALLOC_SIZE(maxcpus);

                /* threads within one core */
                nthreads = CPU_COUNT_S(setsize, thread_siblings);
                /* cores within one socket */
                ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
                /* number of sockets within one book.  Because of odd or
                 * non-present cpu maps and to keep the calculation simple we
                 * make sure that nsockets and nbooks are at least 1.
                 */
                nsockets = desc->ncpus / nthreads / ncores ?: 1;
                /* number of books */
                nbooks = desc->ncpus / nthreads / ncores / nsockets ?: 1;
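                /* Worked example (illustrative): on a 2-socket machine with
                 * 4 cores per socket and 2 threads per core, thread_siblings
                 * has 2 bits set and core_siblings has 8, so nthreads = 2,
                 * ncores = 8 / 2 = 4 and nsockets = 16 / 2 / 4 = 2. */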

                /* all threads, see also read_basicinfo()
                 * -- fallback for kernels without
                 *    /sys/devices/system/cpu/online.
                 */
                if (!desc->nthreads)
                        desc->nthreads = nbooks * nsockets * ncores * nthreads;
                /* For each map we make sure that it can have up to ncpus
                 * entries. This is because we cannot reliably calculate the
                 * number of cores, sockets and books on all architectures.
                 * E.g. completely virtualized architectures like s390 may
                 * have multiple sockets of different sizes.
                 */
                desc->coremaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
                desc->socketmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
                if (book_siblings)
                        desc->bookmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
        }

        add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
        add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
        if (book_siblings)
                add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
}

static int
cachecmp(const void *a, const void *b)
{
        struct cpu_cache *c1 = (struct cpu_cache *) a;
        struct cpu_cache *c2 = (struct cpu_cache *) b;

        return strcmp(c2->name, c1->name);
}

static void
read_cache(struct lscpu_desc *desc, int num)
{
        char buf[256];
        int i;

        if (!desc->ncaches) {
                while (path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d/cache/index%d",
                                        num, desc->ncaches))
                        desc->ncaches++;

                if (!desc->ncaches)
                        return;

                desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
        }
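        /* Each /sys/.../cache/index%d directory describes one cache: "type"
         * is e.g. "Data", "Instruction" or "Unified", "level" is the numeric
         * cache level and "size" is a human-readable string such as "32K". */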
        for (i = 0; i < desc->ncaches; i++) {
                struct cpu_cache *ca = &desc->caches[i];
                cpu_set_t *map;

                if (!ca->name) {
                        int type, level;

                        /* cache type */
                        path_getstr(buf, sizeof(buf),
                                        _PATH_SYS_CPU "/cpu%d/cache/index%d/type",
                                        num, i);
                        if (!strcmp(buf, "Data"))
                                type = 'd';
                        else if (!strcmp(buf, "Instruction"))
                                type = 'i';
                        else
                                type = 0;

                        /* cache level */
                        level = path_getnum(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
                                        num, i);
                        if (type)
                                snprintf(buf, sizeof(buf), "L%d%c", level, type);
                        else
                                snprintf(buf, sizeof(buf), "L%d", level);

                        ca->name = xstrdup(buf);

                        /* cache size */
                        path_getstr(buf, sizeof(buf),
                                        _PATH_SYS_CPU "/cpu%d/cache/index%d/size",
                                        num, i);
                        ca->size = xstrdup(buf);
                }

                /* information about how CPUs share different caches */
                map = path_cpuset(_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
                                num, i);

                if (!ca->sharedmaps)
                        ca->sharedmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
                add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
        }
}

static void
read_nodes(struct lscpu_desc *desc)
{
        int i;

        /* number of NUMA nodes */
        while (path_exist(_PATH_SYS_SYSTEM "/node/node%d", desc->nnodes))
                desc->nnodes++;

        if (!desc->nnodes)
                return;

        desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));

        /* information about how nodes share different CPUs */
        for (i = 0; i < desc->nnodes; i++)
                desc->nodemaps[i] = path_cpuset(
                                _PATH_SYS_SYSTEM "/node/node%d/cpumap",
                                i);
}

static void
print_parsable_cell(struct lscpu_desc *desc, int i, int col, int compatible)
{
        int j;
        size_t setsize = CPU_ALLOC_SIZE(maxcpus);

        switch (col) {
        case COL_CPU:
                printf("%d", i);
                break;
        case COL_CORE:
                for (j = 0; j < desc->ncores; j++) {
                        if (CPU_ISSET_S(i, setsize, desc->coremaps[j])) {
                                printf("%d", j);
                                break;
                        }
                }
                break;
        case COL_SOCKET:
                for (j = 0; j < desc->nsockets; j++) {
                        if (CPU_ISSET_S(i, setsize, desc->socketmaps[j])) {
                                printf("%d", j);
                                break;
                        }
                }
                break;
        case COL_NODE:
                for (j = 0; j < desc->nnodes; j++) {
                        if (CPU_ISSET_S(i, setsize, desc->nodemaps[j])) {
                                printf("%d", j);
                                break;
                        }
                }
                break;
        case COL_BOOK:
                for (j = 0; j < desc->nbooks; j++) {
                        if (CPU_ISSET_S(i, setsize, desc->bookmaps[j])) {
                                printf("%d", j);
                                break;
                        }
                }
                break;
        case COL_CACHE:
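                /* Print one field per cache, in the same order as the header
                 * built by print_parsable(); the value is the index of the
                 * shared_cpu_map that contains this CPU. */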
                for (j = desc->ncaches - 1; j >= 0; j--) {
                        struct cpu_cache *ca = &desc->caches[j];
                        int x;

                        for (x = 0; x < ca->nsharedmaps; x++) {
                                if (CPU_ISSET_S(i, setsize, ca->sharedmaps[x])) {
                                        if (j != desc->ncaches - 1)
                                                putchar(compatible ? ',' : ':');
                                        printf("%d", x);
                                        break;
                                }
                        }
                        if (x == ca->nsharedmaps)
                                putchar(',');
                }
                break;
        }
}

/*
 * We support two formats:
 *
 * 1) "compatible" -- this format is compatible with the original lscpu(1)
 *    output and it contains a fixed set of columns.  The CACHE columns are at
 *    the end of the line and the CACHE part is not printed at all if the
 *    number of caches is zero.  The CACHE columns are separated by two
 *    commas, for example:
 *
 *    $ lscpu --parse
 *    # CPU,Core,Socket,Node,,L1d,L1i,L2
 *    0,0,0,0,,0,0,0
 *    1,1,0,0,,1,1,0
 *
 * 2) "user defined output" -- this format always prints all the requested
 *    columns without a special prefix for the CACHE column.  If there are no
 *    caches then the column is empty and the generic header "Cache" is
 *    printed rather than a real cache name.  The CACHE columns are separated
 *    by ':'.
 *
 *    $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
 *    # CPU,Core,Socket,Node,L1d:L1i:L2
 *    0,0,0,0,0:0:0
 *    1,1,0,0,1:1:0
 */
static void
print_parsable(struct lscpu_desc *desc, int cols[], int ncols, int compatible)
{
        int i, c;

        printf(_(
        "# The following is the parsable format, which can be fed to other\n"
        "# programs. Each different item in every column has a unique ID\n"
        "# starting from zero.\n"));

        fputs("# ", stdout);
        for (i = 0; i < ncols; i++) {
                if (cols[i] == COL_CACHE) {
                        if (compatible && !desc->ncaches)
                                continue;
                        if (i > 0)
                                putchar(',');
                        if (compatible && i != 0)
                                putchar(',');
                        for (c = desc->ncaches - 1; c >= 0; c--) {
                                printf("%s", desc->caches[c].name);
                                if (c > 0)
                                        putchar(compatible ? ',' : ':');
                        }
                        if (!desc->ncaches)
                                fputs(colnames[cols[i]], stdout);
                } else {
                        if (i > 0)
                                putchar(',');
                        fputs(colnames[cols[i]], stdout);
                }
        }
        putchar('\n');

        for (i = 0; i < desc->ncpus; i++) {
                if (desc->online && !is_cpu_online(desc, i))
                        continue;
                for (c = 0; c < ncols; c++) {
                        if (compatible && cols[c] == COL_CACHE) {
                                if (!desc->ncaches)
                                        continue;
                                if (c > 0)
                                        putchar(',');
                        }
                        if (c > 0)
                                putchar(',');
                        print_parsable_cell(desc, i, cols[c], compatible);
                }
                putchar('\n');
        }
}


/* output formats "<key> <value>" */
#define print_s(_key, _val)     printf("%-23s%s\n", _key, _val)
#define print_n(_key, _val)     printf("%-23s%d\n", _key, _val)

static void
print_cpuset(const char *key, cpu_set_t *set, int hex)
{
        size_t setsize = CPU_ALLOC_SIZE(maxcpus);
        size_t setbuflen = 7 * maxcpus;
        char setbuf[setbuflen], *p;

        if (hex) {
                p = cpumask_create(setbuf, setbuflen, set, setsize);
                printf("%-23s0x%s\n", key, p);
        } else {
                p = cpulist_create(setbuf, setbuflen, set, setsize);
                print_s(key, p);
        }
}

static void
print_readable(struct lscpu_desc *desc, int hex)
{
        char buf[512];
        int i;
        size_t setsize = CPU_ALLOC_SIZE(maxcpus);

        print_s(_("Architecture:"), desc->arch);

        if (desc->mode) {
                char buf[64], *p = buf;

                if (desc->mode & MODE_32BIT) {
                        strcpy(p, "32-bit, ");
                        p += 8;
                }
                if (desc->mode & MODE_64BIT) {
                        strcpy(p, "64-bit, ");
                        p += 8;
                }
                *(p - 2) = '\0';
                print_s(_("CPU op-mode(s):"), buf);
        }
#if !defined(WORDS_BIGENDIAN)
        print_s(_("Byte Order:"), "Little Endian");
#else
        print_s(_("Byte Order:"), "Big Endian");
#endif
        print_n(_("CPU(s):"), desc->ncpus);

        if (desc->online)
                print_cpuset(hex ? _("On-line CPU(s) mask:") :
                                   _("On-line CPU(s) list:"),
                                desc->online, hex);

        if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
                cpu_set_t *set;

                /* The Linux kernel provides a cpuset of off-line CPUs that
                 * contains all configured CPUs (see
                 * /sys/devices/system/cpu/offline), but we want to print only
                 * the off-line CPUs that are really present in the system.
                 */
                set = cpuset_alloc(maxcpus, NULL, NULL);
                if (!set)
                        err(EXIT_FAILURE, _("failed to allocate cpu set"));
                CPU_ZERO_S(setsize, set);
                for (i = 0; i < desc->ncpus; i++) {
                        if (!is_cpu_online(desc, i))
                                CPU_SET_S(i, setsize, set);
                }
                print_cpuset(hex ? _("Off-line CPU(s) mask:") :
                                   _("Off-line CPU(s) list:"),
                                set, hex);
                cpuset_free(set);
        }

        if (desc->nsockets) {
                int cores_per_socket, sockets_per_book, books;

                cores_per_socket = sockets_per_book = books = 0;
                /* s390 detects its cpu topology via /proc/sysinfo, if present.
                 * Simply using the cpu topology masks in sysfs would not give
                 * usable results, because everything is virtualized there:
                 * e.g. virtual core 0 may have only 1 cpu while virtual core 2
                 * may have five cpus.
                 * If the cpu topology is not exported (e.g. in a 2nd level
                 * guest) fall back to the old calculation scheme.
                 */
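                /* The sysinfo line looks like (illustrative)
                 *      "CPU Topology SW:      0 0 0 4 6 4"
                 * where the last three fields are books, sockets per book
                 * and cores per socket. */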
                if (path_exist(_PATH_PROC_SYSINFO)) {
                        FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
                        char buf[BUFSIZ];
                        int t0, t1, t2;

                        while (fgets(buf, sizeof(buf), fd) != NULL) {
                                if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
                                           &t0, &t1, &t2, &books, &sockets_per_book,
                                           &cores_per_socket) == 6)
                                        break;
                        }
                }
                print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
                print_n(_("Core(s) per socket:"),
                        cores_per_socket ?: desc->ncores / desc->nsockets);
                if (desc->nbooks) {
                        print_n(_("Socket(s) per book:"),
                                sockets_per_book ?: desc->nsockets / desc->nbooks);
                        print_n(_("Book(s):"), books ?: desc->nbooks);
                } else {
                        print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
                }
        }
        if (desc->nnodes)
                print_n(_("NUMA node(s):"), desc->nnodes);
        if (desc->vendor)
                print_s(_("Vendor ID:"), desc->vendor);
        if (desc->family)
                print_s(_("CPU family:"), desc->family);
        if (desc->model)
                print_s(_("Model:"), desc->model);
        if (desc->stepping)
                print_s(_("Stepping:"), desc->stepping);
        if (desc->mhz)
                print_s(_("CPU MHz:"), desc->mhz);
        if (desc->bogomips)
                print_s(_("BogoMIPS:"), desc->bogomips);
        if (desc->virtflag) {
                if (!strcmp(desc->virtflag, "svm"))
                        print_s(_("Virtualization:"), "AMD-V");
                else if (!strcmp(desc->virtflag, "vmx"))
                        print_s(_("Virtualization:"), "VT-x");
        }
        if (desc->hyper) {
                print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
                print_s(_("Virtualization type:"), virt_types[desc->virtype]);
        }
        if (desc->ncaches) {
                char buf[512];
                int i;

                for (i = desc->ncaches - 1; i >= 0; i--) {
                        snprintf(buf, sizeof(buf),
                                        _("%s cache:"), desc->caches[i].name);
                        print_s(buf, desc->caches[i].size);
                }
        }

        for (i = 0; i < desc->nnodes; i++) {
                snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), i);
                print_cpuset(buf, desc->nodemaps[i], hex);
        }
}

static void __attribute__((__noreturn__)) usage(FILE *out)
{
        fprintf(out, _(
                "\nUsage:\n"
                " %s [options]\n"), program_invocation_short_name);

        puts(_( "\nOptions:\n"
                " -h, --help          print this help\n"
                " -p, --parse[=LIST]  print out a parsable instead of a readable format\n"
                " -s, --sysroot DIR   use directory DIR as system root\n"
                " -x, --hex           print hexadecimal masks rather than lists of CPUs\n"));

        exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}

int main(int argc, char *argv[])
{
        struct lscpu_desc _desc, *desc = &_desc;
        int parsable = 0, c, i, hex = 0;
        int columns[ARRAY_SIZE(colnames)], ncolumns = 0;
        int compatible = 0;

        static const struct option longopts[] = {
                { "help",       no_argument,       0, 'h' },
                { "parse",      optional_argument, 0, 'p' },
                { "sysroot",    required_argument, 0, 's' },
                { "hex",        no_argument,       0, 'x' },
                { NULL,         0, 0, 0 }
        };

        setlocale(LC_ALL, "");
        bindtextdomain(PACKAGE, LOCALEDIR);
        textdomain(PACKAGE);

        while ((c = getopt_long(argc, argv, "hp::s:x", longopts, NULL)) != -1) {
                switch (c) {
                case 'h':
                        usage(stdout);
                case 'p':
                        parsable = 1;
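                        /* optional_argument: "--parse=LIST" arrives without
                         * the '=', but "-p=LIST" keeps it in optarg, so skip
                         * a leading '='. */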
                        if (optarg) {
                                if (*optarg == '=')
                                        optarg++;
                                ncolumns = string_to_idarray(optarg,
                                                columns, ARRAY_SIZE(columns),
                                                column_name_to_id);
                                if (ncolumns < 0)
                                        return EXIT_FAILURE;
                        } else {
                                columns[ncolumns++] = COL_CPU;
                                columns[ncolumns++] = COL_CORE;
                                columns[ncolumns++] = COL_SOCKET;
                                columns[ncolumns++] = COL_NODE;
                                columns[ncolumns++] = COL_CACHE;
                                compatible = 1;
                        }
                        break;
                case 's':
                        sysrootlen = strlen(optarg);
                        strncpy(pathbuf, optarg, sizeof(pathbuf));
                        pathbuf[sizeof(pathbuf) - 1] = '\0';
                        break;
                case 'x':
                        hex = 1;
                        break;
                default:
                        usage(stderr);
                }
        }

        memset(desc, 0, sizeof(*desc));

        read_basicinfo(desc);

        for (i = 0; i < desc->ncpus; i++) {
                if (desc->online && !is_cpu_online(desc, i))
                        continue;
                read_topology(desc, i);
                read_cache(desc, i);
        }

        qsort(desc->caches, desc->ncaches, sizeof(struct cpu_cache), cachecmp);

        read_nodes(desc);

        read_hypervisor(desc);

        /* Show time! */
        if (parsable)
                print_parsable(desc, columns, ncolumns, compatible);
        else
                print_readable(desc, hex);

        return EXIT_SUCCESS;
}