/*
 * lscpu - CPU architecture information helper
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>

#if defined(__x86_64__) || defined(__i386__)
# define INCLUDE_VMWARE_BDOOR
#endif

#ifdef INCLUDE_VMWARE_BDOOR
# include <stdint.h>
# include <signal.h>
# include <strings.h>
# include <setjmp.h>
# ifdef HAVE_SYS_IO_H
#  include <sys/io.h>
# endif
#endif

#include <libsmartcols.h>

#include "cpuset.h"
#include "nls.h"
#include "xalloc.h"
#include "c.h"
#include "strutils.h"
#include "bitops.h"
#include "path.h"
#include "closestream.h"
#include "optutils.h"
#include "lscpu.h"

#define CACHE_MAX 100

/* /sys paths */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE		_PATH_SYS_SYSTEM "/node"
#define _PATH_PROC_XEN		"/proc/xen"
#define _PATH_PROC_XENCAP	_PATH_PROC_XEN "/capabilities"
#define _PATH_PROC_CPUINFO	"/proc/cpuinfo"
#define _PATH_PROC_PCIDEVS	"/proc/bus/pci/devices"
#define _PATH_PROC_SYSINFO	"/proc/sysinfo"
#define _PATH_PROC_STATUS	"/proc/self/status"
#define _PATH_PROC_VZ		"/proc/vz"
#define _PATH_PROC_BC		"/proc/bc"
#define _PATH_PROC_DEVICETREE	"/proc/device-tree"
#define _PATH_DEV_MEM		"/dev/mem"

/* virtualization types */
enum {
	VIRT_NONE	= 0,
	VIRT_PARA,
	VIRT_FULL,
	VIRT_CONT
};
const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
};

const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};

/* CPU modes */
enum {
	MODE_32BIT	= (1 << 1),
	MODE_64BIT	= (1 << 2)
};

/* cache(s) description */
struct cpu_cache {
	char		*name;
	char		*size;

	int		nsharedmaps;
	cpu_set_t	**sharedmaps;
};

/* dispatching modes */
enum {
	DISP_HORIZONTAL	= 0,
	DISP_VERTICAL	= 1
};

const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* cpu polarization */
enum {
	POLAR_UNKNOWN	= 0,
	POLAR_VLOW,
	POLAR_VMEDIUM,
	POLAR_VHIGH,
	POLAR_HORIZONTAL
};

struct polarization_modes {
	char *parsable;
	char *readable;
};

struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};

/* global description */
struct lscpu_desc {
	char	*arch;
	char	*vendor;
	char	*family;
	char	*model;
	char	*modelname;
	char	*virtflag;	/* virtualization flag (vmx, svm) */
	char	*hypervisor;	/* hypervisor software */
	int	hyper;		/* hypervisor vendor ID */
	int	virtype;	/* VIRT_PARA|FULL|NONE ? */
	char	*mhz;
	char	**maxmhz;	/* maximum megahertz */
	char	**minmhz;	/* minimum megahertz */
	char	*stepping;
	char	*bogomips;
	char	*flags;
	int	dispatching;	/* none, horizontal or vertical */
	int	mode;		/* rm, lm or/and tm */

	int		ncpuspos;	/* maximal possible CPUs */
	int		ncpus;		/* number of present CPUs */
	cpu_set_t	*present;	/* mask with present CPUs */
	cpu_set_t	*online;	/* mask with online CPUs */

	int		nthreads;	/* number of online threads */

	int		ncaches;
	struct cpu_cache *caches;

	/*
	 * All maps are sequentially indexed (0..ncpuspos); the array index
	 * does not have to match the cpuX number as presented by the kernel.
	 * You have to use real_cpu_num() to get the real cpuX number.
	 *
	 * For example, if the possible system CPUs are 1,3,5 then ncpuspos=3,
	 * so all arrays are indexed in the range 0..2.
	 */
	int		*idx2cpunum;	/* mapping index to CPU num */

	int		nnodes;		/* number of NUMA nodes */
	int		*idx2nodenum;	/* Support for discontinuous nodes */
	cpu_set_t	**nodemaps;	/* array with NUMA nodes */

	/* books -- based on book_siblings (internal kernel map of cpuX's
	 * hardware threads within the same book) */
	int		nbooks;		/* number of all online books */
	cpu_set_t	**bookmaps;	/* unique book_siblings */

	/* sockets -- based on core_siblings (internal kernel map of cpuX's
	 * hardware threads within the same physical_package_id (socket)) */
	int		nsockets;	/* number of all online sockets */
	cpu_set_t	**socketmaps;	/* unique core_siblings */

	/* cores -- based on thread_siblings (internal kernel map of cpuX's
	 * hardware threads within the same core as cpuX) */
	int		ncores;		/* number of all online cores */
	cpu_set_t	**coremaps;	/* unique thread_siblings */

	int		*polarization;	/* cpu polarization */
	int		*addresses;	/* physical cpu addresses */
	int		*configured;	/* cpu configured */
};


enum {
	OUTPUT_SUMMARY	= 0,	/* default */
	OUTPUT_PARSABLE,	/* -p */
	OUTPUT_READABLE,	/* -e */
};

enum {
	SYSTEM_LIVE = 0,	/* analyzing a live system */
	SYSTEM_SNAPSHOT,	/* analyzing a snapshot of a different system */
};

struct lscpu_modifier {
	int		mode;		/* OUTPUT_* */
	int		system;		/* SYSTEM_* */
	unsigned int	hex:1,		/* print CPU masks rather than CPU lists */
			compat:1,	/* use backwardly compatible format */
			online:1,	/* print online CPUs */
			offline:1;	/* print offline CPUs */
};

static int maxcpus;		/* size in bits of kernel cpu mask */

#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])

/*
 * IDs
 */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};

/* column description
 */
struct lscpu_coldesc {
	const char *name;
	const char *help;

	unsigned int  is_abbr:1;	/* name is abbreviation */
};

static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU]          = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE]         = { "CORE", N_("logical core number") },
	[COL_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ]       = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ]       = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};

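/* Translates a column name (case-insensitive, full match required) to its
 * COL_* index; returns -1 and prints a warning for an unknown name. */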
static int
column_name_to_id(const char *name, size_t namesz)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
		const char *cn = coldescs[i].name;

		if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
			return i;
	}
	warnx(_("unknown column: %s"), name);
	return -1;
}

/* Looks up a pattern in a cpuinfo line and returns the value.
 * The expected format is:
 *
 *	"<pattern>   : <value>"
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	if (!*line)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces */
	for (p = line + len; isspace(*p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace(*p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value */
	len = strlen(line) - 1;
	for (p = line + len; isspace(*(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}


/* Don't init the mode for platforms where we are not able to
 * detect that the CPU supports 64-bit mode.
 */
static int
init_mode(struct lscpu_modifier *mod)
{
	int m = 0;

	if (mod->system == SYSTEM_SNAPSHOT)
		/* reading info from any /{sys,proc} dump, don't mix it with
		 * information about our real CPU */
		return 0;

#if defined(__alpha__) || defined(__ia64__)
	m |= MODE_64BIT;	/* 64bit platforms only */
#endif
	/* platforms with 64bit flag in /proc/cpuinfo, define
	 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
	m |= MODE_32BIT;
#endif
	return m;
}

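/* Reads the architecture from uname(2) and the CPU details from
 * /proc/cpuinfo, then initializes maxcpus and the possible/present/online
 * CPU masks and the dispatching mode from sysfs. */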
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));
	desc->arch = xstrdup(utsbuf.machine);

	/* details */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "flags", &desc->flags)) ;		/* x86 */
		else if (lookup(buf, "features", &desc->flags)) ;	/* s390 */
		else if (lookup(buf, "type", &desc->flags)) ;		/* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (path_exist(_PATH_SYS_CPU "/kernel_max"))
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	if (path_exist(_PATH_SYS_CPU "/possible")) {
		cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET(num, tmp))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(tmp);
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (path_exist(_PATH_SYS_CPU "/present")) {
		desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);
	}

	/* get mask for online CPUs */
	if (path_exist(_PATH_SYS_CPU "/online")) {
		desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);
	}

	/* get dispatching mode */
	if (path_exist(_PATH_SYS_CPU "/dispatching"))
		desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
	else
		desc->dispatching = -1;
}

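/* Returns 1 if a PCI device with the given vendor and device ID is listed
 * in /proc/bus/pci/devices, 0 otherwise (or if the file cannot be opened). */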
static int
has_pci_device(unsigned int vendor, unsigned int device)
{
	FILE *f;
	unsigned int num, fn, ven, dev;
	int res = 1;

	f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
	if (!f)
		return 0;

	/* for more details about the bus/pci/devices format see
	 * drivers/pci/proc.c in the linux kernel
	 */
	while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
			&num, &fn, &ven, &dev) == 4) {

		if (ven == vendor && dev == device)
			goto found;
	}

	res = 0;
found:
	fclose(f);
	return res;
}

#if defined(__x86_64__) || defined(__i386__)

/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF	0x40000000

static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"pushl %%ebx;"
		"cpuid;"
		"movl %%ebx, %%esi;"
		"popl %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}

static void
read_hypervisor_cpuid(struct lscpu_desc *desc)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char hyper_vendor_id[13];

	memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));

	cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
	memcpy(hyper_vendor_id + 0, &ebx, 4);
	memcpy(hyper_vendor_id + 4, &ecx, 4);
	memcpy(hyper_vendor_id + 8, &edx, 4);
	hyper_vendor_id[12] = '\0';

	if (!hyper_vendor_id[0])
		return;

	if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
		desc->hyper = HYPER_XEN;
	else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
		desc->hyper = HYPER_KVM;
	else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
		desc->hyper = HYPER_MSHV;
	else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
		desc->hyper = HYPER_VMWARE;
}

#else	/* ! (__x86_64__ || __i386__) */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif

static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* powerpc:
	 * IBM iSeries: legacy; if /proc/iSeries exists, it's para-virtualized on top of OS/400
	 * IBM pSeries: always has a hypervisor
	 *              if the partition-name is "full", it's a kind of "bare-metal": full-system-partition
	 *              otherwise it's some partition created by the Hardware Management Console
	 *              in any case, it's always some sort of HVM
	 *              Note that pSeries could also be emulated by qemu/KVM.
	 * KVM: "linux,kvm" in /hypervisor/compatible indicates a KVM guest
	 * Xen: not in use, not detected
	 */
	if (path_exist("/proc/iSeries")) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;
	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}
	} else if (path_exist(_PATH_PROC_DEVICETREE "/hypervisor/compatible")) {
		FILE *fd;
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/hypervisor/compatible");
		if (fd) {
			char buf[256];
			size_t i, len;
			memset(buf, 0, sizeof(buf));
			len = fread(buf, 1, sizeof(buf) - 1, fd);
			fclose(fd);
			for (i = 0; i < len;) {
				if (!strcmp(&buf[i], "linux,kvm")) {
					desc->hyper = HYPER_KVM;
					desc->virtype = VIRT_FULL;
					break;
				}
				i += strlen(&buf[i]);
				i++;
			}
		}
	}

	return desc->hyper;
}


#ifdef INCLUDE_VMWARE_BDOOR

#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

#define VMWARE_BDOOR(eax, ebx, ecx, edx)                                  \
	__asm__("inl (%%dx), %%eax" :                                     \
		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :              \
		"0"(VMWARE_BDOOR_MAGIC), "1"(VMWARE_BDOOR_CMD_GETVERSION),\
		"2"(VMWARE_BDOOR_PORT), "3"(0) :                          \
		"memory");

static jmp_buf segv_handler_env;

static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}

static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if run as a regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install a SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;

	bzero(&act, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("error: can not set signal handler"));

	VMWARE_BDOOR(eax, ebx, ecx, edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("error: can not restore signal handler"));

	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}

#else /* ! INCLUDE_VMWARE_BDOOR */

static int
is_vmware_platform(void)
{
	return 0;
}

#endif /* INCLUDE_VMWARE_BDOOR */

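/* Fills in desc->hyper, desc->virtype and (on s390) desc->hypervisor.
 * Detection order: CPUID leaf, DMI, the VMware backdoor port, powerpc
 * device-tree, Xen /proc files, well-known PCI IDs, s390 /proc/sysinfo,
 * and finally container/UML heuristics. */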
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper)
		desc->virtype = VIRT_FULL;

	else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (path_exist(_PATH_PROC_XEN)) {
		int dom0 = 0;
		fd = path_fopen("r", 0, _PATH_PROC_XENCAP);

		if (fd) {
			char buf[256];

			if (fscanf(fd, "%s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if (path_exist(_PATH_PROC_SYSINFO)) {
		FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
		char buf[BUFSIZ];

		if (!sysinfo_fd)
			return;
		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
			char *str;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			str = strchr(buf, ':');
			if (!str)
				continue;
			xasprintf(&str, "%s", str + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			while ((str = strstr(desc->hypervisor, "  ")))
				memmove(str, str + 1, strlen(str));
		}
		fclose(sysinfo_fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 *		      /proc/bc should not */
	else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if (path_exist(_PATH_PROC_STATUS)) {
		char buf[BUFSIZ];
		char *val = NULL;

		fd = path_fopen("r", 1, _PATH_PROC_STATUS);
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		if (val) {
			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
		}
	}
}

/* Adds @set to @ary; if an equal set is already present, the now-unnecessary
 * @set is deallocated instead. */
static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
{
	int i;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	if (!ary)
		return -1;

	for (i = 0; i < *items; i++) {
		if (CPU_EQUAL_S(setsize, set, ary[i]))
			break;
	}
	if (i == *items) {
		ary[*items] = set;
		++*items;
		return 0;
	}
	CPU_FREE(set);
	return 1;
}

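/* Reads the thread/core/book sibling masks of the CPU at index @idx from
 * sysfs and records each unique mask in the per-level map arrays. */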
static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
		return;

	thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/thread_siblings", num);
	core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/core_siblings", num);
	book_siblings = NULL;
	if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
		book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
					"/cpu%d/topology/book_siblings", num);

	if (!desc->coremaps) {
		int nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book. Because of odd /
		 * non-present cpu maps and to keep the calculation easy, we
		 * make sure that nsockets and nbooks are at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		if (book_siblings)
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
	}

	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	if (book_siblings)
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
}

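/* s390 only: reads the CPU polarization (horizontal or vertical low/medium/
 * high) from sysfs; skipped when the dispatching mode is unknown. */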
static void
read_polarization(struct lscpu_desc *desc, int idx)
{
	char mode[64];
	int num = real_cpu_num(desc, idx);

	if (desc->dispatching < 0)
		return;
	if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
		return;
	if (!desc->polarization)
		desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
	path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
	if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_VLOW;
	else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_VMEDIUM;
	else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_VHIGH;
	else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_HORIZONTAL;
	else
		desc->polarization[idx] = POLAR_UNKNOWN;
}

static void
read_address(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
		return;
	if (!desc->addresses)
		desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
	desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
}

static void
read_configured(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
		return;
	if (!desc->configured)
		desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
	desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
}

static void
read_max_mhz(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
		return;
	if (!desc->maxmhz)
		desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
	xasprintf(&(desc->maxmhz[idx]), "%.4f",
		  (float)path_read_s32(_PATH_SYS_CPU
				"/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
}

static void
read_min_mhz(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);

	if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
		return;
	if (!desc->minmhz)
		desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
	xasprintf(&(desc->minmhz[idx]), "%.4f",
		  (float)path_read_s32(_PATH_SYS_CPU
				"/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
}

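/* qsort() callback: sorts caches by name in descending order (e.g. L2, L1i,
 * L1d), so the printing loops that walk the array backwards emit L1d first. */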
static int
cachecmp(const void *a, const void *b)
{
	struct cpu_cache *c1 = (struct cpu_cache *) a;
	struct cpu_cache *c2 = (struct cpu_cache *) b;

	return strcmp(c2->name, c1->name);
}

static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, desc->ncaches))
			desc->ncaches++;

		if (!desc->ncaches)
			return;

		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
				num, i))
			continue;
		if (!ca->name) {
			int type, level;

			/* cache type */
			path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/type",
					num, i);
			if (!strcmp(buf, "Data"))
				type = 'd';
			else if (!strcmp(buf, "Instruction"))
				type = 'i';
			else
				type = 0;

			/* cache level */
			level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
					num, i);
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", level);

			ca->name = xstrdup(buf);

			/* cache size */
			if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i)) {
				path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
				ca->size = xstrdup(buf);
			} else {
				ca->size = xstrdup("unknown size");
			}
		}

		/* information about how CPUs share different caches */
		map = path_read_cpuset(maxcpus,
				_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
				num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}

static inline int is_node_dirent(struct dirent *d)
{
	return
		d &&
#ifdef _DIRENT_HAVE_D_TYPE
		(d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
		strncmp(d->d_name, "node", 4) == 0 &&
		isdigit_string(d->d_name + 4);
}

static int
nodecmp(const void *ap, const void *bp)
{
	int *a = (int *) ap, *b = (int *) bp;
	return *a - *b;
}

static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	char *path;

	/* number of NUMA nodes */
	path = path_strdup(_PATH_SYS_NODE);
	dir = opendir(path);
	free(path);

	while (dir && (d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		if (dir)
			closedir(dir);
		return;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	if (dir) {
		rewinddir(dir);
		while ((d = readdir(dir)) && i < desc->nnodes) {
			if (is_node_dirent(d))
				desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
							_("Failed to extract the node number"));
		}
		closedir(dir);
		qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
	}

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		desc->nodemaps[i] = path_read_cpuset(maxcpus,
					_PATH_SYS_NODE "/node%d/cpumap",
					desc->idx2nodenum[i]);
}

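/* Formats the value of column @col for the CPU at index @idx into @buf;
 * returns @buf, or NULL if the buffer is too small for the CACHE column. */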
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CORE:
		if (cpuset_ary_isset(cpu, desc->coremaps,
				     desc->ncores, setsize, &i) == 0)
			snprintf(buf, bufsz, "%zd", i);
		break;
	case COL_SOCKET:
		if (cpuset_ary_isset(cpu, desc->socketmaps,
				     desc->nsockets, setsize, &i) == 0)
			snprintf(buf, bufsz, "%zd", i);
		break;
	case COL_NODE:
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_BOOK:
		if (cpuset_ary_isset(cpu, desc->bookmaps,
				     desc->nbooks, setsize, &i) == 0)
			snprintf(buf, bufsz, "%zd", i);
		break;
	case COL_CACHE:
	{
		char *p = buf;
		size_t sz = bufsz;
		int j;

		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zd", i);
				if (x <= 0 || (size_t) x + 2 >= sz)
					return NULL;
				p += x;
				sz -= x;
			}
			if (j != 0) {
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz++;
			}
		}
		break;
	}
	case COL_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CONFIGURED:
		if (!desc->configured)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz,
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz,
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz,
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz,
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_MAXMHZ:
		if (desc->maxmhz)
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_MINMHZ:
		if (desc->minmhz)
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}

static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
		char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x <= 0 || (size_t) x + 2 > sz)
				return NULL;
			sz -= x;
			p += x;
			if (i > 0) {
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz++;
			}
		}
		if (desc->ncaches)
			return buf;
	}
	snprintf(buf, bufsz, "%s", coldescs[col].name);
	return buf;
}

/*
 * [-p] backend, we support two parsable formats:
 *
 * 1) "compatible" -- this format is compatible with the original lscpu(1)
 *    output and it contains a fixed set of columns. The CACHE columns are at
 *    the end of the line and the CACHE part is not printed at all if the
 *    number of caches is zero. The CACHE columns are separated by two commas,
 *    for example:
 *
 *    $ lscpu --parse
 *    # CPU,Core,Socket,Node,,L1d,L1i,L2
 *    0,0,0,0,,0,0,0
 *    1,1,0,0,,1,1,0
 *
 * 2) "user defined output" -- this format always prints all requested columns
 *    without a special prefix for the CACHE column. If there are no caches
 *    then the column is empty and the header "Cache" is printed rather than
 *    a real cache name. The CACHE columns are separated by ':'.
 *
 *    $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
 *    # CPU,Core,Socket,Node,L1d:L1i:L2
 *    0,0,0,0,0:0:0
 *    1,1,0,0,1:1:0
 */
static void
print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	char buf[BUFSIZ], *data;
	int i;

	/*
	 * Header
	 */
	printf(_(
	"# The following is the parsable format, which can be fed to other\n"
	"# programs. Each different item in every column has a unique ID\n"
	"# starting from zero.\n"));

	fputs("# ", stdout);
	for (i = 0; i < ncols; i++) {
		int col = cols[i];

		if (col == COL_CACHE) {
			if (mod->compat && !desc->ncaches)
				continue;
			if (mod->compat && i != 0)
				putchar(',');
		}
		if (i > 0)
			putchar(',');

		data = get_cell_header(desc, col, mod, buf, sizeof(buf));

		if (data && *data && col != COL_CACHE &&
		    !coldescs[col].is_abbr) {
			/*
			 * For normal column names use mixed case (e.g. "Socket")
			 */
			char *p = data + 1;

			while (p && *p != '\0') {
				*p = tolower((unsigned int) *p);
				p++;
			}
		}
		fputs(data && *data ? data : "", stdout);
	}
	putchar('\n');

	/*
	 * Data
	 */
	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		int cpu = real_cpu_num(desc, i);

		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;
		for (c = 0; c < ncols; c++) {
			if (mod->compat && cols[c] == COL_CACHE) {
				if (!desc->ncaches)
					continue;
				if (c > 0)
					putchar(',');
			}
			if (c > 0)
				putchar(',');

			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			fputs(data && *data ? data : "", stdout);
		}
		putchar('\n');
	}
}

/*
 * [-e] backend
 */
static void
print_readable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table = scols_new_table();

	if (!table)
		err(EXIT_FAILURE, _("failed to initialize output table"));

	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, xstrdup(data), 0, 0))
			err(EXIT_FAILURE, _("failed to initialize output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to initialize output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";
			scols_line_set_data(line, c, data);
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}

/* output formats "<key> <value>" */
#define print_s(_key, _val)	printf("%-23s%s\n", _key, _val)
#define print_n(_key, _val)	printf("%-23s%d\n", _key, _val)

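/* Prints @set either as a hexadecimal mask (prefixed with 0x) or as a
 * human-readable CPU list, depending on @hex. */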
static void
print_cpuset(const char *key, cpu_set_t *set, int hex)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t setbuflen = 7 * maxcpus;
	char setbuf[setbuflen], *p;

	if (hex) {
		p = cpumask_create(setbuf, setbuflen, set, setsize);
		printf("%-23s0x%s\n", key, p);
	} else {
		p = cpulist_create(setbuf, setbuflen, set, setsize);
		print_s(key, p);
	}

}

/*
 * default output
 */
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	char buf[512];
	int i;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	print_s(_("Architecture:"), desc->arch);

	if (desc->mode) {
		char mbuf[64], *p = mbuf;

		if (desc->mode & MODE_32BIT) {
			strcpy(p, "32-bit, ");
			p += 8;
		}
		if (desc->mode & MODE_64BIT) {
			strcpy(p, "64-bit, ");
			p += 8;
		}
		*(p - 2) = '\0';
		print_s(_("CPU op-mode(s):"), mbuf);
	}
#if !defined(WORDS_BIGENDIAN)
	print_s(_("Byte Order:"), "Little Endian");
#else
	print_s(_("Byte Order:"), "Big Endian");
#endif
	print_n(_("CPU(s):"), desc->ncpus);

	if (desc->online)
		print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
					_("On-line CPU(s) list:"),
				desc->online, mod->hex);

	if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
		cpu_set_t *set;

		/* The Linux kernel provides a cpuset of off-line CPUs that
		 * contains all configured CPUs (see
		 * /sys/devices/system/cpu/offline), but we want to print only
		 * the off-line CPUs that are really present in the system.
		 */
		set = cpuset_alloc(maxcpus, NULL, NULL);
		if (!set)
			err(EXIT_FAILURE, _("failed to callocate cpu set"));
		CPU_ZERO_S(setsize, set);
		for (i = 0; i < desc->ncpuspos; i++) {
			int cpu = real_cpu_num(desc, i);
			if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
				CPU_SET_S(cpu, setsize, set);
		}
		print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
					_("Off-line CPU(s) list:"),
			     set, mod->hex);
		cpuset_free(set);
	}

	if (desc->nsockets) {
		int cores_per_socket, sockets_per_book, books;

		cores_per_socket = sockets_per_book = books = 0;
		/* s390 detects its cpu topology via /proc/sysinfo, if present.
		 * Simply using the cpu topology masks in sysfs will not give
		 * usable results since everything is virtualized. E.g.
		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
		 * have five cpus.
		 * If the cpu topology is not exported (e.g. 2nd level guest)
		 * fall back to the old calculation scheme.
		 */
		if (path_exist(_PATH_PROC_SYSINFO)) {
			FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
			char pbuf[BUFSIZ];
			int t0, t1, t2;

			while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
				if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
					   &t0, &t1, &t2, &books, &sockets_per_book,
					   &cores_per_socket) == 6)
					break;
			}
			if (fd)
				fclose(fd);
		}
		print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
		print_n(_("Core(s) per socket:"),
			cores_per_socket ?: desc->ncores / desc->nsockets);
		if (desc->nbooks) {
			print_n(_("Socket(s) per book:"),
				sockets_per_book ?: desc->nsockets / desc->nbooks);
			print_n(_("Book(s):"), books ?: desc->nbooks);
		} else {
			print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
		}
	}
	if (desc->nnodes)
		print_n(_("NUMA node(s):"), desc->nnodes);
	if (desc->vendor)
		print_s(_("Vendor ID:"), desc->vendor);
	if (desc->family)
		print_s(_("CPU family:"), desc->family);
	if (desc->model)
		print_s(_("Model:"), desc->model);
	if (desc->modelname)
		print_s(_("Model name:"), desc->modelname);
	if (desc->stepping)
		print_s(_("Stepping:"), desc->stepping);
	if (desc->mhz)
		print_s(_("CPU MHz:"), desc->mhz);
	if (desc->maxmhz)
		print_s(_("CPU max MHz:"), desc->maxmhz[0]);
	if (desc->minmhz)
		print_s(_("CPU min MHz:"), desc->minmhz[0]);
	if (desc->bogomips)
		print_s(_("BogoMIPS:"), desc->bogomips);
	if (desc->virtflag) {
		if (!strcmp(desc->virtflag, "svm"))
			print_s(_("Virtualization:"), "AMD-V");
		else if (!strcmp(desc->virtflag, "vmx"))
			print_s(_("Virtualization:"), "VT-x");
	}
	if (desc->hypervisor)
		print_s(_("Hypervisor:"), desc->hypervisor);
	if (desc->hyper) {
		print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
		print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
	}
	if (desc->dispatching >= 0)
		print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
	if (desc->ncaches) {
		char cbuf[512];

		for (i = desc->ncaches - 1; i >= 0; i--) {
			snprintf(cbuf, sizeof(cbuf),
					_("%s cache:"), desc->caches[i].name);
			print_s(cbuf, desc->caches[i].size);
		}
	}

	for (i = 0; i < desc->nnodes; i++) {
		snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
		print_cpuset(buf, desc->nodemaps[i], mod->hex);
	}
}

static void __attribute__((__noreturn__)) usage(FILE *out)
{
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all               print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online            print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline           print offline CPUs only\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>]    print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir>     use specified directory as system root\n"), out);
	fputs(_(" -x, --hex               print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	fputs(USAGE_HELP, out);
	fputs(USAGE_VERSION, out);

	fprintf(out, _("\nAvailable columns:\n"));

	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s %s\n", coldescs[i].name, _(coldescs[i].help));

	fprintf(out, _("\nFor more details see lscpu(1).\n"));

	exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}

int main(int argc, char *argv[])
{
	struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
	struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
	int c, i;
	int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
	int cpu_modifier_specified = 0;

	static const struct option longopts[] = {
		{ "all",	no_argument,       0, 'a' },
		{ "online",	no_argument,       0, 'b' },
		{ "offline",	no_argument,       0, 'c' },
		{ "help",	no_argument,       0, 'h' },
		{ "extended",	optional_argument, 0, 'e' },
		{ "parse",	optional_argument, 0, 'p' },
		{ "sysroot",	required_argument, 0, 's' },
		{ "hex",	no_argument,       0, 'x' },
		{ "version",	no_argument,       0, 'V' },
		{ NULL,		0, 0, 0 }
	};

	static const ul_excl_t excl[] = {	/* rows and cols in ASCII order */
		{ 'a','b','c' },
		{ 'e','p' },
		{ 0 }
	};
	int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);
	atexit(close_stdout);

	while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {

		err_exclusive_options(c, longopts, excl, excl_st);

		switch (c) {
		case 'a':
			mod->online = mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'b':
			mod->online = 1;
			cpu_modifier_specified = 1;
			break;
		case 'c':
			mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'h':
			usage(stdout);
		case 'p':
		case 'e':
			if (optarg) {
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
			break;
		case 's':
			path_set_prefix(optarg);
			mod->system = SYSTEM_SNAPSHOT;
			break;
		case 'x':
			mod->hex = 1;
			break;
		case 'V':
			printf(_("%s from %s\n"), program_invocation_short_name,
			       PACKAGE_STRING);
			return EXIT_SUCCESS;
		default:
			usage(stderr);
		}
	}

	if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
		fprintf(stderr,
			_("%s: options --all, --online and --offline may only "
			  "be used with options --extended or --parse.\n"),
			program_invocation_short_name);
		return EXIT_FAILURE;
	}

	if (argc != optind)
		usage(stderr);

	/* set default cpu display mode if none was specified */
	if (!mod->online && !mod->offline) {
		mod->online = 1;
		mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
	}

	read_basicinfo(desc, mod);

	for (i = 0; i < desc->ncpuspos; i++) {
		read_topology(desc, i);
		read_cache(desc, i);
		read_polarization(desc, i);
		read_address(desc, i);
		read_configured(desc, i);
		read_max_mhz(desc, i);
		read_min_mhz(desc, i);
	}

	if (desc->caches)
		qsort(desc->caches, desc->ncaches,
				sizeof(struct cpu_cache), cachecmp);

	read_nodes(desc);
	read_hypervisor(desc, mod);

	switch(mod->mode) {
	case OUTPUT_SUMMARY:
		print_summary(desc, mod);
		break;
	case OUTPUT_PARSABLE:
		if (!ncolumns) {
			columns[ncolumns++] = COL_CPU;
			columns[ncolumns++] = COL_CORE;
			columns[ncolumns++] = COL_SOCKET;
			columns[ncolumns++] = COL_NODE;
			columns[ncolumns++] = COL_CACHE;
			mod->compat = 1;
		}
		print_parsable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_READABLE:
		if (!ncolumns) {
			/* No list was given. Just print whatever is there. */
			columns[ncolumns++] = COL_CPU;
			if (desc->nodemaps)
				columns[ncolumns++] = COL_NODE;
			if (desc->bookmaps)
				columns[ncolumns++] = COL_BOOK;
			if (desc->socketmaps)
				columns[ncolumns++] = COL_SOCKET;
			if (desc->coremaps)
				columns[ncolumns++] = COL_CORE;
			if (desc->caches)
				columns[ncolumns++] = COL_CACHE;
			if (desc->online)
				columns[ncolumns++] = COL_ONLINE;
			if (desc->configured)
				columns[ncolumns++] = COL_CONFIGURED;
			if (desc->polarization)
				columns[ncolumns++] = COL_POLARIZATION;
			if (desc->addresses)
				columns[ncolumns++] = COL_ADDRESS;
			if (desc->maxmhz)
				columns[ncolumns++] = COL_MAXMHZ;
			if (desc->minmhz)
				columns[ncolumns++] = COL_MINMHZ;
		}
		print_readable(desc, columns, ncolumns, mod);
		break;
	}

	return EXIT_SUCCESS;
}