]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu.c
misc: consolidate smartcols error messages
[thirdparty/util-linux.git] / sys-utils / lscpu.c
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36
37 #if (defined(__x86_64__) || defined(__i386__))
38 # if !defined( __SANITIZE_ADDRESS__)
39 # define INCLUDE_VMWARE_BDOOR
40 # else
41 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
42 # endif
43 #endif
44
45 #ifdef INCLUDE_VMWARE_BDOOR
46 # include <stdint.h>
47 # include <signal.h>
48 # include <strings.h>
49 # include <setjmp.h>
50 # ifdef HAVE_SYS_IO_H
51 # include <sys/io.h>
52 # endif
53 #endif
54
55 #if defined(HAVE_LIBRTAS)
56 #include <librtas.h>
57 #endif
58
59 #include <libsmartcols.h>
60
61 #include "cpuset.h"
62 #include "nls.h"
63 #include "xalloc.h"
64 #include "c.h"
65 #include "strutils.h"
66 #include "bitops.h"
67 #include "path.h"
68 #include "closestream.h"
69 #include "optutils.h"
70 #include "lscpu.h"
71
72 #define CACHE_MAX 100
73
74 /* /sys paths */
75 #define _PATH_SYS_SYSTEM "/sys/devices/system"
76 #define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
77 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
78 #define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
79 #define _PATH_PROC_XEN "/proc/xen"
80 #define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
81 #define _PATH_PROC_CPUINFO "/proc/cpuinfo"
82 #define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
83 #define _PATH_PROC_SYSINFO "/proc/sysinfo"
84 #define _PATH_PROC_STATUS "/proc/self/status"
85 #define _PATH_PROC_VZ "/proc/vz"
86 #define _PATH_PROC_BC "/proc/bc"
87 #define _PATH_PROC_DEVICETREE "/proc/device-tree"
88 #define _PATH_DEV_MEM "/dev/mem"
89 #define _PATH_PROC_OSRELEASE "/proc/sys/kernel/osrelease"
90
91 /* Xen Domain feature flag used for /sys/hypervisor/properties/features */
92 #define XENFEAT_supervisor_mode_kernel 3
93 #define XENFEAT_mmu_pt_update_preserve_ad 5
94 #define XENFEAT_hvm_callback_vector 8
95
96 #define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
97 #define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
98 | (1U << XENFEAT_hvm_callback_vector) )
99
/* virtualization types */
enum {
	VIRT_NONE	= 0,	/* bare metal / nothing detected */
	VIRT_PARA,		/* para-virtualized guest */
	VIRT_FULL,		/* fully virtualized guest */
	VIRT_CONT		/* OS-level container */
};

/* human-readable (translatable) names for the VIRT_* constants above */
static const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};
113
/* vendor names indexed by the HYPER_* constants (see lscpu.h) */
static const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]  = "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par",
	[HYPER_WSL]	= "Windows Subsystem for Linux"
};
132
/* PCI vendor IDs used by has_pci_device() based hypervisor detection;
 * 0x0000 means "no PCI signature for this hypervisor" */
static const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device IDs of the emulated graphics adapters, paired with the
 * vendor IDs above for has_pci_device() lookups */
static const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};
150
/* CPU modes (bit flags, combined in lscpu_desc->mode) */
enum {
	MODE_32BIT	= (1 << 1),	/* 32-bit operation mode */
	MODE_64BIT	= (1 << 2)	/* 64-bit operation mode */
};

/* cache(s) description */
struct cpu_cache {
	char		*name;	/* e.g. "L1d" -- level plus optional type letter */
	char		*size;	/* size as text, e.g. "2048K" */

	int		nsharedmaps;	/* number of entries in sharedmaps[] */
	cpu_set_t	**sharedmaps;	/* CPU masks sharing this cache */
};

/* dispatching modes (s390 "dispatching" sysfs attribute) */
enum {
	DISP_HORIZONTAL = 0,
	DISP_VERTICAL	= 1
};

/* human-readable (translatable) names for the DISP_* constants above */
static const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};
176
/* cpu polarization */
enum {
	POLAR_UNKNOWN	= 0,
	POLAR_VLOW,
	POLAR_VMEDIUM,
	POLAR_VHIGH,
	POLAR_HORIZONTAL
};

/* a polarization value rendered two ways: terse for -p, long for -e */
struct polarization_modes {
	char *parsable;		/* short form for parsable output */
	char *readable;		/* long form for human-readable output */
};

/* output strings indexed by the POLAR_* constants above */
static struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};
198
/* global description -- everything lscpu knows about the system,
 * filled by the read_*() helpers below */
struct lscpu_desc {
	char	*arch;		/* architecture from uname(2) */
	char	*vendor;	/* "vendor"/"vendor_id" from /proc/cpuinfo */
	char	*machinetype;	/* s390 */
	char	*family;	/* "family"/"cpu family" from /proc/cpuinfo */
	char	*model;		/* "model" from /proc/cpuinfo */
	char	*modelname;	/* "model name" from /proc/cpuinfo */
	char	*revision;	/* alternative for model (ppc) */
	char	*cpu;		/* alternative for modelname (ppc, sparc) */
	char	*virtflag;	/* virtualization flag (vmx, svm) */
	char	*hypervisor;	/* hypervisor software */
	int	hyper;		/* hypervisor vendor ID */
	int	virtype;	/* VIRT_PARA|FULL|NONE ? */
	char	*mhz;		/* "cpu MHz" from /proc/cpuinfo */
	char	*dynamic_mhz;	/* dynamic mega hertz (s390) */
	char	*static_mhz;	/* static mega hertz (s390) */
	char	**maxmhz;	/* maximum mega hertz */
	char	**minmhz;	/* minimum mega hertz */
	char	*stepping;	/* "stepping" from /proc/cpuinfo */
	char	*bogomips;	/* bogomips from /proc/cpuinfo */
	char	*flags;		/* CPU feature flags (x86 "flags", s390
				 * "features", aarch64 "Features", ...) */
	char	*mtid;		/* maximum thread id (s390) */
	int	dispatching;	/* none, horizontal or vertical */
	int	mode;		/* rm, lm or/and tm */

	int		ncpuspos;	/* maximal possible CPUs */
	int		ncpus;		/* number of present CPUs */
	cpu_set_t	*present;	/* mask with present CPUs */
	cpu_set_t	*online;	/* mask with online CPUs */

	int		nthreads;	/* number of online threads */

	int		ncaches;	/* number of entries in caches[] */
	struct cpu_cache *caches;	/* per-CPU caches (sysfs topology) */

	int		necaches;	/* extra caches (s390) */
	struct cpu_cache *ecaches;	/* extra caches from /proc/cpuinfo */

	/*
	 * All maps are sequentially indexed (0..ncpuspos), the array index
	 * does not have match with cpuX number as presented by kernel. You
	 * have to use real_cpu_num() to get the real cpuX number.
	 *
	 * For example, the possible system CPUs are: 1,3,5, it means that
	 * ncpuspos=3, so all arrays are in range 0..3.
	 */
	int		*idx2cpunum;	/* mapping index to CPU num */

	int		nnodes;		/* number of NUMA modes */
	int		*idx2nodenum;	/* Support for discontinuous nodes */
	cpu_set_t	**nodemaps;	/* array with NUMA nodes */

	/* drawers -- based on drawer_siblings (internal kernel map of cpuX's
	 * hardware threads within the same drawer */
	int		ndrawers;	/* number of all online drawers */
	cpu_set_t	**drawermaps;	/* unique drawer_siblings */
	int		*drawerids;	/* physical drawer ids */

	/* books -- based on book_siblings (internal kernel map of cpuX's
	 * hardware threads within the same book */
	int		nbooks;		/* number of all online books */
	cpu_set_t	**bookmaps;	/* unique book_siblings */
	int		*bookids;	/* physical book ids */

	/* sockets -- based on core_siblings (internal kernel map of cpuX's
	 * hardware threads within the same physical_package_id (socket)) */
	int		nsockets;	/* number of all online sockets */
	cpu_set_t	**socketmaps;	/* unique core_siblings */
	int		*socketids;	/* physical socket ids */

	/* cores -- based on thread_siblings (internal kernel map of cpuX's
	 * hardware threads within the same core as cpuX) */
	int		ncores;		/* number of all online cores */
	cpu_set_t	**coremaps;	/* unique thread_siblings */
	int		*coreids;	/* physical core ids */

	int		*polarization;	/* cpu polarization */
	int		*addresses;	/* physical cpu addresses */
	int		*configured;	/* cpu configured */
	int		physsockets;	/* Physical sockets (modules) */
	int		physchips;	/* Physical chips */
	int		physcoresperchip; /* Physical cores per chip */
};
283
/* output formats */
enum {
	OUTPUT_SUMMARY	= 0,	/* default */
	OUTPUT_PARSABLE,	/* -p */
	OUTPUT_READABLE,	/* -e */
};

/* data source */
enum {
	SYSTEM_LIVE = 0,	/* analyzing a live system */
	SYSTEM_SNAPSHOT,	/* analyzing a snapshot of a different system */
};

/* command-line modifiers: output mode plus formatting flags */
struct lscpu_modifier {
	int		mode;		/* OUTPUT_* */
	int		system;		/* SYSTEM_* */
	unsigned int	hex:1,		/* print CPU masks rather than CPU lists */
			compat:1,	/* use backwardly compatible format */
			online:1,	/* print online CPUs */
			offline:1,	/* print offline CPUs */
			json:1,		/* JSON output format */
			physical:1;	/* use physical numbers */
};
305
static int maxcpus;		/* size in bits of kernel cpu mask */

/* test CPU #_cpu in @_d's online/present mask; 0 when the mask is missing */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* translate a sequential array index (0..ncpuspos) to the kernel cpuX number */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])
316
/*
 * IDs -- column identifiers for -e/-p output, indexes into coldescs[]
 */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_DRAWER,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};
335
/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header as printed */
	const char *help;	/* translatable description for --help */

	unsigned int  is_abbr:1;	/* name is abbreviation */
};

/* column table indexed by the COL_* constants above */
static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU]          = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE]         = { "CORE", N_("logical core number") },
	[COL_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_DRAWER]       = { "DRAWER", N_("logical drawer number") },
	[COL_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ]	   = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ]	   = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};
361
362 static int
363 column_name_to_id(const char *name, size_t namesz)
364 {
365 size_t i;
366
367 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
368 const char *cn = coldescs[i].name;
369
370 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
371 return i;
372 }
373 warnx(_("unknown column: %s"), name);
374 return -1;
375 }
376
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <key>"
 *
 * Returns 1 and sets *value (allocated via xstrdup) on the first match;
 * returns 0 when the line does not match or *value is already set.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	size_t len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast to unsigned char because plain char may be
	 * signed and passing a negative value to isspace() is undefined */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value -- trim trailing whitespace (incl. the newline);
	 * safe because *v is known to be non-space here */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
419
420 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
421 * part of the cache topology information within the sysfs filesystem.
422 * This is true for all shared caches on e.g. s390. When there are layers of
423 * hypervisors in between it is not knows which CPUs share which caches.
424 * Therefore information about shared caches is only available in
425 * /proc/cpuinfo.
426 * Format is:
427 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
428 */
429 static int
430 lookup_cache(char *line, struct lscpu_desc *desc)
431 {
432 struct cpu_cache *cache;
433 long long size;
434 char *p, type;
435 int level;
436
437 /* Make sure line starts with "cache<nr> :" */
438 if (strncmp(line, "cache", 5))
439 return 0;
440 for (p = line + 5; isdigit(*p); p++);
441 for (; isspace(*p); p++);
442 if (*p != ':')
443 return 0;
444
445 p = strstr(line, "scope=") + 6;
446 /* Skip private caches, also present in sysfs */
447 if (!p || strncmp(p, "Private", 7) == 0)
448 return 0;
449 p = strstr(line, "level=");
450 if (!p || sscanf(p, "level=%d", &level) != 1)
451 return 0;
452 p = strstr(line, "type=") + 5;
453 if (!p || !*p)
454 return 0;
455 type = 0;
456 if (strncmp(p, "Data", 4) == 0)
457 type = 'd';
458 if (strncmp(p, "Instruction", 11) == 0)
459 type = 'i';
460 p = strstr(line, "size=");
461 if (!p || sscanf(p, "size=%lld", &size) != 1)
462 return 0;
463
464 desc->necaches++;
465 desc->ecaches = xrealloc(desc->ecaches,
466 desc->necaches * sizeof(struct cpu_cache));
467 cache = &desc->ecaches[desc->necaches - 1];
468 memset(cache, 0 , sizeof(*cache));
469 if (type)
470 xasprintf(&cache->name, "L%d%c", level, type);
471 else
472 xasprintf(&cache->name, "L%d", level);
473 xasprintf(&cache->size, "%lldK", size);
474 return 1;
475 }
476
477 /* Don't init the mode for platforms where we are not able to
478 * detect that CPU supports 64-bit mode.
479 */
480 static int
481 init_mode(struct lscpu_modifier *mod)
482 {
483 int m = 0;
484
485 if (mod->system == SYSTEM_SNAPSHOT)
486 /* reading info from any /{sys,proc} dump, don't mix it with
487 * information about our real CPU */
488 return 0;
489
490 #if defined(__alpha__) || defined(__ia64__)
491 m |= MODE_64BIT; /* 64bit platforms only */
492 #endif
493 /* platforms with 64bit flag in /proc/cpuinfo, define
494 * 32bit default here */
495 #if defined(__i386__) || defined(__x86_64__) || \
496 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
497 m |= MODE_32BIT;
498 #endif
499 return m;
500 }
501
502 #if defined(HAVE_LIBRTAS)
503 #define PROCESSOR_MODULE_INFO 43
/* Decode a big-endian 16-bit value from @buf at @offset.
 * Cast through unsigned char: plain 'char' may be signed, and bytes
 * >= 0x80 would otherwise sign-extend and corrupt the result. */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8)
		+ (unsigned char) buf[offset + 1];
}
508
509 static void read_physical_info_powerpc(struct lscpu_desc *desc)
510 {
511 char buf[BUFSIZ];
512 int rc, len, ntypes;
513
514 desc->physsockets = desc->physchips = desc->physcoresperchip = 0;
515
516 rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
517 if (rc < 0)
518 return;
519
520 len = strbe16toh(buf, 0);
521 if (len < 8)
522 return;
523
524 ntypes = strbe16toh(buf, 2);
525
526 assert(ntypes <= 1);
527 if (!ntypes)
528 return;
529
530 desc->physsockets = strbe16toh(buf, 4);
531 desc->physchips = strbe16toh(buf, 6);
532 desc->physcoresperchip = strbe16toh(buf, 8);
533 }
534 #else
/* no-op stub: physical module info requires librtas (HAVE_LIBRTAS) */
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
539 #endif
540
/*
 * Read the architecture-independent basics into @desc: the architecture
 * from uname(2), textual details from /proc/cpuinfo, CPU modes, and the
 * possible/present/online CPU masks from sysfs.  Exits on fatal errors.
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- lookup() fills each target at most once (first wins) */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
		else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ;   /* s390 */
		else if (lookup(buf, "flags", &desc->flags)) ;		/* x86 */
		else if (lookup(buf, "features", &desc->flags)) ;	/* s390 */
		else if (lookup(buf, "Features", &desc->flags)) ;	/* aarch64 */
		else if (lookup(buf, "type", &desc->flags)) ;		/* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ;	/* aarch64 */
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
		else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
		else if (lookup_cache(buf, desc)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* pad with spaces so each flag can be matched as " flag " */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (path_exist(_PATH_SYS_CPU "/kernel_max"))
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	/* build the index -> cpuX translation table from the possible mask */
	if (path_exist(_PATH_SYS_CPU "/possible")) {
		cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET(num, tmp))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(tmp);
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
		    _PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (path_exist(_PATH_SYS_CPU "/present")) {
		desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);
	}

	/* get mask for online CPUs */
	if (path_exist(_PATH_SYS_CPU "/online")) {
		desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);
	}

	/* get dispatching mode */
	if (path_exist(_PATH_SYS_CPU "/dispatching"))
		desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
	else
		desc->dispatching = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);

	/* s390: machine type line from /proc/sysinfo (best effort) */
	if ((fp = path_fopen("r", 0, _PATH_PROC_SYSINFO))) {
		while (fgets(buf, sizeof(buf), fp) != NULL && !desc->machinetype)
			lookup(buf, "Type", &desc->machinetype);
		fclose(fp);
	}
}
666
667 static int
668 has_pci_device(unsigned int vendor, unsigned int device)
669 {
670 FILE *f;
671 unsigned int num, fn, ven, dev;
672 int res = 1;
673
674 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
675 if (!f)
676 return 0;
677
678 /* for more details about bus/pci/devices format see
679 * drivers/pci/proc.c in linux kernel
680 */
681 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
682 &num, &fn, &ven, &dev) == 4) {
683
684 if (ven == vendor && dev == device)
685 goto found;
686 }
687
688 res = 0;
689 found:
690 fclose(f);
691 return res;
692 }
693
694 #if defined(__x86_64__) || defined(__i386__)
695
696 /*
697 * This CPUID leaf returns the information about the hypervisor.
698 * EAX : maximum input value for CPUID supported by the hypervisor.
699 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
700 */
701 #define HYPERVISOR_INFO_LEAF 0x40000000
702
/* Execute the CPUID instruction for leaf @op (sub-leaf 0 -- ECX is
 * pre-set to 0) and store the resulting registers into *eax..*edx.
 * The asm is left untouched: on i386 PIC builds EBX is the GOT pointer
 * and may not be clobbered, hence the xchg dance via ESI. */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}
723
724 static void
725 read_hypervisor_cpuid(struct lscpu_desc *desc)
726 {
727 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
728 char hyper_vendor_id[13];
729
730 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
731
732 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
733 memcpy(hyper_vendor_id + 0, &ebx, 4);
734 memcpy(hyper_vendor_id + 4, &ecx, 4);
735 memcpy(hyper_vendor_id + 8, &edx, 4);
736 hyper_vendor_id[12] = '\0';
737
738 if (!hyper_vendor_id[0])
739 return;
740
741 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
742 desc->hyper = HYPER_XEN;
743 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
744 desc->hyper = HYPER_KVM;
745 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
746 desc->hyper = HYPER_MSHV;
747 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
748 desc->hyper = HYPER_VMWARE;
749 else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
750 desc->hyper = HYPER_SPAR;
751 }
752
753 #else /* ! (__x86_64__ || __i386__) */
/* no-op stub: CPUID-based detection is x86-only */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
758 #endif
759
/* Check whether the device-tree "compatible" file at @path contains the
 * string @str.  The file is a NUL-separated list of strings; returns 1
 * on an exact match, 0 otherwise (or when the file cannot be read). */
static int is_compatible(const char *path, const char *str)
{
	FILE *f = path_fopen("r", 0, "%s", path);
	char buf[256] = { 0 };
	size_t len, pos;

	if (!f)
		return 0;

	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);

	/* walk the NUL-separated entries */
	for (pos = 0; pos < len; pos += strlen(&buf[pos]) + 1) {
		if (!strcmp(&buf[pos], str))
			return 1;
	}

	return 0;
}
782
/* Detect the virtualization environment on PowerPC; fills desc->hyper
 * and desc->virtype.  The order of the tests below is significant.
 * Returns the detected HYPER_* id (0 == HYPER_NONE == nothing found). */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (path_exist("/proc/iSeries")) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;
		/* a partition named "full" marks a non-virtualized setup */
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
820
821 #ifdef INCLUDE_VMWARE_BDOOR
822
#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

/* Issue the VMware backdoor "get version" request: an inl from the
 * backdoor I/O port with the magic value in EAX.  On non-VMware systems
 * this port access is expected to fault; the caller installs a SIGSEGV
 * handler around the call (see is_vmware_platform()). */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}
850
/* jump target used to recover from the SIGSEGV the backdoor probe may raise */
static jmp_buf segv_handler_env;

/* SIGSEGV handler: non-local return into is_vmware_platform() */
static void
segv_handler(__attribute__((__unused__)) int sig,
             __attribute__((__unused__)) siginfo_t *info,
             __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}
860
/* Probe the VMware backdoor port; returns 1 when running under VMware.
 * Temporarily installs a SIGSEGV handler because the probe faults on
 * non-VMware systems. */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04).    kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;	/* we got here via siglongjmp() -- not VMware */

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
901
902 #else /* ! INCLUDE_VMWARE_BDOOR */
903
/* no-op stub: backdoor probe unavailable on this arch/build */
static int
is_vmware_platform(void)
{
	return 0;
}
909
910 #endif /* INCLUDE_VMWARE_BDOOR */
911
/* Detect the hypervisor / container environment and fill desc->hyper,
 * desc->virtype and (on s390) desc->hypervisor.  The detection methods
 * are tried in a fixed priority order; the first hit wins. */
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */

	if ((fd = path_fopen("r", 0, _PATH_PROC_OSRELEASE))) {
		char buf[256];

		if (fgets(buf, sizeof(buf), fd) != NULL) {
			if (strstr(buf, "Microsoft")) {
				desc->hyper = HYPER_WSL;
				desc->virtype = VIRT_CONT;
			}
		}
		fclose(fd);
		if (desc->virtype)
			return;
	}

	/* CPUID/DMI/backdoor probes only make sense on the live system */
	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper) {
		desc->virtype = VIRT_FULL;

		if (desc->hyper == HYPER_XEN) {
			uint32_t features;

			/* refine full vs. para based on the Xen feature mask */
			fd = path_fopen("r", 0, _PATH_SYS_HYP_FEATURES);
			if (fd && fscanf(fd, "%x", &features) == 1) {
				/* Xen PV domain */
				if (features & XEN_FEATURES_PV_MASK)
					desc->virtype = VIRT_PARA;
				/* Xen PVH domain */
				else if ((features & XEN_FEATURES_PVH_MASK)
							== XEN_FEATURES_PVH_MASK)
					desc->virtype = VIRT_PARA;
				fclose(fd);
			} else {
				err(EXIT_FAILURE, _("failed to read from: %s"),
						_PATH_SYS_HYP_FEATURES);
			}
		}
	} else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (path_exist(_PATH_PROC_XEN)) {
		int dom0 = 0;
		fd = path_fopen("r", 0, _PATH_PROC_XENCAP);

		if (fd) {
			char buf[256];

			/* "control_d" capability marks the dom0 host */
			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if ((fd = path_fopen("r", 0, _PATH_PROC_SYSINFO))) {
		char buf[BUFSIZ];

		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			char *str;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			str = strchr(buf, ':');
			if (!str)
				continue;
			xasprintf(&str, "%s", str + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			while ((str = strstr(desc->hypervisor, "  ")))
				memmove(str, str + 1, strlen(str));
		}
		fclose(fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 *		      /proc/bc should not */
	else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390")  == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if ((fd = path_fopen("r", 0, _PATH_PROC_STATUS))) {
		char buf[BUFSIZ];
		char *val = NULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID marks a VServer guest */
		if (val) {
			char *org = val;

			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
			free(org);
		}
	}
}
1067
1068 /* add @set to the @ary, unnecessary set is deallocated. */
1069 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
1070 {
1071 int i;
1072 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1073
1074 if (!ary)
1075 return -1;
1076
1077 for (i = 0; i < *items; i++) {
1078 if (CPU_EQUAL_S(setsize, set, ary[i]))
1079 break;
1080 }
1081 if (i == *items) {
1082 ary[*items] = set;
1083 ++*items;
1084 return 0;
1085 }
1086 CPU_FREE(set);
1087 return 1;
1088 }
1089
1090 static void
1091 read_topology(struct lscpu_desc *desc, int idx)
1092 {
1093 cpu_set_t *thread_siblings, *core_siblings;
1094 cpu_set_t *book_siblings, *drawer_siblings;
1095 int coreid, socketid, bookid, drawerid;
1096 int i, num = real_cpu_num(desc, idx);
1097
1098 if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
1099 return;
1100
1101 thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1102 "/cpu%d/topology/thread_siblings", num);
1103 core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1104 "/cpu%d/topology/core_siblings", num);
1105 book_siblings = NULL;
1106 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
1107 book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1108 "/cpu%d/topology/book_siblings", num);
1109 drawer_siblings = NULL;
1110 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_siblings", num))
1111 drawer_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1112 "/cpu%d/topology/drawer_siblings", num);
1113 coreid = -1;
1114 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/core_id", num))
1115 coreid = path_read_s32(_PATH_SYS_CPU
1116 "/cpu%d/topology/core_id", num);
1117 socketid = -1;
1118 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/physical_package_id", num))
1119 socketid = path_read_s32(_PATH_SYS_CPU
1120 "/cpu%d/topology/physical_package_id", num);
1121 bookid = -1;
1122 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_id", num))
1123 bookid = path_read_s32(_PATH_SYS_CPU
1124 "/cpu%d/topology/book_id", num);
1125 drawerid = -1;
1126 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_id", num))
1127 drawerid = path_read_s32(_PATH_SYS_CPU
1128 "/cpu%d/topology/drawer_id", num);
1129
1130 if (!desc->coremaps) {
1131 int ndrawers, nbooks, nsockets, ncores, nthreads;
1132 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1133
1134 /* threads within one core */
1135 nthreads = CPU_COUNT_S(setsize, thread_siblings);
1136 if (!nthreads)
1137 nthreads = 1;
1138
1139 /* cores within one socket */
1140 ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
1141 if (!ncores)
1142 ncores = 1;
1143
1144 /* number of sockets within one book. Because of odd /
1145 * non-present cpu maps and to keep calculation easy we make
1146 * sure that nsockets and nbooks is at least 1.
1147 */
1148 nsockets = desc->ncpus / nthreads / ncores;
1149 if (!nsockets)
1150 nsockets = 1;
1151
1152 /* number of books */
1153 nbooks = desc->ncpus / nthreads / ncores / nsockets;
1154 if (!nbooks)
1155 nbooks = 1;
1156
1157 /* number of drawers */
1158 ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
1159 if (!ndrawers)
1160 ndrawers = 1;
1161
1162 /* all threads, see also read_basicinfo()
1163 * -- fallback for kernels without
1164 * /sys/devices/system/cpu/online.
1165 */
1166 if (!desc->nthreads)
1167 desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
1168
1169 /* For each map we make sure that it can have up to ncpuspos
1170 * entries. This is because we cannot reliably calculate the
1171 * number of cores, sockets and books on all architectures.
1172 * E.g. completely virtualized architectures like s390 may
1173 * have multiple sockets of different sizes.
1174 */
1175 desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1176 desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1177 desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1178 desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1179 for (i = 0; i < desc->ncpuspos; i++)
1180 desc->coreids[i] = desc->socketids[i] = -1;
1181 if (book_siblings) {
1182 desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1183 desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1184 for (i = 0; i < desc->ncpuspos; i++)
1185 desc->bookids[i] = -1;
1186 }
1187 if (drawer_siblings) {
1188 desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1189 desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1190 for (i = 0; i < desc->ncpuspos; i++)
1191 desc->drawerids[i] = -1;
1192 }
1193 }
1194
1195 add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
1196 desc->coreids[idx] = coreid;
1197 add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
1198 desc->socketids[idx] = socketid;
1199 if (book_siblings) {
1200 add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
1201 desc->bookids[idx] = bookid;
1202 }
1203 if (drawer_siblings) {
1204 add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
1205 desc->drawerids[idx] = drawerid;
1206 }
1207 }
1208
1209 static void
1210 read_polarization(struct lscpu_desc *desc, int idx)
1211 {
1212 char mode[64];
1213 int num = real_cpu_num(desc, idx);
1214
1215 if (desc->dispatching < 0)
1216 return;
1217 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
1218 return;
1219 if (!desc->polarization)
1220 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1221 path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
1222 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1223 desc->polarization[idx] = POLAR_VLOW;
1224 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1225 desc->polarization[idx] = POLAR_VMEDIUM;
1226 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1227 desc->polarization[idx] = POLAR_VHIGH;
1228 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1229 desc->polarization[idx] = POLAR_HORIZONTAL;
1230 else
1231 desc->polarization[idx] = POLAR_UNKNOWN;
1232 }
1233
1234 static void
1235 read_address(struct lscpu_desc *desc, int idx)
1236 {
1237 int num = real_cpu_num(desc, idx);
1238
1239 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
1240 return;
1241 if (!desc->addresses)
1242 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1243 desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
1244 }
1245
1246 static void
1247 read_configured(struct lscpu_desc *desc, int idx)
1248 {
1249 int num = real_cpu_num(desc, idx);
1250
1251 if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
1252 return;
1253 if (!desc->configured)
1254 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1255 desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
1256 }
1257
1258 /* Read overall maximum frequency of cpu */
1259 static char *
1260 cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1261 {
1262 int i;
1263 float cpu_freq = atof(desc->maxmhz[0]);
1264
1265 if (desc->present) {
1266 for (i = 1; i < desc->ncpuspos; i++) {
1267 if (CPU_ISSET(real_cpu_num(desc, i), desc->present)) {
1268 float freq = atof(desc->maxmhz[i]);
1269
1270 if (freq > cpu_freq)
1271 cpu_freq = freq;
1272 }
1273 }
1274 }
1275 snprintf(buf, bufsz, "%.4f", cpu_freq);
1276 return buf;
1277 }
1278
1279 /* Read overall minimum frequency of cpu */
1280 static char *
1281 cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1282 {
1283 int i;
1284 float cpu_freq = atof(desc->minmhz[0]);
1285
1286 if (desc->present) {
1287 for (i = 1; i < desc->ncpuspos; i++) {
1288 if (CPU_ISSET(real_cpu_num(desc, i), desc->present)) {
1289 float freq = atof(desc->minmhz[i]);
1290
1291 if (freq < cpu_freq)
1292 cpu_freq = freq;
1293 }
1294 }
1295 }
1296 snprintf(buf, bufsz, "%.4f", cpu_freq);
1297 return buf;
1298 }
1299
1300
1301 static void
1302 read_max_mhz(struct lscpu_desc *desc, int idx)
1303 {
1304 int num = real_cpu_num(desc, idx);
1305
1306 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
1307 return;
1308 if (!desc->maxmhz)
1309 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1310 xasprintf(&(desc->maxmhz[idx]), "%.4f",
1311 (float)path_read_s32(_PATH_SYS_CPU
1312 "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
1313 }
1314
1315 static void
1316 read_min_mhz(struct lscpu_desc *desc, int idx)
1317 {
1318 int num = real_cpu_num(desc, idx);
1319
1320 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
1321 return;
1322 if (!desc->minmhz)
1323 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1324 xasprintf(&(desc->minmhz[idx]), "%.4f",
1325 (float)path_read_s32(_PATH_SYS_CPU
1326 "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
1327 }
1328
1329 static int
1330 cachecmp(const void *a, const void *b)
1331 {
1332 struct cpu_cache *c1 = (struct cpu_cache *) a;
1333 struct cpu_cache *c2 = (struct cpu_cache *) b;
1334
1335 return strcmp(c2->name, c1->name);
1336 }
1337
/*
 * Scan /sys/.../cpu<num>/cache/index<N> for the CPU at position @idx.
 * The first call counts the cache levels and allocates desc->caches[];
 * later calls only add this CPU's shared_cpu_map to each cache entry.
 */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* count index<N> directories until the first gap */
		while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, desc->ncaches))
			desc->ncaches++;

		if (!desc->ncaches)
			return;

		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
				num, i))
			continue;
		if (!ca->name) {
			int type, level;

			/* cache type */
			path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/type",
					num, i);
			if (!strcmp(buf, "Data"))
				type = 'd';
			else if (!strcmp(buf, "Instruction"))
				type = 'i';
			else
				type = 0;	/* e.g. "Unified" -- no suffix */

			/* cache level */
			level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
					num, i);
			/* compose a name such as "L1d", "L1i" or "L2" */
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", level);

			ca->name = xstrdup(buf);

			/* cache size */
			if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
				path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
				ca->size = xstrdup(buf);
			} else {
				ca->size = xstrdup("unknown size");
			}
		}

		/* information about how CPUs share different caches */
		map = path_read_cpuset(maxcpus,
				_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
				num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		/* add_cpuset_to_array() takes ownership of @map */
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1406
1407 static inline int is_node_dirent(struct dirent *d)
1408 {
1409 return
1410 d &&
1411 #ifdef _DIRENT_HAVE_D_TYPE
1412 (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
1413 #endif
1414 strncmp(d->d_name, "node", 4) == 0 &&
1415 isdigit_string(d->d_name + 4);
1416 }
1417
static int
nodecmp(const void *ap, const void *bp)
{
	/* qsort() callback: ascending order of node numbers. */
	const int *lhs = ap;
	const int *rhs = bp;

	return *lhs - *rhs;
}
1424
/*
 * Detect NUMA nodes: count "node<N>" directories under _PATH_SYS_NODE,
 * collect the node numbers (sorted ascending) and read each node's
 * CPU mask.
 */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	char *path;

	/* number of NUMA node */
	path = path_strdup(_PATH_SYS_NODE);
	dir = opendir(path);
	free(path);

	/* first pass: count the node directories */
	while (dir && (d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		if (dir)
			closedir(dir);
		return;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	if (dir) {
		/* second pass: extract the node numbers, then sort them */
		rewinddir(dir);
		while ((d = readdir(dir)) && i < desc->nnodes) {
			if (is_node_dirent(d))
				desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
							_("Failed to extract the node number"));
		}
		closedir(dir);
		qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
	}

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		desc->nodemaps[i] = path_read_cpuset(maxcpus,
					_PATH_SYS_NODE "/node%d/cpumap",
					desc->idx2nodenum[i]);
}
1469
/*
 * Format the value of column @col for the CPU at position @idx into @buf.
 * Returns @buf, an empty string in @buf when no data is available, or
 * NULL when the CACHE value does not fit into @buf.
 */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CORE:
		/* --physical prints the kernel's core ID ("-" when unknown),
		 * otherwise the logical index into desc->coremaps */
		if (mod->physical) {
			if (desc->coreids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->coreids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->coremaps,
					     desc->ncores, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_SOCKET:
		/* same physical/logical split as COL_CORE */
		if (mod->physical) {
			if (desc->socketids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->socketids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->socketmaps,
					     desc->nsockets, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_NODE:
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_DRAWER:
		if (mod->physical) {
			if (desc->drawerids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->drawermaps,
					     desc->ndrawers, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_BOOK:
		if (mod->physical) {
			if (desc->bookids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->bookids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->bookmaps,
					     desc->nbooks, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CACHE:
	{
		/* one index per cache, joined by ',' (compat) or ':';
		 * caches are iterated from the end of the sorted array */
		char *p = buf;
		size_t sz = bufsz;
		int j;

		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				if (x < 0 || (size_t) x >= sz)
					return NULL;	/* truncated */
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;	/* no room for separator */
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CONFIGURED:
		if (!desc->configured)
			break;
		/* parsable output uses Y/N, readable output yes/no */
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_MAXMHZ:
		if (desc->maxmhz)
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_MINMHZ:
		if (desc->minmhz)
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}
1610
/*
 * Format the header (column name) for @col into @buf and return @buf.
 * For COL_CACHE the header is composed from the real cache names
 * (e.g. "L1d:L1i:L2"); NULL is returned when they do not fit into @buf.
 */
static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
		char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		/* caches are sorted in descending name order (cachecmp),
		 * so iterate from the end to print L1d first */
		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;	/* truncated */
			sz -= x;
			p += x;
			if (i > 0) {
				if (sz < 2)
					return NULL;	/* no room for separator */
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
	}
	/* fall back to the static column name */
	snprintf(buf, bufsz, "%s", coldescs[col].name);
	return buf;
}
1643
1644 /*
1645 * [-p] backend, we support two parsable formats:
1646 *
1647 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1648 * output and it contains fixed set of the columns. The CACHE columns are at
1649 * the end of the line and the CACHE is not printed if the number of the caches
1650 * is zero. The CACHE columns are separated by two commas, for example:
1651 *
1652 * $ lscpu --parse
1653 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1654 * 0,0,0,0,,0,0,0
1655 * 1,1,0,0,,1,1,0
1656 *
1657 * 2) "user defined output" -- this format prints always all columns without
1658 * special prefix for CACHE column. If there are not CACHEs then the column is
1659 * empty and the header "Cache" is printed rather than a real name of the cache.
1660 * The CACHE columns are separated by ':'.
1661 *
1662 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1663 * # CPU,Core,Socket,Node,L1d:L1i:L2
1664 * 0,0,0,0,0:0:0
1665 * 1,1,0,0,1:1:0
1666 */
1667 static void
1668 print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1669 struct lscpu_modifier *mod)
1670 {
1671 char buf[BUFSIZ], *data;
1672 int i;
1673
1674 /*
1675 * Header
1676 */
1677 printf(_(
1678 "# The following is the parsable format, which can be fed to other\n"
1679 "# programs. Each different item in every column has an unique ID\n"
1680 "# starting from zero.\n"));
1681
1682 fputs("# ", stdout);
1683 for (i = 0; i < ncols; i++) {
1684 int col = cols[i];
1685
1686 if (col == COL_CACHE) {
1687 if (mod->compat && !desc->ncaches)
1688 continue;
1689 if (mod->compat && i != 0)
1690 putchar(',');
1691 }
1692 if (i > 0)
1693 putchar(',');
1694
1695 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1696
1697 if (data && * data && col != COL_CACHE &&
1698 !coldescs[col].is_abbr) {
1699 /*
1700 * For normal column names use mixed case (e.g. "Socket")
1701 */
1702 char *p = data + 1;
1703
1704 while (p && *p != '\0') {
1705 *p = tolower((unsigned int) *p);
1706 p++;
1707 }
1708 }
1709 fputs(data && *data ? data : "", stdout);
1710 }
1711 putchar('\n');
1712
1713 /*
1714 * Data
1715 */
1716 for (i = 0; i < desc->ncpuspos; i++) {
1717 int c;
1718 int cpu = real_cpu_num(desc, i);
1719
1720 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1721 continue;
1722 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1723 continue;
1724 if (desc->present && !is_cpu_present(desc, cpu))
1725 continue;
1726 for (c = 0; c < ncols; c++) {
1727 if (mod->compat && cols[c] == COL_CACHE) {
1728 if (!desc->ncaches)
1729 continue;
1730 if (c > 0)
1731 putchar(',');
1732 }
1733 if (c > 0)
1734 putchar(',');
1735
1736 data = get_cell_data(desc, i, cols[c], mod,
1737 buf, sizeof(buf));
1738 fputs(data && *data ? data : "", stdout);
1739 }
1740 putchar('\n');
1741 }
1742 }
1743
/*
 * [-e] backend -- prints one libsmartcols line per CPU that matches the
 * --all/--online/--offline filter, optionally as JSON (-J).
 */
static void
print_readable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "cpus");
	}

	/* one column per requested field */
	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, data, 0, 0))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		/* apply the --all/--online/--offline filter */
		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";	/* empty cells are printed as "-" */
			if (scols_line_set_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1801
1802
/* Append a "<txt> | <formatted value>" line to the summary table @tb. */
static void __attribute__ ((__format__(printf, 3, 4)))
add_summary_sprint(struct libscols_table *tb,
		   const char *txt,
		   const char *fmt,
		   ...)
{
	va_list args;
	char *data;
	struct libscols_line *ln;

	ln = scols_table_new_line(tb, NULL);
	if (!ln)
		err(EXIT_FAILURE, _("failed to allocate output line"));

	/* left column: field description */
	scols_line_set_data(ln, 0, txt);

	/* right column: formatted value; the line takes ownership of @data */
	va_start(args, fmt);
	xvasprintf(&data, fmt, args);
	va_end(args);

	if (data && scols_line_refer_data(ln, 1, data))
		err(EXIT_FAILURE, _("failed to add output data"));
}
1827
/* convenience wrappers around add_summary_sprint() for int and string data */
#define add_summary_n(tb, txt, num)	add_summary_sprint(tb, txt, "%d", num)
#define add_summary_s(tb, txt, str)	add_summary_sprint(tb, txt, "%s", str)
1830
1831 static void
1832 print_cpuset(struct libscols_table *tb,
1833 const char *key, cpu_set_t *set, int hex)
1834 {
1835 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1836 size_t setbuflen = 7 * maxcpus;
1837 char setbuf[setbuflen], *p;
1838
1839 if (hex) {
1840 p = cpumask_create(setbuf, setbuflen, set, setsize);
1841 add_summary_s(tb, key, p);
1842 } else {
1843 p = cpulist_create(setbuf, setbuflen, set, setsize);
1844 add_summary_s(tb, key, p);
1845 }
1846 }
1847
1848 /*
1849 * default output
1850 */
1851 static void
1852 print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
1853 {
1854 char buf[BUFSIZ];
1855 int i = 0;
1856 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1857 struct libscols_table *tb;
1858
1859 scols_init_debug(0);
1860
1861 tb = scols_new_table();
1862 if (!tb)
1863 err(EXIT_FAILURE, _("failed to allocate output table"));
1864
1865 scols_table_enable_noheadings(tb, 1);
1866 if (mod->json) {
1867 scols_table_enable_json(tb, 1);
1868 scols_table_set_name(tb, "lscpu");
1869 }
1870
1871 if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
1872 scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES) == NULL)
1873 err(EXIT_FAILURE, _("failed to initialize output column"));
1874
1875 add_summary_s(tb, _("Architecture:"), desc->arch);
1876 if (desc->mode) {
1877 char *p = buf;
1878
1879 if (desc->mode & MODE_32BIT) {
1880 strcpy(p, "32-bit, ");
1881 p += 8;
1882 }
1883 if (desc->mode & MODE_64BIT) {
1884 strcpy(p, "64-bit, ");
1885 p += 8;
1886 }
1887 *(p - 2) = '\0';
1888 add_summary_s(tb, _("CPU op-mode(s):"), buf);
1889 }
1890 #if !defined(WORDS_BIGENDIAN)
1891 add_summary_s(tb, _("Byte Order:"), "Little Endian");
1892 #else
1893 add_summary_s(tb, _("Byte Order:"), "Big Endian");
1894 #endif
1895 add_summary_n(tb, _("CPU(s):"), desc->ncpus);
1896
1897 if (desc->online)
1898 print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
1899 _("On-line CPU(s) list:"),
1900 desc->online, mod->hex);
1901
1902 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
1903 cpu_set_t *set;
1904
1905 /* Linux kernel provides cpuset of off-line CPUs that contains
1906 * all configured CPUs (see /sys/devices/system/cpu/offline),
1907 * but want to print real (present in system) off-line CPUs only.
1908 */
1909 set = cpuset_alloc(maxcpus, NULL, NULL);
1910 if (!set)
1911 err(EXIT_FAILURE, _("failed to callocate cpu set"));
1912 CPU_ZERO_S(setsize, set);
1913 for (i = 0; i < desc->ncpuspos; i++) {
1914 int cpu = real_cpu_num(desc, i);
1915 if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
1916 CPU_SET_S(cpu, setsize, set);
1917 }
1918 print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
1919 _("Off-line CPU(s) list:"),
1920 set, mod->hex);
1921 cpuset_free(set);
1922 }
1923
1924 if (desc->nsockets) {
1925 int threads_per_core, cores_per_socket, sockets_per_book;
1926 int books_per_drawer, drawers;
1927 FILE *fd;
1928
1929 threads_per_core = cores_per_socket = sockets_per_book = 0;
1930 books_per_drawer = drawers = 0;
1931 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1932 * Using simply the cpu topology masks in sysfs will not give
1933 * usable results since everything is virtualized. E.g.
1934 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1935 * five cpus.
1936 * If the cpu topology is not exported (e.g. 2nd level guest)
1937 * fall back to old calculation scheme.
1938 */
1939 if ((fd = path_fopen("r", 0, _PATH_PROC_SYSINFO))) {
1940 int t0, t1;
1941
1942 while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
1943 if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
1944 &t0, &t1, &drawers, &books_per_drawer,
1945 &sockets_per_book,
1946 &cores_per_socket) == 6)
1947 break;
1948 }
1949 if (fd)
1950 fclose(fd);
1951 }
1952 if (desc->mtid)
1953 threads_per_core = atoi(desc->mtid) + 1;
1954 add_summary_n(tb, _("Thread(s) per core:"),
1955 threads_per_core ?: desc->nthreads / desc->ncores);
1956 add_summary_n(tb, _("Core(s) per socket:"),
1957 cores_per_socket ?: desc->ncores / desc->nsockets);
1958 if (desc->nbooks) {
1959 add_summary_n(tb, _("Socket(s) per book:"),
1960 sockets_per_book ?: desc->nsockets / desc->nbooks);
1961 if (desc->ndrawers) {
1962 add_summary_n(tb, _("Book(s) per drawer:"),
1963 books_per_drawer ?: desc->nbooks / desc->ndrawers);
1964 add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
1965 } else {
1966 add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
1967 }
1968 } else {
1969 add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
1970 }
1971 }
1972 if (desc->nnodes)
1973 add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
1974 if (desc->vendor)
1975 add_summary_s(tb, _("Vendor ID:"), desc->vendor);
1976 if (desc->machinetype)
1977 add_summary_s(tb, _("Machine type:"), desc->machinetype);
1978 if (desc->family)
1979 add_summary_s(tb, _("CPU family:"), desc->family);
1980 if (desc->model || desc->revision)
1981 add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
1982 if (desc->modelname || desc->cpu)
1983 add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
1984 if (desc->stepping)
1985 add_summary_s(tb, _("Stepping:"), desc->stepping);
1986 if (desc->mhz)
1987 add_summary_s(tb, _("CPU MHz:"), desc->mhz);
1988 if (desc->dynamic_mhz)
1989 add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
1990 if (desc->static_mhz)
1991 add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
1992 if (desc->maxmhz)
1993 add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
1994 if (desc->minmhz)
1995 add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
1996 if (desc->bogomips)
1997 add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
1998 if (desc->virtflag) {
1999 if (!strcmp(desc->virtflag, "svm"))
2000 add_summary_s(tb, _("Virtualization:"), "AMD-V");
2001 else if (!strcmp(desc->virtflag, "vmx"))
2002 add_summary_s(tb, _("Virtualization:"), "VT-x");
2003 }
2004 if (desc->hypervisor)
2005 add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
2006 if (desc->hyper) {
2007 add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
2008 add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
2009 }
2010 if (desc->dispatching >= 0)
2011 add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
2012 if (desc->ncaches) {
2013 for (i = desc->ncaches - 1; i >= 0; i--) {
2014 snprintf(buf, sizeof(buf),
2015 _("%s cache:"), desc->caches[i].name);
2016 add_summary_s(tb, buf, desc->caches[i].size);
2017 }
2018 }
2019 if (desc->necaches) {
2020 for (i = desc->necaches - 1; i >= 0; i--) {
2021 snprintf(buf, sizeof(buf),
2022 _("%s cache:"), desc->ecaches[i].name);
2023 add_summary_s(tb, buf, desc->ecaches[i].size);
2024 }
2025 }
2026
2027 for (i = 0; i < desc->nnodes; i++) {
2028 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
2029 print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
2030 }
2031
2032 if (desc->physsockets) {
2033 add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
2034 add_summary_n(tb, _("Physical chips:"), desc->physchips);
2035 add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
2036 }
2037
2038 if (desc->flags)
2039 add_summary_s(tb, _("Flags:"), desc->flags);
2040
2041 scols_print_table(tb);
2042 scols_unref_table(tb);
2043 }
2044
/* Print the help text to @out and exit; the exit status is EXIT_FAILURE
 * when help is printed as part of a usage error (@out == stderr). */
static void __attribute__((__noreturn__)) usage(FILE *out)
{
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_SEPARATOR, out);
	fputs(_("Display information about the CPU architecture.\n"), out);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all               print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online            print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline           print offline CPUs only\n"), out);
	fputs(_(" -J, --json              use JSON for default or extended format\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>]    print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir>     use specified directory as system root\n"), out);
	fputs(_(" -x, --hex               print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(_(" -y, --physical          print physical instead of logical IDs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	fputs(USAGE_HELP, out);
	fputs(USAGE_VERSION, out);

	fprintf(out, _("\nAvailable columns:\n"));

	/* list every column supported by --extended/--parse */
	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s  %s\n", coldescs[i].name, _(coldescs[i].help));

	fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));

	exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}
2078
2079 int main(int argc, char *argv[])
2080 {
2081 struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
2082 struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
2083 int c, i;
2084 int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
2085 int cpu_modifier_specified = 0;
2086
2087 static const struct option longopts[] = {
2088 { "all", no_argument, NULL, 'a' },
2089 { "online", no_argument, NULL, 'b' },
2090 { "offline", no_argument, NULL, 'c' },
2091 { "help", no_argument, NULL, 'h' },
2092 { "extended", optional_argument, NULL, 'e' },
2093 { "json", no_argument, NULL, 'J' },
2094 { "parse", optional_argument, NULL, 'p' },
2095 { "sysroot", required_argument, NULL, 's' },
2096 { "physical", no_argument, NULL, 'y' },
2097 { "hex", no_argument, NULL, 'x' },
2098 { "version", no_argument, NULL, 'V' },
2099 { NULL, 0, NULL, 0 }
2100 };
2101
2102 static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
2103 { 'a','b','c' },
2104 { 'e','p' },
2105 { 0 }
2106 };
2107 int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;
2108
2109 setlocale(LC_ALL, "");
2110 bindtextdomain(PACKAGE, LOCALEDIR);
2111 textdomain(PACKAGE);
2112 atexit(close_stdout);
2113
2114 while ((c = getopt_long(argc, argv, "abce::hJp::s:xyV", longopts, NULL)) != -1) {
2115
2116 err_exclusive_options(c, longopts, excl, excl_st);
2117
2118 switch (c) {
2119 case 'a':
2120 mod->online = mod->offline = 1;
2121 cpu_modifier_specified = 1;
2122 break;
2123 case 'b':
2124 mod->online = 1;
2125 cpu_modifier_specified = 1;
2126 break;
2127 case 'c':
2128 mod->offline = 1;
2129 cpu_modifier_specified = 1;
2130 break;
2131 case 'h':
2132 usage(stdout);
2133 case 'J':
2134 mod->json = 1;
2135 break;
2136 case 'p':
2137 case 'e':
2138 if (optarg) {
2139 if (*optarg == '=')
2140 optarg++;
2141 ncolumns = string_to_idarray(optarg,
2142 columns, ARRAY_SIZE(columns),
2143 column_name_to_id);
2144 if (ncolumns < 0)
2145 return EXIT_FAILURE;
2146 }
2147 mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
2148 break;
2149 case 's':
2150 path_set_prefix(optarg);
2151 mod->system = SYSTEM_SNAPSHOT;
2152 break;
2153 case 'x':
2154 mod->hex = 1;
2155 break;
2156 case 'y':
2157 mod->physical = 1;
2158 break;
2159 case 'V':
2160 printf(UTIL_LINUX_VERSION);
2161 return EXIT_SUCCESS;
2162 default:
2163 errtryhelp(EXIT_FAILURE);
2164 }
2165 }
2166
2167 if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
2168 fprintf(stderr,
2169 _("%s: options --all, --online and --offline may only "
2170 "be used with options --extended or --parse.\n"),
2171 program_invocation_short_name);
2172 return EXIT_FAILURE;
2173 }
2174
2175 if (argc != optind)
2176 usage(stderr);
2177
2178 /* set default cpu display mode if none was specified */
2179 if (!mod->online && !mod->offline) {
2180 mod->online = 1;
2181 mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
2182 }
2183
2184 read_basicinfo(desc, mod);
2185
2186 for (i = 0; i < desc->ncpuspos; i++) {
2187 /* only consider present CPUs */
2188 if (desc->present &&
2189 !CPU_ISSET(real_cpu_num(desc, i), desc->present))
2190 continue;
2191 read_topology(desc, i);
2192 read_cache(desc, i);
2193 read_polarization(desc, i);
2194 read_address(desc, i);
2195 read_configured(desc, i);
2196 read_max_mhz(desc, i);
2197 read_min_mhz(desc, i);
2198 }
2199
2200 if (desc->caches)
2201 qsort(desc->caches, desc->ncaches,
2202 sizeof(struct cpu_cache), cachecmp);
2203
2204 if (desc->ecaches)
2205 qsort(desc->ecaches, desc->necaches,
2206 sizeof(struct cpu_cache), cachecmp);
2207
2208 read_nodes(desc);
2209 read_hypervisor(desc, mod);
2210
2211 switch(mod->mode) {
2212 case OUTPUT_SUMMARY:
2213 print_summary(desc, mod);
2214 break;
2215 case OUTPUT_PARSABLE:
2216 if (!ncolumns) {
2217 columns[ncolumns++] = COL_CPU;
2218 columns[ncolumns++] = COL_CORE;
2219 columns[ncolumns++] = COL_SOCKET;
2220 columns[ncolumns++] = COL_NODE;
2221 columns[ncolumns++] = COL_CACHE;
2222 mod->compat = 1;
2223 }
2224 print_parsable(desc, columns, ncolumns, mod);
2225 break;
2226 case OUTPUT_READABLE:
2227 if (!ncolumns) {
2228 /* No list was given. Just print whatever is there. */
2229 columns[ncolumns++] = COL_CPU;
2230 if (desc->nodemaps)
2231 columns[ncolumns++] = COL_NODE;
2232 if (desc->drawermaps)
2233 columns[ncolumns++] = COL_DRAWER;
2234 if (desc->bookmaps)
2235 columns[ncolumns++] = COL_BOOK;
2236 if (desc->socketmaps)
2237 columns[ncolumns++] = COL_SOCKET;
2238 if (desc->coremaps)
2239 columns[ncolumns++] = COL_CORE;
2240 if (desc->caches)
2241 columns[ncolumns++] = COL_CACHE;
2242 if (desc->online)
2243 columns[ncolumns++] = COL_ONLINE;
2244 if (desc->configured)
2245 columns[ncolumns++] = COL_CONFIGURED;
2246 if (desc->polarization)
2247 columns[ncolumns++] = COL_POLARIZATION;
2248 if (desc->addresses)
2249 columns[ncolumns++] = COL_ADDRESS;
2250 if (desc->maxmhz)
2251 columns[ncolumns++] = COL_MAXMHZ;
2252 if (desc->minmhz)
2253 columns[ncolumns++] = COL_MINMHZ;
2254 }
2255 print_readable(desc, columns, ncolumns, mod);
2256 break;
2257 }
2258
2259 return EXIT_SUCCESS;
2260 }