]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu.c
Use --help suggestion on invalid option
[thirdparty/util-linux.git] / sys-utils / lscpu.c
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36
37 #if (defined(__x86_64__) || defined(__i386__))
38 # if !defined( __SANITIZE_ADDRESS__)
39 # define INCLUDE_VMWARE_BDOOR
40 # else
41 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
42 # endif
43 #endif
44
45 #ifdef INCLUDE_VMWARE_BDOOR
46 # include <stdint.h>
47 # include <signal.h>
48 # include <strings.h>
49 # include <setjmp.h>
50 # ifdef HAVE_SYS_IO_H
51 # include <sys/io.h>
52 # endif
53 #endif
54
55 #if defined(HAVE_LIBRTAS)
56 #include <librtas.h>
57 #endif
58
59 #include <libsmartcols.h>
60
61 #include "cpuset.h"
62 #include "nls.h"
63 #include "xalloc.h"
64 #include "c.h"
65 #include "strutils.h"
66 #include "bitops.h"
67 #include "path.h"
68 #include "closestream.h"
69 #include "optutils.h"
70 #include "lscpu.h"
71
/* upper bound on per-CPU cache directories scanned under sysfs */
#define CACHE_MAX 100

/* /sys paths */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_HYP_FEATURES	"/sys/hypervisor/properties/features"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE		_PATH_SYS_SYSTEM "/node"
#define _PATH_PROC_XEN		"/proc/xen"
#define _PATH_PROC_XENCAP	_PATH_PROC_XEN "/capabilities"
#define _PATH_PROC_CPUINFO	"/proc/cpuinfo"
#define _PATH_PROC_PCIDEVS	"/proc/bus/pci/devices"
#define _PATH_PROC_SYSINFO	"/proc/sysinfo"
#define _PATH_PROC_STATUS	"/proc/self/status"
#define _PATH_PROC_VZ		"/proc/vz"
#define _PATH_PROC_BC		"/proc/bc"
#define _PATH_PROC_DEVICETREE	"/proc/device-tree"
#define _PATH_DEV_MEM		"/dev/mem"

/* Xen Domain feature flag used for /sys/hypervisor/properties/features */
#define XENFEAT_supervisor_mode_kernel		3
#define XENFEAT_mmu_pt_update_preserve_ad	5
#define XENFEAT_hvm_callback_vector		8

/* bit masks over the features word read from the sysfs file above;
 * used to distinguish Xen PV / PVH guests from fully virtualized ones */
#define XEN_FEATURES_PV_MASK	(1U << XENFEAT_mmu_pt_update_preserve_ad)
#define XEN_FEATURES_PVH_MASK	( (1U << XENFEAT_supervisor_mode_kernel) \
				| (1U << XENFEAT_hvm_callback_vector) )

/* virtualization types */
enum {
	VIRT_NONE	= 0,
	VIRT_PARA,
	VIRT_FULL,
	VIRT_CONT
};

/* human-readable names for the VIRT_* constants, indexed by them */
const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

/* vendor strings printed for each HYPER_* id (HYPER_* declared in lscpu.h) */
const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par"
};

/* PCI vendor IDs used to fingerprint a hypervisor from /proc/bus/pci/devices */
const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device IDs of the emulated graphics adapters, matched together
 * with the vendor IDs above */
const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};

/* CPU modes (bit flags, combined in lscpu_desc.mode) */
enum {
	MODE_32BIT	= (1 << 1),
	MODE_64BIT	= (1 << 2)
};

/* cache(s) description */
struct cpu_cache {
	char		*name;		/* e.g. "L1d", "L2" */
	char		*size;		/* human-readable size string */

	int		nsharedmaps;	/* number of entries in sharedmaps */
	cpu_set_t	**sharedmaps;	/* CPUs sharing each cache instance */
};

/* dispatching modes */
enum {
	DISP_HORIZONTAL = 0,
	DISP_VERTICAL	= 1
};

const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* cpu polarization (s390 virtual hardware dispatching) */
enum {
	POLAR_UNKNOWN	= 0,
	POLAR_VLOW,
	POLAR_VMEDIUM,
	POLAR_VHIGH,
	POLAR_HORIZONTAL
};

/* short (parsable, -p) and long (readable, -e) names for a polarization */
struct polarization_modes {
	char *parsable;
	char *readable;
};

struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};
196
/* global description */
struct lscpu_desc {
	char	*arch;		/* uname(2) machine string */
	char	*vendor;
	char	*machinetype;	/* s390 */
	char	*family;
	char	*model;
	char	*modelname;
	char	*revision;	/* alternative for model (ppc) */
	char	*cpu;		/* alternative for modelname (ppc, sparc) */
	char	*virtflag;	/* virtualization flag (vmx, svm) */
	char	*hypervisor;	/* hypervisor software */
	int	hyper;		/* hypervisor vendor ID */
	int	virtype;	/* VIRT_PARA|FULL|NONE ? */
	char	*mhz;
	char	*dynamic_mhz;	/* dynamic mega hertz (s390) */
	char	*static_mhz;	/* static mega hertz (s390) */
	char	**maxmhz;	/* maximum mega hertz */
	char	**minmhz;	/* minimum mega hertz */
	char	*stepping;
	char	*bogomips;
	char	*flags;
	char	*mtid;		/* maximum thread id (s390) */
	int	dispatching;	/* none, horizontal or vertical */
	int	mode;		/* rm, lm or/and tm */

	int		ncpuspos;	/* maximal possible CPUs */
	int		ncpus;		/* number of present CPUs */
	cpu_set_t	*present;	/* mask with present CPUs */
	cpu_set_t	*online;	/* mask with online CPUs */

	int		nthreads;	/* number of online threads */

	int		ncaches;	/* number of entries in caches */
	struct cpu_cache *caches;

	int		necaches;	/* extra caches (s390) */
	struct cpu_cache *ecaches;

	/*
	 * All maps are sequentially indexed (0..ncpuspos), the array index
	 * does not have match with cpuX number as presented by kernel. You
	 * have to use real_cpu_num() to get the real cpuX number.
	 *
	 * For example, the possible system CPUs are: 1,3,5, it means that
	 * ncpuspos=3, so all arrays are in range 0..3.
	 */
	int		*idx2cpunum;	/* mapping index to CPU num */

	int		nnodes;		/* number of NUMA modes */
	int		*idx2nodenum;	/* Support for discontinuous nodes */
	cpu_set_t	**nodemaps;	/* array with NUMA nodes */

	/* drawers -- based on drawer_siblings (internal kernel map of cpuX's
	 * hardware threads within the same drawer */
	int		ndrawers;	/* number of all online drawers */
	cpu_set_t	**drawermaps;	/* unique drawer_siblings */
	int		*drawerids;	/* physical drawer ids */

	/* books -- based on book_siblings (internal kernel map of cpuX's
	 * hardware threads within the same book */
	int		nbooks;		/* number of all online books */
	cpu_set_t	**bookmaps;	/* unique book_siblings */
	int		*bookids;	/* physical book ids */

	/* sockets -- based on core_siblings (internal kernel map of cpuX's
	 * hardware threads within the same physical_package_id (socket)) */
	int		nsockets;	/* number of all online sockets */
	cpu_set_t	**socketmaps;	/* unique core_siblings */
	int		*socketids;	/* physical socket ids */

	/* cores -- based on thread_siblings (internal kernel map of cpuX's
	 * hardware threads within the same core as cpuX) */
	int		ncores;		/* number of all online cores */
	cpu_set_t	**coremaps;	/* unique thread_siblings */
	int		*coreids;	/* physical core ids */

	int		*polarization;	/* cpu polarization */
	int		*addresses;	/* physical cpu addresses */
	int		*configured;	/* cpu configured */
	int		physsockets;	/* Physical sockets (modules) */
	int		physchips;	/* Physical chips */
	int		physcoresperchip;	/* Physical cores per chip */
};

/* output formats */
enum {
	OUTPUT_SUMMARY	= 0,	/* default */
	OUTPUT_PARSABLE,	/* -p */
	OUTPUT_READABLE,	/* -e */
};

/* data source */
enum {
	SYSTEM_LIVE = 0,	/* analyzing a live system */
	SYSTEM_SNAPSHOT,	/* analyzing a snapshot of a different system */
};

/* command-line driven output modifiers */
struct lscpu_modifier {
	int		mode;		/* OUTPUT_* */
	int		system;		/* SYSTEM_* */
	unsigned int	hex:1,		/* print CPU masks rather than CPU lists */
			compat:1,	/* use backwardly compatible format */
			online:1,	/* print online CPUs */
			offline:1,	/* print offline CPUs */
			physical:1;	/* use physical numbers */
};

static int maxcpus;		/* size in bits of kernel cpu mask */

/* test a CPU bit in the online/present mask; safe on a NULL desc or mask */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* translate a sequential array index to the kernel's cpuX number */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])

/*
 * IDs
 */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_DRAWER,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};

/* column description
 */
struct lscpu_coldesc {
	const char *name;
	const char *help;

	unsigned int  is_abbr:1;	/* name is abbreviation */
};

/* table of output columns, indexed by COL_* */
static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU]          = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE]         = { "CORE", N_("logical core number") },
	[COL_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_DRAWER]       = { "DRAWER", N_("logical drawer number") },
	[COL_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ]       = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ]       = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};
358
359 static int
360 column_name_to_id(const char *name, size_t namesz)
361 {
362 size_t i;
363
364 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
365 const char *cn = coldescs[i].name;
366
367 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
368 return i;
369 }
370 warnx(_("unknown column: %s"), name);
371 return -1;
372 }
373
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>  : <key>"
 *
 * Returns 1 and fills *@value (newly allocated, caller owns it) when the
 * line matches @pattern, otherwise 0.  An already filled *@value is never
 * overwritten -- the first matching line wins.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast to unsigned char -- passing a plain (possibly
	 * signed) char to isspace() is undefined behavior for negative
	 * values (CERT STR37-C) */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value: trim the trailing newline and whitespace */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
416
417 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
418 * part of the cache topology information within the sysfs filesystem.
419 * This is true for all shared caches on e.g. s390. When there are layers of
420 * hypervisors in between it is not knows which CPUs share which caches.
421 * Therefore information about shared caches is only available in
422 * /proc/cpuinfo.
423 * Format is:
424 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
425 */
426 static int
427 lookup_cache(char *line, struct lscpu_desc *desc)
428 {
429 struct cpu_cache *cache;
430 long long size;
431 char *p, type;
432 int level;
433
434 /* Make sure line starts with "cache<nr> :" */
435 if (strncmp(line, "cache", 5))
436 return 0;
437 for (p = line + 5; isdigit(*p); p++);
438 for (; isspace(*p); p++);
439 if (*p != ':')
440 return 0;
441
442 p = strstr(line, "scope=") + 6;
443 /* Skip private caches, also present in sysfs */
444 if (!p || strncmp(p, "Private", 7) == 0)
445 return 0;
446 p = strstr(line, "level=");
447 if (!p || sscanf(p, "level=%d", &level) != 1)
448 return 0;
449 p = strstr(line, "type=") + 5;
450 if (!p || !*p)
451 return 0;
452 type = 0;
453 if (strncmp(p, "Data", 4) == 0)
454 type = 'd';
455 if (strncmp(p, "Instruction", 11) == 0)
456 type = 'i';
457 p = strstr(line, "size=");
458 if (!p || sscanf(p, "size=%lld", &size) != 1)
459 return 0;
460
461 desc->necaches++;
462 desc->ecaches = xrealloc(desc->ecaches,
463 desc->necaches * sizeof(struct cpu_cache));
464 cache = &desc->ecaches[desc->necaches - 1];
465 memset(cache, 0 , sizeof(*cache));
466 if (type)
467 xasprintf(&cache->name, "L%d%c", level, type);
468 else
469 xasprintf(&cache->name, "L%d", level);
470 xasprintf(&cache->size, "%lldK", size);
471 return 1;
472 }
473
474 /* Don't init the mode for platforms where we are not able to
475 * detect that CPU supports 64-bit mode.
476 */
477 static int
478 init_mode(struct lscpu_modifier *mod)
479 {
480 int m = 0;
481
482 if (mod->system == SYSTEM_SNAPSHOT)
483 /* reading info from any /{sys,proc} dump, don't mix it with
484 * information about our real CPU */
485 return 0;
486
487 #if defined(__alpha__) || defined(__ia64__)
488 m |= MODE_64BIT; /* 64bit platforms only */
489 #endif
490 /* platforms with 64bit flag in /proc/cpuinfo, define
491 * 32bit default here */
492 #if defined(__i386__) || defined(__x86_64__) || \
493 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
494 m |= MODE_32BIT;
495 #endif
496 return m;
497 }
498
#if defined(HAVE_LIBRTAS)
#define PROCESSOR_MODULE_INFO	43

/* Decode a big-endian 16-bit value stored at @offset in @buf.
 * The bytes must be read as unsigned: plain char may be signed, and the
 * original unadorned buf[offset] would sign-extend byte values >= 0x80,
 * corrupting the decoded length/count. */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}

/* Query the RTAS PROCESSOR_MODULE_INFO system parameter (PowerPC) and
 * fill in the physical sockets/chips/cores-per-chip counters; the
 * counters are left zero when the information is unavailable. */
static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	ntypes = strbe16toh(buf, 2);

	assert(ntypes <= 1);
	if (!ntypes)
		return;

	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
#else
/* no librtas -- physical topology via RTAS is not available */
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
537
/*
 * Fill in @desc from uname(2), /proc/cpuinfo and the /sys CPU masks
 * (possible/present/online), plus the s390 machine type and the PowerPC
 * physical topology.  Exits on fatal errors (uname failure, missing
 * "possible" mask).
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- lookup() refuses to overwrite an already found tag,
	 * so for each key the first /proc/cpuinfo line wins */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ;	/* s390 */
		else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ;	/* s390 */
		else if (lookup(buf, "flags", &desc->flags)) ;		/* x86 */
		else if (lookup(buf, "features", &desc->flags)) ;	/* s390 */
		else if (lookup(buf, "type", &desc->flags)) ;		/* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
		else if (lookup_cache(buf, desc)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* surround with spaces so strstr() cannot match a
		 * substring of a longer flag name */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;		/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (path_exist(_PATH_SYS_CPU "/kernel_max"))
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	if (path_exist(_PATH_SYS_CPU "/possible")) {
		cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		/* build the sequential-index -> cpuX-number mapping, see the
		 * idx2cpunum comment in struct lscpu_desc */
		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET(num, tmp))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(tmp);
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (path_exist(_PATH_SYS_CPU "/present")) {
		desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);
	}

	/* get mask for online CPUs */
	if (path_exist(_PATH_SYS_CPU "/online")) {
		desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);
	}

	/* get dispatching mode */
	if (path_exist(_PATH_SYS_CPU "/dispatching"))
		desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
	else
		desc->dispatching = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);

	/* s390 machine type, first "Type:" line of /proc/sysinfo */
	if (path_exist(_PATH_PROC_SYSINFO)) {
		FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);

		while (fd && fgets(buf, sizeof(buf), fd) != NULL && !desc->machinetype)
			lookup(buf, "Type", &desc->machinetype);
		if (fd)
			fclose(fd);
	}
}
663
664 static int
665 has_pci_device(unsigned int vendor, unsigned int device)
666 {
667 FILE *f;
668 unsigned int num, fn, ven, dev;
669 int res = 1;
670
671 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
672 if (!f)
673 return 0;
674
675 /* for more details about bus/pci/devices format see
676 * drivers/pci/proc.c in linux kernel
677 */
678 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
679 &num, &fn, &ven, &dev) == 4) {
680
681 if (ven == vendor && dev == device)
682 goto found;
683 }
684
685 res = 0;
686 found:
687 fclose(f);
688 return res;
689 }
690
#if defined(__x86_64__) || defined(__i386__)

/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/* Execute the CPUID instruction for leaf @op and store the four result
 * registers; ECX is zeroed on input (sub-leaf 0). */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
				 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}

/* Identify the hypervisor vendor from the 12-byte CPUID signature
 * (EBX:ECX:EDX of the hypervisor info leaf); leaves desc->hyper
 * untouched when no known signature matches. */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char hyper_vendor_id[13];

	memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));

	cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
	memcpy(hyper_vendor_id + 0, &ebx, 4);
	memcpy(hyper_vendor_id + 4, &ecx, 4);
	memcpy(hyper_vendor_id + 8, &edx, 4);
	hyper_vendor_id[12] = '\0';

	if (!hyper_vendor_id[0])
		return;

	if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
		desc->hyper = HYPER_XEN;
	else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
		desc->hyper = HYPER_KVM;
	else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
		desc->hyper = HYPER_MSHV;
	else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
		desc->hyper = HYPER_VMWARE;
	else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
		desc->hyper = HYPER_SPAR;
}

#else /* ! (__x86_64__ || __i386__) */
/* CPUID is x86-only; on other architectures this probe is a no-op */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
756
/*
 * Check whether the device-tree "compatible" property file at @path
 * contains the string @str.  The property is a sequence of
 * NUL-terminated strings; returns 1 on an exact match, 0 otherwise.
 */
static int is_compatible(const char *path, const char *str)
{
	FILE *fd = path_fopen("r", 0, "%s", path);
	char buf[256];
	size_t offset = 0, len;

	if (!fd)
		return 0;

	memset(buf, 0, sizeof(buf));
	len = fread(buf, 1, sizeof(buf) - 1, fd);
	fclose(fd);

	/* walk the NUL-separated entries, comparing each one */
	while (offset < len) {
		if (strcmp(buf + offset, str) == 0)
			return 1;
		offset += strlen(buf + offset) + 1;
	}

	return 0;
}
779
/*
 * PowerPC-specific hypervisor detection via /proc: recognizes legacy
 * iSeries (OS/400), PowerNV bare metal, PowerVM/pHyp LPARs and
 * qemu/KVM pseries guests.  Returns the detected desc->hyper value
 * (HYPER_NONE when nothing matched).
 */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	/* caller must not have detected a hypervisor yet */
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (path_exist("/proc/iSeries")) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;
		/* a partition named "full" marks the non-virtualized case */
		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
817
#ifdef INCLUDE_VMWARE_BDOOR

#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

/* Issue the VMware backdoor "get version" request: an IN from the magic
 * I/O port that the VMware monitor intercepts.  On real hardware this
 * typically faults (see is_vmware_platform() below). */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}

static jmp_buf segv_handler_env;

/* SIGSEGV handler: bail out of the backdoor probe via siglongjmp() */
static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}

/* Probe for the VMware backdoor; returns 1 when running under VMware. */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;	/* got here via siglongjmp() -> not VMware */

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	/* VMware reflects the magic value back in EBX */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}

#else /* ! INCLUDE_VMWARE_BDOOR */

/* backdoor probe disabled (non-x86 or ASAN build) */
static int
is_vmware_platform(void)
{
	return 0;
}

#endif /* INCLUDE_VMWARE_BDOOR */
908
909 static void
910 read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
911 {
912 FILE *fd;
913
914 if (mod->system != SYSTEM_SNAPSHOT) {
915 read_hypervisor_cpuid(desc);
916 if (!desc->hyper)
917 desc->hyper = read_hypervisor_dmi();
918 if (!desc->hyper && is_vmware_platform())
919 desc->hyper = HYPER_VMWARE;
920 }
921
922 if (desc->hyper) {
923 desc->virtype = VIRT_FULL;
924
925 if (desc->hyper == HYPER_XEN) {
926 uint32_t features;
927
928 fd = path_fopen("r", 0, _PATH_SYS_HYP_FEATURES);
929 if (fd && fscanf(fd, "%x", &features) == 1) {
930 /* Xen PV domain */
931 if (features & XEN_FEATURES_PV_MASK)
932 desc->virtype = VIRT_PARA;
933 /* Xen PVH domain */
934 else if ((features & XEN_FEATURES_PVH_MASK)
935 == XEN_FEATURES_PVH_MASK)
936 desc->virtype = VIRT_PARA;
937 fclose(fd);
938 } else {
939 err(EXIT_FAILURE, _("failed to read from: %s"),
940 _PATH_SYS_HYP_FEATURES);
941 }
942 }
943 } else if (read_hypervisor_powerpc(desc) > 0) {}
944
945 /* Xen para-virt or dom0 */
946 else if (path_exist(_PATH_PROC_XEN)) {
947 int dom0 = 0;
948 fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
949
950 if (fd) {
951 char buf[256];
952
953 if (fscanf(fd, "%255s", buf) == 1 &&
954 !strcmp(buf, "control_d"))
955 dom0 = 1;
956 fclose(fd);
957 }
958 desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
959 desc->hyper = HYPER_XEN;
960
961 /* Xen full-virt on non-x86_64 */
962 } else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
963 desc->hyper = HYPER_XEN;
964 desc->virtype = VIRT_FULL;
965 } else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
966 desc->hyper = HYPER_VMWARE;
967 desc->virtype = VIRT_FULL;
968 } else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
969 desc->hyper = HYPER_VBOX;
970 desc->virtype = VIRT_FULL;
971
972 /* IBM PR/SM */
973 } else if (path_exist(_PATH_PROC_SYSINFO)) {
974 FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
975 char buf[BUFSIZ];
976
977 if (!sysinfo_fd)
978 return;
979 desc->hyper = HYPER_IBM;
980 desc->hypervisor = "PR/SM";
981 desc->virtype = VIRT_FULL;
982 while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
983 char *str;
984
985 if (!strstr(buf, "Control Program:"))
986 continue;
987 if (!strstr(buf, "KVM"))
988 desc->hyper = HYPER_IBM;
989 else
990 desc->hyper = HYPER_KVM;
991 str = strchr(buf, ':');
992 if (!str)
993 continue;
994 xasprintf(&str, "%s", str + 1);
995
996 /* remove leading, trailing and repeating whitespace */
997 while (*str == ' ')
998 str++;
999 desc->hypervisor = str;
1000 str += strlen(str) - 1;
1001 while ((*str == '\n') || (*str == ' '))
1002 *(str--) = '\0';
1003 while ((str = strstr(desc->hypervisor, " ")))
1004 memmove(str, str + 1, strlen(str));
1005 }
1006 fclose(sysinfo_fd);
1007 }
1008
1009 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
1010 * /proc/bc should not */
1011 else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
1012 desc->hyper = HYPER_PARALLELS;
1013 desc->virtype = VIRT_CONT;
1014
1015 /* IBM */
1016 } else if (desc->vendor &&
1017 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
1018 strcmp(desc->vendor, "IBM/S390") == 0)) {
1019 desc->hyper = HYPER_IBM;
1020 desc->virtype = VIRT_FULL;
1021
1022 /* User-mode-linux */
1023 } else if (desc->modelname && strstr(desc->modelname, "UML")) {
1024 desc->hyper = HYPER_UML;
1025 desc->virtype = VIRT_PARA;
1026
1027 /* Linux-VServer */
1028 } else if (path_exist(_PATH_PROC_STATUS)) {
1029 char buf[BUFSIZ];
1030 char *val = NULL;
1031
1032 fd = path_fopen("r", 1, _PATH_PROC_STATUS);
1033 while (fgets(buf, sizeof(buf), fd) != NULL) {
1034 if (lookup(buf, "VxID", &val))
1035 break;
1036 }
1037 fclose(fd);
1038
1039 if (val) {
1040 while (isdigit(*val))
1041 ++val;
1042 if (!*val) {
1043 desc->hyper = HYPER_VSERVER;
1044 desc->virtype = VIRT_CONT;
1045 }
1046 }
1047 }
1048 }
1049
1050 /* add @set to the @ary, unnecessary set is deallocated. */
1051 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
1052 {
1053 int i;
1054 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1055
1056 if (!ary)
1057 return -1;
1058
1059 for (i = 0; i < *items; i++) {
1060 if (CPU_EQUAL_S(setsize, set, ary[i]))
1061 break;
1062 }
1063 if (i == *items) {
1064 ary[*items] = set;
1065 ++*items;
1066 return 0;
1067 }
1068 CPU_FREE(set);
1069 return 1;
1070 }
1071
1072 static void
1073 read_topology(struct lscpu_desc *desc, int idx)
1074 {
1075 cpu_set_t *thread_siblings, *core_siblings;
1076 cpu_set_t *book_siblings, *drawer_siblings;
1077 int coreid, socketid, bookid, drawerid;
1078 int i, num = real_cpu_num(desc, idx);
1079
1080 if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
1081 return;
1082
1083 thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1084 "/cpu%d/topology/thread_siblings", num);
1085 core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1086 "/cpu%d/topology/core_siblings", num);
1087 book_siblings = NULL;
1088 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
1089 book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1090 "/cpu%d/topology/book_siblings", num);
1091 drawer_siblings = NULL;
1092 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_siblings", num))
1093 drawer_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
1094 "/cpu%d/topology/drawer_siblings", num);
1095 coreid = -1;
1096 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/core_id", num))
1097 coreid = path_read_s32(_PATH_SYS_CPU
1098 "/cpu%d/topology/core_id", num);
1099 socketid = -1;
1100 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/physical_package_id", num))
1101 socketid = path_read_s32(_PATH_SYS_CPU
1102 "/cpu%d/topology/physical_package_id", num);
1103 bookid = -1;
1104 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_id", num))
1105 bookid = path_read_s32(_PATH_SYS_CPU
1106 "/cpu%d/topology/book_id", num);
1107 drawerid = -1;
1108 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_id", num))
1109 drawerid = path_read_s32(_PATH_SYS_CPU
1110 "/cpu%d/topology/drawer_id", num);
1111
1112 if (!desc->coremaps) {
1113 int ndrawers, nbooks, nsockets, ncores, nthreads;
1114 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1115
1116 /* threads within one core */
1117 nthreads = CPU_COUNT_S(setsize, thread_siblings);
1118 if (!nthreads)
1119 nthreads = 1;
1120
1121 /* cores within one socket */
1122 ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
1123 if (!ncores)
1124 ncores = 1;
1125
1126 /* number of sockets within one book. Because of odd /
1127 * non-present cpu maps and to keep calculation easy we make
1128 * sure that nsockets and nbooks is at least 1.
1129 */
1130 nsockets = desc->ncpus / nthreads / ncores;
1131 if (!nsockets)
1132 nsockets = 1;
1133
1134 /* number of books */
1135 nbooks = desc->ncpus / nthreads / ncores / nsockets;
1136 if (!nbooks)
1137 nbooks = 1;
1138
1139 /* number of drawers */
1140 ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
1141 if (!ndrawers)
1142 ndrawers = 1;
1143
1144 /* all threads, see also read_basicinfo()
1145 * -- fallback for kernels without
1146 * /sys/devices/system/cpu/online.
1147 */
1148 if (!desc->nthreads)
1149 desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
1150
1151 /* For each map we make sure that it can have up to ncpuspos
1152 * entries. This is because we cannot reliably calculate the
1153 * number of cores, sockets and books on all architectures.
1154 * E.g. completely virtualized architectures like s390 may
1155 * have multiple sockets of different sizes.
1156 */
1157 desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1158 desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1159 desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1160 desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1161 for (i = 0; i < desc->ncpuspos; i++)
1162 desc->coreids[i] = desc->socketids[i] = -1;
1163 if (book_siblings) {
1164 desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1165 desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1166 for (i = 0; i < desc->ncpuspos; i++)
1167 desc->bookids[i] = -1;
1168 }
1169 if (drawer_siblings) {
1170 desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1171 desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1172 for (i = 0; i < desc->ncpuspos; i++)
1173 desc->drawerids[i] = -1;
1174 }
1175 }
1176
1177 add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
1178 desc->coreids[idx] = coreid;
1179 add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
1180 desc->socketids[idx] = socketid;
1181 if (book_siblings) {
1182 add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
1183 desc->bookids[idx] = bookid;
1184 }
1185 if (drawer_siblings) {
1186 add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
1187 desc->drawerids[idx] = drawerid;
1188 }
1189 }
1190
1191 static void
1192 read_polarization(struct lscpu_desc *desc, int idx)
1193 {
1194 char mode[64];
1195 int num = real_cpu_num(desc, idx);
1196
1197 if (desc->dispatching < 0)
1198 return;
1199 if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
1200 return;
1201 if (!desc->polarization)
1202 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1203 path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
1204 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1205 desc->polarization[idx] = POLAR_VLOW;
1206 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1207 desc->polarization[idx] = POLAR_VMEDIUM;
1208 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1209 desc->polarization[idx] = POLAR_VHIGH;
1210 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1211 desc->polarization[idx] = POLAR_HORIZONTAL;
1212 else
1213 desc->polarization[idx] = POLAR_UNKNOWN;
1214 }
1215
1216 static void
1217 read_address(struct lscpu_desc *desc, int idx)
1218 {
1219 int num = real_cpu_num(desc, idx);
1220
1221 if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
1222 return;
1223 if (!desc->addresses)
1224 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1225 desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
1226 }
1227
1228 static void
1229 read_configured(struct lscpu_desc *desc, int idx)
1230 {
1231 int num = real_cpu_num(desc, idx);
1232
1233 if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
1234 return;
1235 if (!desc->configured)
1236 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1237 desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
1238 }
1239
1240 static void
1241 read_max_mhz(struct lscpu_desc *desc, int idx)
1242 {
1243 int num = real_cpu_num(desc, idx);
1244
1245 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
1246 return;
1247 if (!desc->maxmhz)
1248 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1249 xasprintf(&(desc->maxmhz[idx]), "%.4f",
1250 (float)path_read_s32(_PATH_SYS_CPU
1251 "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
1252 }
1253
1254 static void
1255 read_min_mhz(struct lscpu_desc *desc, int idx)
1256 {
1257 int num = real_cpu_num(desc, idx);
1258
1259 if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
1260 return;
1261 if (!desc->minmhz)
1262 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1263 xasprintf(&(desc->minmhz[idx]), "%.4f",
1264 (float)path_read_s32(_PATH_SYS_CPU
1265 "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
1266 }
1267
1268 static int
1269 cachecmp(const void *a, const void *b)
1270 {
1271 struct cpu_cache *c1 = (struct cpu_cache *) a;
1272 struct cpu_cache *c2 = (struct cpu_cache *) b;
1273
1274 return strcmp(c2->name, c1->name);
1275 }
1276
/*
 * Probes /sys/.../cpu<num>/cache/index<i> for the CPU at logical index
 * @idx: on the first call it counts the cache levels, then for every level
 * it records the name ("L1d", "L2", ...), size, and which CPUs share it.
 */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* first call: count indexN directories to size the array */
		while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, desc->ncaches))
			desc->ncaches++;

		if (!desc->ncaches)
			return;

		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
					num, i))
			continue;
		/* name/size are per-cache, not per-CPU: fill them only once */
		if (!ca->name) {
			int type, level;

			/* cache type */
			path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/type",
					num, i);
			if (!strcmp(buf, "Data"))
				type = 'd';
			else if (!strcmp(buf, "Instruction"))
				type = 'i';
			else
				type = 0;	/* unified cache */

			/* cache level */
			level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
					num, i);
			/* compose e.g. "L1d", "L1i", or just "L2" for unified */
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", level);

			ca->name = xstrdup(buf);

			/* cache size */
			if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
				path_read_str(buf, sizeof(buf),
					_PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
				ca->size = xstrdup(buf);
			} else {
				ca->size = xstrdup("unknown size");
			}
		}

		/* information about how CPUs share different caches */
		map = path_read_cpuset(maxcpus,
				_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
				num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		/* duplicate maps are detected and freed by the helper */
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1345
1346 static inline int is_node_dirent(struct dirent *d)
1347 {
1348 return
1349 d &&
1350 #ifdef _DIRENT_HAVE_D_TYPE
1351 (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
1352 #endif
1353 strncmp(d->d_name, "node", 4) == 0 &&
1354 isdigit_string(d->d_name + 4);
1355 }
1356
/*
 * qsort() comparator for node numbers (ascending). Uses an overflow-safe
 * three-way compare; a plain "*a - *b" invokes signed-overflow UB for
 * operands of opposite extreme signs.
 */
static int
nodecmp(const void *ap, const void *bp)
{
	const int *a = ap, *b = bp;

	return (*a > *b) - (*a < *b);
}
1363
1364 static void
1365 read_nodes(struct lscpu_desc *desc)
1366 {
1367 int i = 0;
1368 DIR *dir;
1369 struct dirent *d;
1370 char *path;
1371
1372 /* number of NUMA node */
1373 path = path_strdup(_PATH_SYS_NODE);
1374 dir = opendir(path);
1375 free(path);
1376
1377 while (dir && (d = readdir(dir))) {
1378 if (is_node_dirent(d))
1379 desc->nnodes++;
1380 }
1381
1382 if (!desc->nnodes) {
1383 if (dir)
1384 closedir(dir);
1385 return;
1386 }
1387
1388 desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
1389 desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));
1390
1391 if (dir) {
1392 rewinddir(dir);
1393 while ((d = readdir(dir)) && i < desc->nnodes) {
1394 if (is_node_dirent(d))
1395 desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
1396 _("Failed to extract the node number"));
1397 }
1398 closedir(dir);
1399 qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
1400 }
1401
1402 /* information about how nodes share different CPUs */
1403 for (i = 0; i < desc->nnodes; i++)
1404 desc->nodemaps[i] = path_read_cpuset(maxcpus,
1405 _PATH_SYS_NODE "/node%d/cpumap",
1406 desc->idx2nodenum[i]);
1407 }
1408
/*
 * Formats the value of column @col for the CPU at logical index @idx into
 * @buf (of size @bufsz). Returns @buf on success, NULL when a CACHE value
 * would not fit; an empty string in @buf means "no data for this cell".
 */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CORE:
		/* --physical: kernel core_id (or "-" if unknown); otherwise a
		 * logical zero-based ID derived from the shared-map index */
		if (mod->physical) {
			if (desc->coreids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->coreids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->coremaps,
					     desc->ncores, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_SOCKET:
		if (mod->physical) {
			if (desc->socketids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->socketids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->socketmaps,
					     desc->nsockets, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_NODE:
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_DRAWER:
		if (mod->physical) {
			if (desc->drawerids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->drawermaps,
					     desc->ndrawers, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_BOOK:
		if (mod->physical) {
			if (desc->bookids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->bookids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->bookmaps,
					     desc->nbooks, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CACHE:
	{
		char *p = buf;
		size_t sz = bufsz;
		int j;

		/* one ID per cache, highest level first, separated by
		 * ',' (compat/-p mode) or ':' */
		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				if (x < 0 || (size_t) x >= sz)
					return NULL;	/* truncated */
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;	/* no room for separator */
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CONFIGURED:
		if (!desc->configured)
			break;
		/* parsable output uses Y/N, readable uses yes/no */
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_MAXMHZ:
		if (desc->maxmhz)
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_MINMHZ:
		if (desc->minmhz)
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}
1549
/*
 * Formats the header of column @col into @buf (of size @bufsz). For the
 * CACHE column it joins all cache names, highest level first, separated by
 * ',' (compat) or ':'. Returns @buf, or NULL when the names do not fit.
 */
static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
		char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;	/* truncated */
			sz -= x;
			p += x;
			if (i > 0) {
				if (sz < 2)
					return NULL;	/* no room for separator */
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
		/* no caches: fall through to the generic column name */
	}
	snprintf(buf, bufsz, "%s", coldescs[col].name);
	return buf;
}
1582
1583 /*
1584 * [-p] backend, we support two parsable formats:
1585 *
1586 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1587 * output and it contains fixed set of the columns. The CACHE columns are at
1588 * the end of the line and the CACHE is not printed if the number of the caches
1589 * is zero. The CACHE columns are separated by two commas, for example:
1590 *
1591 * $ lscpu --parse
1592 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1593 * 0,0,0,0,,0,0,0
1594 * 1,1,0,0,,1,1,0
1595 *
 * 2) "user defined output" -- this format always prints all columns without a
 * special prefix for the CACHE column. If there are no caches then the column is
1598 * empty and the header "Cache" is printed rather than a real name of the cache.
1599 * The CACHE columns are separated by ':'.
1600 *
1601 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1602 * # CPU,Core,Socket,Node,L1d:L1i:L2
1603 * 0,0,0,0,0:0:0
1604 * 1,1,0,0,1:1:0
1605 */
1606 static void
1607 print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1608 struct lscpu_modifier *mod)
1609 {
1610 char buf[BUFSIZ], *data;
1611 int i;
1612
1613 /*
1614 * Header
1615 */
1616 printf(_(
1617 "# The following is the parsable format, which can be fed to other\n"
1618 "# programs. Each different item in every column has an unique ID\n"
1619 "# starting from zero.\n"));
1620
1621 fputs("# ", stdout);
1622 for (i = 0; i < ncols; i++) {
1623 int col = cols[i];
1624
1625 if (col == COL_CACHE) {
1626 if (mod->compat && !desc->ncaches)
1627 continue;
1628 if (mod->compat && i != 0)
1629 putchar(',');
1630 }
1631 if (i > 0)
1632 putchar(',');
1633
1634 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1635
1636 if (data && * data && col != COL_CACHE &&
1637 !coldescs[col].is_abbr) {
1638 /*
1639 * For normal column names use mixed case (e.g. "Socket")
1640 */
1641 char *p = data + 1;
1642
1643 while (p && *p != '\0') {
1644 *p = tolower((unsigned int) *p);
1645 p++;
1646 }
1647 }
1648 fputs(data && *data ? data : "", stdout);
1649 }
1650 putchar('\n');
1651
1652 /*
1653 * Data
1654 */
1655 for (i = 0; i < desc->ncpuspos; i++) {
1656 int c;
1657 int cpu = real_cpu_num(desc, i);
1658
1659 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1660 continue;
1661 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1662 continue;
1663 if (desc->present && !is_cpu_present(desc, cpu))
1664 continue;
1665 for (c = 0; c < ncols; c++) {
1666 if (mod->compat && cols[c] == COL_CACHE) {
1667 if (!desc->ncaches)
1668 continue;
1669 if (c > 0)
1670 putchar(',');
1671 }
1672 if (c > 0)
1673 putchar(',');
1674
1675 data = get_cell_data(desc, i, cols[c], mod,
1676 buf, sizeof(buf));
1677 fputs(data && *data ? data : "", stdout);
1678 }
1679 putchar('\n');
1680 }
1681 }
1682
/*
 * [-e] backend -- renders one table row per CPU via libsmartcols.
 */
static void
print_readable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to initialize output table"));

	/* one column per requested field */
	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, xstrdup(data), 0, 0))
			err(EXIT_FAILURE, _("failed to initialize output column"));
	}

	/* one line per CPU matching the online/offline/present filters */
	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to initialize output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";	/* placeholder for empty cells */
			scols_line_set_data(line, c, data);
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1735
1736 /* output formats "<key> <value>"*/
1737 #define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
1738 #define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
1739
1740 static void
1741 print_cpuset(const char *key, cpu_set_t *set, int hex)
1742 {
1743 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1744 size_t setbuflen = 7 * maxcpus;
1745 char setbuf[setbuflen], *p;
1746
1747 if (hex) {
1748 p = cpumask_create(setbuf, setbuflen, set, setsize);
1749 printf("%-23s0x%s\n", key, p);
1750 } else {
1751 p = cpulist_create(setbuf, setbuflen, set, setsize);
1752 print_s(key, p);
1753 }
1754
1755 }
1756
1757 /*
1758 * default output
1759 */
/*
 * Default (no-option) backend: prints the summary as "<key>  <value>"
 * pairs. Most fields are printed only when the corresponding data was
 * detected; ordering matches the traditional lscpu output.
 */
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	char buf[512];
	int i;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	print_s(_("Architecture:"), desc->arch);

	if (desc->mode) {
		/* compose e.g. "32-bit, 64-bit" from the mode bits */
		char mbuf[64], *p = mbuf;

		if (desc->mode & MODE_32BIT) {
			strcpy(p, "32-bit, ");
			p += 8;
		}
		if (desc->mode & MODE_64BIT) {
			strcpy(p, "64-bit, ");
			p += 8;
		}
		*(p - 2) = '\0';	/* chop the trailing ", " */
		print_s(_("CPU op-mode(s):"), mbuf);
	}
#if !defined(WORDS_BIGENDIAN)
	print_s(_("Byte Order:"), "Little Endian");
#else
	print_s(_("Byte Order:"), "Big Endian");
#endif
	print_n(_("CPU(s):"), desc->ncpus);

	if (desc->online)
		print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
					_("On-line CPU(s) list:"),
				desc->online, mod->hex);

	if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
		cpu_set_t *set;

		/* Linux kernel provides cpuset of off-line CPUs that contains
		 * all configured CPUs (see /sys/devices/system/cpu/offline),
		 * but want to print real (present in system) off-line CPUs only.
		 */
		set = cpuset_alloc(maxcpus, NULL, NULL);
		if (!set)
			err(EXIT_FAILURE, _("failed to callocate cpu set"));
		CPU_ZERO_S(setsize, set);
		for (i = 0; i < desc->ncpuspos; i++) {
			int cpu = real_cpu_num(desc, i);
			if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
				CPU_SET_S(cpu, setsize, set);
		}
		print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
					_("Off-line CPU(s) list:"),
			     set, mod->hex);
		cpuset_free(set);
	}

	if (desc->nsockets) {
		int threads_per_core, cores_per_socket, sockets_per_book;
		int books_per_drawer, drawers;

		threads_per_core = cores_per_socket = sockets_per_book = 0;
		books_per_drawer = drawers = 0;
		/* s390 detects its cpu topology via /proc/sysinfo, if present.
		 * Using simply the cpu topology masks in sysfs will not give
		 * usable results since everything is virtualized. E.g.
		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
		 * have five cpus.
		 * If the cpu topology is not exported (e.g. 2nd level guest)
		 * fall back to old calculation scheme.
		 */
		if (path_exist(_PATH_PROC_SYSINFO)) {
			FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
			char pbuf[BUFSIZ];
			int t0, t1;

			while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
				if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
					   &t0, &t1, &drawers, &books_per_drawer,
					   &sockets_per_book,
					   &cores_per_socket) == 6)
					break;
			}
			if (fd)
				fclose(fd);
		}
		if (desc->mtid)
			threads_per_core = atoi(desc->mtid) + 1;
		/* GNU "?:" -- use the sysinfo value when available, otherwise
		 * fall back to the counts derived from the sysfs masks */
		print_n(_("Thread(s) per core:"),
			threads_per_core ?: desc->nthreads / desc->ncores);
		print_n(_("Core(s) per socket:"),
			cores_per_socket ?: desc->ncores / desc->nsockets);
		if (desc->nbooks) {
			print_n(_("Socket(s) per book:"),
				sockets_per_book ?: desc->nsockets / desc->nbooks);
			if (desc->ndrawers) {
				print_n(_("Book(s) per drawer:"),
					books_per_drawer ?: desc->nbooks / desc->ndrawers);
				print_n(_("Drawer(s):"), drawers ?: desc->ndrawers);
			} else {
				print_n(_("Book(s):"), books_per_drawer ?: desc->nbooks);
			}
		} else {
			print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
		}
	}
	if (desc->nnodes)
		print_n(_("NUMA node(s):"), desc->nnodes);
	if (desc->vendor)
		print_s(_("Vendor ID:"), desc->vendor);
	if (desc->machinetype)
		print_s(_("Machine type:"), desc->machinetype);
	if (desc->family)
		print_s(_("CPU family:"), desc->family);
	if (desc->model || desc->revision)
		print_s(_("Model:"), desc->revision ? desc->revision : desc->model);
	if (desc->modelname || desc->cpu)
		print_s(_("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
	if (desc->stepping)
		print_s(_("Stepping:"), desc->stepping);
	if (desc->mhz)
		print_s(_("CPU MHz:"), desc->mhz);
	if (desc->dynamic_mhz)
		print_s(_("CPU dynamic MHz:"), desc->dynamic_mhz);
	if (desc->static_mhz)
		print_s(_("CPU static MHz:"), desc->static_mhz);
	if (desc->maxmhz)
		print_s(_("CPU max MHz:"), desc->maxmhz[0]);
	if (desc->minmhz)
		print_s(_("CPU min MHz:"), desc->minmhz[0]);
	if (desc->bogomips)
		print_s(_("BogoMIPS:"), desc->bogomips);
	if (desc->virtflag) {
		if (!strcmp(desc->virtflag, "svm"))
			print_s(_("Virtualization:"), "AMD-V");
		else if (!strcmp(desc->virtflag, "vmx"))
			print_s(_("Virtualization:"), "VT-x");
	}
	if (desc->hypervisor)
		print_s(_("Hypervisor:"), desc->hypervisor);
	if (desc->hyper) {
		print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
		print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
	}
	if (desc->dispatching >= 0)
		print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
	if (desc->ncaches) {
		/* caches are sorted descending, print in that order */
		char cbuf[512];

		for (i = desc->ncaches - 1; i >= 0; i--) {
			snprintf(cbuf, sizeof(cbuf),
					_("%s cache:"), desc->caches[i].name);
			print_s(cbuf, desc->caches[i].size);
		}
	}

	if (desc->necaches) {
		char cbuf[512];

		for (i = desc->necaches - 1; i >= 0; i--) {
			snprintf(cbuf, sizeof(cbuf),
					_("%s cache:"), desc->ecaches[i].name);
			print_s(cbuf, desc->ecaches[i].size);
		}
	}

	for (i = 0; i < desc->nnodes; i++) {
		snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
		print_cpuset(buf, desc->nodemaps[i], mod->hex);
	}

	if (desc->flags)
		print_s(_("Flags:"), desc->flags);

	if (desc->physsockets) {
		print_n(_("Physical sockets:"), desc->physsockets);
		print_n(_("Physical chips:"), desc->physchips);
		print_n(_("Physical cores/chip:"), desc->physcoresperchip);
	}
}
1940
/*
 * Prints the usage/help text to @out and exits; the exit status is
 * EXIT_FAILURE when the text goes to stderr (usage error), otherwise
 * EXIT_SUCCESS (explicit --help).
 */
static void __attribute__((__noreturn__)) usage(FILE *out)
{
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_SEPARATOR, out);
	fputs(_("Display information about the CPU architecture.\n"), out);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline print offline CPUs only\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
	fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(_(" -y, --physical print physical instead of logical IDs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	fputs(USAGE_HELP, out);
	fputs(USAGE_VERSION, out);

	/* list the column names accepted by --extended/--parse */
	fprintf(out, _("\nAvailable columns:\n"));

	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s %s\n", coldescs[i].name, _(coldescs[i].help));

	fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));

	exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}
1973
int main(int argc, char *argv[])
{
	struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
	struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
	int c, i;
	int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
	int cpu_modifier_specified = 0;	/* any of -a/-b/-c given? */

	static const struct option longopts[] = {
		{ "all",        no_argument,       0, 'a' },
		{ "online",     no_argument,       0, 'b' },
		{ "offline",    no_argument,       0, 'c' },
		{ "help",	no_argument,       0, 'h' },
		{ "extended",	optional_argument, 0, 'e' },
		{ "parse",	optional_argument, 0, 'p' },
		{ "sysroot",	required_argument, 0, 's' },
		{ "physical",	no_argument,	   0, 'y' },
		{ "hex",	no_argument,	   0, 'x' },
		{ "version",	no_argument,	   0, 'V' },
		{ NULL,		0, 0, 0 }
	};

	static const ul_excl_t excl[] = {	/* rows and cols in ASCII order */
		{ 'a','b','c' },	/* -a/-b/-c are mutually exclusive */
		{ 'e','p' },		/* so are -e/-p */
		{ 0 }
	};
	int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);
	atexit(close_stdout);

	while ((c = getopt_long(argc, argv, "abce::hp::s:xyV", longopts, NULL)) != -1) {

		err_exclusive_options(c, longopts, excl, excl_st);

		switch (c) {
		case 'a':
			mod->online = mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'b':
			mod->online = 1;
			cpu_modifier_specified = 1;
			break;
		case 'c':
			mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'h':
			usage(stdout);	/* noreturn -- no fallthrough */
		case 'p':
		case 'e':
			if (optarg) {
				/* accept both "-p=LIST" and "-pLIST" */
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
			break;
		case 's':
			path_set_prefix(optarg);
			mod->system = SYSTEM_SNAPSHOT;
			break;
		case 'x':
			mod->hex = 1;
			break;
		case 'y':
			mod->physical = 1;
			break;
		case 'V':
			printf(UTIL_LINUX_VERSION);
			return EXIT_SUCCESS;
		default:
			errtryhelp(EXIT_FAILURE);
		}
	}

	/* -a/-b/-c make sense only for the table-style outputs */
	if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
		fprintf(stderr,
			_("%s: options --all, --online and --offline may only "
			  "be used with options --extended or --parse.\n"),
			program_invocation_short_name);
		return EXIT_FAILURE;
	}

	if (argc != optind)
		usage(stderr);	/* no non-option arguments are accepted */

	/* set default cpu display mode if none was specified */
	if (!mod->online && !mod->offline) {
		mod->online = 1;
		mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
	}

	read_basicinfo(desc, mod);

	/* gather per-CPU details from sysfs */
	for (i = 0; i < desc->ncpuspos; i++) {
		/* only consider present CPUs */
		if (desc->present &&
		    !CPU_ISSET(real_cpu_num(desc, i), desc->present))
			continue;
		read_topology(desc, i);
		read_cache(desc, i);
		read_polarization(desc, i);
		read_address(desc, i);
		read_configured(desc, i);
		read_max_mhz(desc, i);
		read_min_mhz(desc, i);
	}

	/* sort caches by name, descending (L2 before L1i/L1d) */
	if (desc->caches)
		qsort(desc->caches, desc->ncaches,
				sizeof(struct cpu_cache), cachecmp);

	if (desc->ecaches)
		qsort(desc->ecaches, desc->necaches,
				sizeof(struct cpu_cache), cachecmp);

	read_nodes(desc);
	read_hypervisor(desc, mod);

	switch(mod->mode) {
	case OUTPUT_SUMMARY:
		print_summary(desc, mod);
		break;
	case OUTPUT_PARSABLE:
		if (!ncolumns) {
			/* default -p column set (compat with original lscpu) */
			columns[ncolumns++] = COL_CPU;
			columns[ncolumns++] = COL_CORE;
			columns[ncolumns++] = COL_SOCKET;
			columns[ncolumns++] = COL_NODE;
			columns[ncolumns++] = COL_CACHE;
			mod->compat = 1;
		}
		print_parsable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_READABLE:
		if (!ncolumns) {
			/* No list was given. Just print whatever is there. */
			columns[ncolumns++] = COL_CPU;
			if (desc->nodemaps)
				columns[ncolumns++] = COL_NODE;
			if (desc->drawermaps)
				columns[ncolumns++] = COL_DRAWER;
			if (desc->bookmaps)
				columns[ncolumns++] = COL_BOOK;
			if (desc->socketmaps)
				columns[ncolumns++] = COL_SOCKET;
			if (desc->coremaps)
				columns[ncolumns++] = COL_CORE;
			if (desc->caches)
				columns[ncolumns++] = COL_CACHE;
			if (desc->online)
				columns[ncolumns++] = COL_ONLINE;
			if (desc->configured)
				columns[ncolumns++] = COL_CONFIGURED;
			if (desc->polarization)
				columns[ncolumns++] = COL_POLARIZATION;
			if (desc->addresses)
				columns[ncolumns++] = COL_ADDRESS;
			if (desc->maxmhz)
				columns[ncolumns++] = COL_MAXMHZ;
			if (desc->minmhz)
				columns[ncolumns++] = COL_MINMHZ;
		}
		print_readable(desc, columns, ncolumns, mod);
		break;
	}

	return EXIT_SUCCESS;
}