]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu.c
lscpu: fix --caches order
[thirdparty/util-linux.git] / sys-utils / lscpu.c
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <sys/personality.h>
37
38 #if (defined(__x86_64__) || defined(__i386__))
39 # if !defined( __SANITIZE_ADDRESS__)
40 # define INCLUDE_VMWARE_BDOOR
41 # else
42 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
43 # endif
44 #endif
45
46 #ifdef INCLUDE_VMWARE_BDOOR
47 # include <stdint.h>
48 # include <signal.h>
49 # include <strings.h>
50 # include <setjmp.h>
51 # ifdef HAVE_SYS_IO_H
52 # include <sys/io.h>
53 # endif
54 #endif
55
56 #if defined(HAVE_LIBRTAS)
57 #include <librtas.h>
58 #endif
59
60 #include <libsmartcols.h>
61
62 #include "closestream.h"
63 #include "optutils.h"
64
65 #include "lscpu.h"
66
/* NOTE(review): upper bound used by cache handling elsewhere in the file;
 * not referenced within this chunk */
#define CACHE_MAX 100

/* /sys paths */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_HYP_FEATURES	"/sys/hypervisor/properties/features"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE		_PATH_SYS_SYSTEM "/node"

/* Xen Domain feature flag used for /sys/hypervisor/properties/features */
#define XENFEAT_supervisor_mode_kernel		3
#define XENFEAT_mmu_pt_update_preserve_ad	5
#define XENFEAT_hvm_callback_vector		8

/* bit masks tested against the hex value read from the features file */
#define XEN_FEATURES_PV_MASK	(1U << XENFEAT_mmu_pt_update_preserve_ad)
#define XEN_FEATURES_PVH_MASK	( (1U << XENFEAT_supervisor_mode_kernel) \
				| (1U << XENFEAT_hvm_callback_vector) )
83
/* human readable virtualization type names, indexed by VIRT_* */
static const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

/* hypervisor vendor names, indexed by HYPER_* */
static const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par",
	[HYPER_WSL]	= "Windows Subsystem for Linux"
};

/* PCI vendor IDs used by has_pci_device() fallbacks, indexed by HYPER_* */
static const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device IDs of the hypervisors' virtual graphics adapters,
 * indexed by HYPER_* (paired with hv_vendor_pci above) */
static const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};


/* dispatching modes */
static const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* CPU polarization names: {abbreviated, long} forms (s390) */
static struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};
142
static int maxcpus;	/* size in bits of kernel cpu mask */

/* non-zero if CPU @_cpu is set in @_d's online mask (0 when mask missing) */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
/* non-zero if CPU @_cpu is set in @_d's present mask (0 when mask missing) */
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* map a dense index (0..ncpuspos-1) to the real, possibly sparse CPU number */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])
153
/*
 * IDs
 */

/* columns of the default (per-CPU) output, indexes into coldescs_cpu[] */
enum {
	COL_CPU_CPU,
	COL_CPU_CORE,
	COL_CPU_SOCKET,
	COL_CPU_NODE,
	COL_CPU_BOOK,
	COL_CPU_DRAWER,
	COL_CPU_CACHE,
	COL_CPU_POLARIZATION,
	COL_CPU_ADDRESS,
	COL_CPU_CONFIGURED,
	COL_CPU_ONLINE,
	COL_CPU_MAXMHZ,
	COL_CPU_MINMHZ,
};

/* columns of the --caches output, indexes into coldescs_cache[] */
enum {
	COL_CACHE_ALLSIZE,
	COL_CACHE_LEVEL,
	COL_CACHE_NAME,
	COL_CACHE_ONESIZE,
	COL_CACHE_TYPE,
	COL_CACHE_WAYS,
};
181
182
/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header (also used for --output parsing) */
	const char *help;	/* translatable description for --help */

	int flags;		/* SCOLS_FL_* flags for libsmartcols */
	unsigned int is_abbr:1; /* name is abbreviation */
};

/* descriptors for the per-CPU columns, indexed by COL_CPU_* */
static struct lscpu_coldesc coldescs_cpu[] =
{
	[COL_CPU_CPU]          = { "CPU", N_("logical CPU number"), 0, 1 },
	[COL_CPU_CORE]         = { "CORE", N_("logical core number") },
	[COL_CPU_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_CPU_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_CPU_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_CPU_DRAWER]       = { "DRAWER", N_("logical drawer number") },
	[COL_CPU_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_CPU_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_CPU_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CPU_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_CPU_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_CPU_MAXMHZ]       = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_CPU_MINMHZ]       = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};

/* descriptors for the --caches columns, indexed by COL_CACHE_* */
static struct lscpu_coldesc coldescs_cache[] =
{
	[COL_CACHE_ALLSIZE]    = { "ALL-SIZE", N_("size of all system caches"), SCOLS_FL_RIGHT },
	[COL_CACHE_LEVEL]      = { "LEVEL", N_("cache level"), SCOLS_FL_RIGHT },
	[COL_CACHE_NAME]       = { "NAME", N_("cache name") },
	[COL_CACHE_ONESIZE]    = { "ONE-SIZE", N_("size of one cache"), SCOLS_FL_RIGHT },
	[COL_CACHE_TYPE]       = { "TYPE", N_("cache type") },
	[COL_CACHE_WAYS]       = { "WAYS", N_("ways of associativity"), SCOLS_FL_RIGHT }
};
219
220
221 static int get_cache_full_size(struct lscpu_desc *desc, struct cpu_cache *ca, uint64_t *res);
222
223 static int
224 cpu_column_name_to_id(const char *name, size_t namesz)
225 {
226 size_t i;
227
228 for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++) {
229 const char *cn = coldescs_cpu[i].name;
230
231 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
232 return i;
233 }
234 warnx(_("unknown column: %s"), name);
235 return -1;
236 }
237
238 static int
239 cache_column_name_to_id(const char *name, size_t namesz)
240 {
241 size_t i;
242
243 for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++) {
244 const char *cn = coldescs_cache[i].name;
245
246 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
247 return i;
248 }
249 warnx(_("unknown column: %s"), name);
250 return -1;
251 }
252
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>  : <key>"
 *
 * Returns 1 and stores a newly allocated, whitespace-trimmed copy of the
 * value in *@value on a match; returns 0 otherwise. The first match wins:
 * an already-filled *@value is never overwritten.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; <ctype.h> requires values representable as
	 * unsigned char, so cast to avoid UB on negative chars
	 * (CERT STR37-C) */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value -- strip trailing whitespace incl. the newline */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
295
296 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
297 * part of the cache topology information within the sysfs filesystem.
298 * This is true for all shared caches on e.g. s390. When there are layers of
299 * hypervisors in between it is not knows which CPUs share which caches.
300 * Therefore information about shared caches is only available in
301 * /proc/cpuinfo.
302 * Format is:
303 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
304 */
305 static int
306 lookup_cache(char *line, struct lscpu_desc *desc)
307 {
308 struct cpu_cache *cache;
309 long long size;
310 char *p, type;
311 int level;
312
313 /* Make sure line starts with "cache<nr> :" */
314 if (strncmp(line, "cache", 5))
315 return 0;
316 for (p = line + 5; isdigit(*p); p++);
317 for (; isspace(*p); p++);
318 if (*p != ':')
319 return 0;
320
321 p = strstr(line, "scope=") + 6;
322 /* Skip private caches, also present in sysfs */
323 if (!p || strncmp(p, "Private", 7) == 0)
324 return 0;
325 p = strstr(line, "level=");
326 if (!p || sscanf(p, "level=%d", &level) != 1)
327 return 0;
328 p = strstr(line, "type=") + 5;
329 if (!p || !*p)
330 return 0;
331 type = 0;
332 if (strncmp(p, "Data", 4) == 0)
333 type = 'd';
334 else if (strncmp(p, "Instruction", 11) == 0)
335 type = 'i';
336 else if (strncmp(p, "Unified", 7) == 0)
337 type = 'u';
338 p = strstr(line, "size=");
339 if (!p || sscanf(p, "size=%lld", &size) != 1)
340 return 0;
341
342 desc->necaches++;
343 desc->ecaches = xrealloc(desc->ecaches,
344 desc->necaches * sizeof(struct cpu_cache));
345 cache = &desc->ecaches[desc->necaches - 1];
346 memset(cache, 0 , sizeof(*cache));
347
348 if (type == 'i' || type == 'd')
349 xasprintf(&cache->name, "L%d%c", level, type);
350 else
351 xasprintf(&cache->name, "L%d", level);
352
353 cache->level = level;
354 cache->size = size * 1024;
355
356 cache->type = type == 'i' ? xstrdup("Instruction") :
357 type == 'd' ? xstrdup("Data") :
358 type == 'u' ? xstrdup("Unified") : NULL;
359 return 1;
360 }
361
/* Don't init the mode for platforms where we are not able to
 * detect that CPU supports 64-bit mode.
 *
 * Returns a bit mask of MODE_32BIT / MODE_64BIT describing the CPU op-modes
 * of the machine lscpu itself runs on; 0 for snapshots (foreign data).
 */
static int
init_mode(struct lscpu_modifier *mod)
{
	int m = 0;

	if (mod->system == SYSTEM_SNAPSHOT)
		/* reading info from any /{sys,proc} dump, don't mix it with
		 * information about our real CPU */
		return 0;

#if defined(__alpha__) || defined(__ia64__)
	m |= MODE_64BIT;	/* 64bit platforms only */
#endif
	/* platforms with 64bit flag in /proc/cpuinfo, define
	 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
	m |= MODE_32BIT;
#endif

#if defined(__aarch64__)
	{
		/* personality() is the most reliable way (since 4.7)
		 * to determine aarch32 support */
		int pers = personality(PER_LINUX32);
		if (pers != -1) {
			/* restore the original personality */
			personality(pers);
			m |= MODE_32BIT;
		}
		m |= MODE_64BIT;
	}
#endif
	return m;
}
399
400 #if defined(HAVE_LIBRTAS)
401 #define PROCESSOR_MODULE_INFO 43
/*
 * Read a big-endian 16-bit value from a byte buffer.
 *
 * The bytes must be converted through unsigned char: on targets where plain
 * char is signed, a byte >= 0x80 would be sign-extended by the shift/add
 * and yield a negative, wrong result.
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8)
		+ (unsigned char) buf[offset + 1];
}
406
/*
 * Fill in desc->physsockets/physchips/physcoresperchip from the RTAS
 * PROCESSOR_MODULE_INFO system parameter (PowerPC firmware). All three are
 * left 0 when the parameter is unavailable or malformed.
 *
 * The parameter is a big-endian 16-bit record stream:
 *   [0] total length, [2] number of module types, then per type
 *   [4] sockets, [6] chips, [8] cores per chip.
 */
static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	ntypes = strbe16toh(buf, 2);

	/* only a single module type is expected/handled */
	assert(ntypes <= 1);
	if (!ntypes)
		return;

	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
432 #else
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
	/* no librtas -- physical PowerPC info is unavailable */
}
437 #endif
438
439
/*
 * Read the architecture-independent basics: uname, /proc/cpuinfo fields,
 * CPU op-modes, the kernel CPU-mask size (global maxcpus) and the
 * possible/present/online masks, plus a few sysfs knobs.
 *
 * Exits on fatal errors (uname, missing cpuinfo, unreadable "possible").
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp;
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;
	cpu_set_t *cpuset = NULL;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));

	fp = ul_path_fopen(desc->procfs, "r", "cpuinfo");
	if (!fp)
		err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- each lookup() fills its target at most once (first one
	 * wins), so arch-specific aliases can share a destination field */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "CPU implementer", &desc->vendor)) ; /* ARM and aarch64 */
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "CPU part", &desc->model)) ; /* ARM and aarch64 */
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "CPU variant", &desc->stepping)) ; /* aarch64 */
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
		else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
		else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
		else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
		else if (lookup(buf, "Features", &desc->flags)) ; /* aarch64 */
		else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ; /* aarch64 */
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
		else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
		else if (lookup(buf, "address sizes", &desc->addrsz)) ; /* x86 */
		else if (lookup_cache(buf, desc)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* pad with spaces so each flag can be matched as " flag " */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (ul_path_read_s32(desc->syscpu, &maxcpus, "kernel_max") == 0)
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus += 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	if (ul_path_readf_cpulist(desc->syscpu, &cpuset, maxcpus, "possible") == 0) {
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, cpuset);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		/* build the dense-index -> real-CPU-number map */
		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET_S(num, setsize, cpuset))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(cpuset);
		cpuset = NULL;
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
		    _PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->present, maxcpus, "present") == 0)
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);

	/* get mask for online CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->online, maxcpus, "online") == 0)
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);

	/* get dispatching mode (s390); -1 when unsupported */
	if (ul_path_read_s32(desc->syscpu, &desc->dispatching, "dispatching") != 0)
		desc->dispatching = -1;

	/* get cpufreq boost mode; -1 when unsupported */
	if (ul_path_read_s32(desc->syscpu, &desc->freqboost, "cpufreq/boost") != 0)
		desc->freqboost = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);

	/* s390 machine type from /proc/sysinfo, if present */
	if ((fp = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		while (fgets(buf, sizeof(buf), fp) != NULL && !desc->machinetype)
			lookup(buf, "Type", &desc->machinetype);
		fclose(fp);
	}
}
572
573 static int
574 has_pci_device(struct lscpu_desc *desc, unsigned int vendor, unsigned int device)
575 {
576 FILE *f;
577 unsigned int num, fn, ven, dev;
578 int res = 1;
579
580 f = ul_path_fopen(desc->procfs, "r", "bus/pci/devices");
581 if (!f)
582 return 0;
583
584 /* for more details about bus/pci/devices format see
585 * drivers/pci/proc.c in linux kernel
586 */
587 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
588 &num, &fn, &ven, &dev) == 4) {
589
590 if (ven == vendor && dev == device)
591 goto found;
592 }
593
594 res = 0;
595 found:
596 fclose(f);
597 return res;
598 }
599
600 #if defined(__x86_64__) || defined(__i386__)
601
/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/* Execute the CPUID instruction for leaf @op (sub-leaf 0) and store the
 * resulting registers in *eax..*edx. */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches */
	"xchg %%ebx, %%esi;"
	"cpuid;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"cpuid;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	: "1" (op), "c"(0));
}
629
630 static void
631 read_hypervisor_cpuid(struct lscpu_desc *desc)
632 {
633 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
634 char hyper_vendor_id[13];
635
636 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
637
638 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
639 memcpy(hyper_vendor_id + 0, &ebx, 4);
640 memcpy(hyper_vendor_id + 4, &ecx, 4);
641 memcpy(hyper_vendor_id + 8, &edx, 4);
642 hyper_vendor_id[12] = '\0';
643
644 if (!hyper_vendor_id[0])
645 return;
646
647 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
648 desc->hyper = HYPER_XEN;
649 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
650 desc->hyper = HYPER_KVM;
651 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
652 desc->hyper = HYPER_MSHV;
653 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
654 desc->hyper = HYPER_VMWARE;
655 else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
656 desc->hyper = HYPER_SPAR;
657 }
658
659 #else /* ! (__x86_64__ || __i386__) */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
	/* CPUID is x86-only; nothing to detect on other architectures */
}
664 #endif
665
666 static int is_devtree_compatible(struct lscpu_desc *desc, const char *str)
667 {
668 FILE *fd = ul_path_fopen(desc->procfs, "r", "device-tree/compatible");
669
670 if (fd) {
671 char buf[256];
672 size_t i, len;
673
674 memset(buf, 0, sizeof(buf));
675 len = fread(buf, 1, sizeof(buf) - 1, fd);
676 fclose(fd);
677
678 for (i = 0; i < len;) {
679 if (!strcmp(&buf[i], str))
680 return 1;
681 i += strlen(&buf[i]);
682 i++;
683 }
684 }
685
686 return 0;
687 }
688
/*
 * PowerPC-specific hypervisor detection via /proc files and the device
 * tree. Fills desc->hyper and desc->virtype; the check order below is
 * significant (most specific first). Returns the detected HYPER_* value
 * (0 / HYPER_NONE when nothing matched).
 */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (ul_path_access(desc->procfs, F_OK, "iSeries") == 0) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_devtree_compatible(desc, "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (ul_path_access(desc->procfs, F_OK, "device-tree/ibm,partition-name") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/hmc-managed?") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {

		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;

		/* a partition named "full" runs unvirtualized */
		fd = ul_path_fopen(desc->procfs, "r", "device-tree/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_devtree_compatible(desc, "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
728
729 #ifdef INCLUDE_VMWARE_BDOOR
730
/* VMware "backdoor" I/O port interface: an inl from port 0x5658 with the
 * magic value in EAX is intercepted by the VMware hypervisor. */
#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

/* Issue the GETVERSION backdoor command; on real hardware this inl
 * typically faults (SIGSEGV), which the caller catches. */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches */
	"xchg %%ebx, %%esi;"
	"inl (%%dx), %%eax;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"inl (%%dx), %%eax;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	: "0" (VMWARE_BDOOR_MAGIC),
	  "1" (VMWARE_BDOOR_CMD_GETVERSION),
	  "2" (VMWARE_BDOOR_PORT),
	  "3" (0)
	: "memory");
}
758
/* jump target restored by is_vmware_platform() before probing */
static jmp_buf segv_handler_env;

/* SIGSEGV handler: abort the backdoor probe via non-local jump */
static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}
768
/*
 * Probe the VMware backdoor I/O port. Returns 1 when running under VMware,
 * 0 otherwise. The probe segfaults on non-VMware systems, so a temporary
 * SIGSEGV handler turns the fault into a clean "no" via siglongjmp.
 */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04).    kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	/* the hypervisor echoes the magic back in EBX */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
809
810 #else /* ! INCLUDE_VMWARE_BDOOR */
811
static int
is_vmware_platform(void)
{
	/* backdoor probe not compiled in; report "not VMware" */
	return 0;
}
817
818 #endif /* INCLUDE_VMWARE_BDOOR */
819
/*
 * Detect the hypervisor (desc->hyper) and virtualization type
 * (desc->virtype). Probes are ordered from most to least reliable:
 * WSL, CPUID/DMI/VMware-backdoor, PowerPC firmware, Xen proc/PCI,
 * s390 sysinfo, OpenVZ, IBM vendor strings, UML, Linux-VServer.
 */
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */

	if ((fd = ul_path_fopen(desc->procfs, "r", "sys/kernel/osrelease"))) {
		char buf[256];

		if (fgets(buf, sizeof(buf), fd) != NULL) {
			if (strstr(buf, "Microsoft")) {
				desc->hyper = HYPER_WSL;
				desc->virtype = VIRT_CONT;
			}
		}
		fclose(fd);
		if (desc->virtype)
			return;
	}

	/* hardware probes only make sense on the live system */
	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper) {
		desc->virtype = VIRT_FULL;

		if (desc->hyper == HYPER_XEN) {
			uint32_t features;

			fd = ul_prefix_fopen(desc->prefix, "r", _PATH_SYS_HYP_FEATURES);

			if (fd && fscanf(fd, "%x", &features) == 1) {
				/* Xen PV domain */
				if (features & XEN_FEATURES_PV_MASK)
					desc->virtype = VIRT_PARA;
				/* Xen PVH domain */
				else if ((features & XEN_FEATURES_PVH_MASK)
							== XEN_FEATURES_PVH_MASK)
					desc->virtype = VIRT_PARA;
			}
			if (fd)
				fclose(fd);
		}
	/* PowerPC detection sets desc->hyper/virtype itself */
	} else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (ul_path_access(desc->procfs, F_OK, "xen") == 0) {
		int dom0 = 0;

		fd = ul_path_fopen(desc->procfs, "r", "xen/capabilities");
		if (fd) {
			char buf[256];

			/* "control_d" marks the privileged dom0 domain */
			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		char buf[BUFSIZ];

		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			char *str, *p;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			p = strchr(buf, ':');
			if (!p)
				continue;
			xasprintf(&str, "%s",  p + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			/* NOTE(review): the needle below is two spaces in
			 * upstream (collapse runs of blanks) -- verify */
			while ((str = strstr(desc->hypervisor, "  ")))
				memmove(str, str + 1, strlen(str));
			break;
		}
		fclose(fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 *		      /proc/bc should not */
	else if (ul_path_access(desc->procfs, F_OK, "vz") == 0 &&
		 ul_path_access(desc->procfs, F_OK, "bc") != 0) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "self/status"))) {
		char buf[BUFSIZ];
		char *val = NULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID means we run inside a guest */
		if (val) {
			char *org = val;

			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
			free(org);
		}
	}
}
976
977 /* add @set to the @ary, unnecessary set is deallocated. */
978 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
979 {
980 int i;
981 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
982
983 if (!ary)
984 return -1;
985
986 for (i = 0; i < *items; i++) {
987 if (CPU_EQUAL_S(setsize, set, ary[i]))
988 break;
989 }
990 if (i == *items) {
991 ary[*items] = set;
992 ++*items;
993 return 0;
994 }
995 CPU_FREE(set);
996 return 1;
997 }
998
/*
 * Read sysfs topology data (siblings masks and core/socket/book/drawer
 * IDs) for the CPU at dense index @idx. On the first call the per-CPU
 * map and ID arrays are lazily allocated and sized.
 */
static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings;
	cpu_set_t *book_siblings, *drawer_siblings;
	int coreid, socketid, bookid, drawerid;
	int i, num = real_cpu_num(desc, idx);

	if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/topology/thread_siblings", num) != 0)
		return;

	ul_path_readf_cpuset(desc->syscpu, &thread_siblings, maxcpus,
					"cpu%d/topology/thread_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &core_siblings, maxcpus,
					"cpu%d/topology/core_siblings", num);
	/* book/drawer topology exists on s390 only; these may stay NULL */
	ul_path_readf_cpuset(desc->syscpu, &book_siblings, maxcpus,
					"cpu%d/topology/book_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &drawer_siblings, maxcpus,
					"cpu%d/topology/drawer_siblings", num);

	/* IDs default to -1 when the sysfs attribute is missing */
	if (ul_path_readf_s32(desc->syscpu, &coreid, "cpu%d/topology/core_id", num) != 0)
		coreid = -1;

	if (ul_path_readf_s32(desc->syscpu, &socketid, "cpu%d/topology/physical_package_id", num) != 0)
		socketid = -1;

	if (ul_path_readf_s32(desc->syscpu, &bookid, "cpu%d/topology/book_id", num) != 0)
		bookid = -1;

	if (ul_path_readf_s32(desc->syscpu, &drawerid, "cpu%d/topology/drawer_id", num) != 0)
		drawerid = -1;

	/* first call: estimate counts and allocate the map/ID arrays */
	if (!desc->coremaps) {
		int ndrawers, nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book.  Because of odd /
		 * non-present cpu maps and to keep calculation easy we make
		 * sure that nsockets and nbooks is at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* number of drawers */
		ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
		if (!ndrawers)
			ndrawers = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		/* all id arrays hold ints, so sizeof(*drawerids) works for each */
		desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
		desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
		for (i = 0; i < desc->ncpuspos; i++)
			desc->coreids[i] = desc->socketids[i] = -1;
		if (book_siblings) {
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
			desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
			for (i = 0; i < desc->ncpuspos; i++)
				desc->bookids[i] = -1;
		}
		if (drawer_siblings) {
			desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
			desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
			for (i = 0; i < desc->ncpuspos; i++)
				desc->drawerids[i] = -1;
		}
	}

	/* the add_cpuset_to_array() helper takes ownership of the sets */
	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	desc->coreids[idx] = coreid;
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	desc->socketids[idx] = socketid;
	if (book_siblings) {
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
		desc->bookids[idx] = bookid;
	}
	if (drawer_siblings) {
		add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
		desc->drawerids[idx] = drawerid;
	}
}
1109
1110 static void
1111 read_polarization(struct lscpu_desc *desc, int idx)
1112 {
1113 char mode[64];
1114 int num = real_cpu_num(desc, idx);
1115
1116 if (desc->dispatching < 0)
1117 return;
1118 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/polarization", num) != 0)
1119 return;
1120 if (!desc->polarization)
1121 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1122
1123 ul_path_readf_buffer(desc->syscpu, mode, sizeof(mode), "cpu%d/polarization", num);
1124
1125 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1126 desc->polarization[idx] = POLAR_VLOW;
1127 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1128 desc->polarization[idx] = POLAR_VMEDIUM;
1129 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1130 desc->polarization[idx] = POLAR_VHIGH;
1131 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1132 desc->polarization[idx] = POLAR_HORIZONTAL;
1133 else
1134 desc->polarization[idx] = POLAR_UNKNOWN;
1135 }
1136
1137 static void
1138 read_address(struct lscpu_desc *desc, int idx)
1139 {
1140 int num = real_cpu_num(desc, idx);
1141
1142 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/address", num) != 0)
1143 return;
1144 if (!desc->addresses)
1145 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1146 ul_path_readf_s32(desc->syscpu, &desc->addresses[idx], "cpu%d/address", num);
1147 }
1148
1149 static void
1150 read_configured(struct lscpu_desc *desc, int idx)
1151 {
1152 int num = real_cpu_num(desc, idx);
1153
1154 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/configure", num) != 0)
1155 return;
1156 if (!desc->configured)
1157 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1158 ul_path_readf_s32(desc->syscpu, &desc->configured[idx], "cpu%d/configure", num);
1159 }
1160
1161 /* Read overall maximum frequency of cpu */
1162 static char *
1163 cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1164 {
1165 int i;
1166 float cpu_freq = 0.0;
1167 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1168
1169 if (desc->present) {
1170 for (i = 0; i < desc->ncpuspos; i++) {
1171 if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
1172 && desc->maxmhz[i]) {
1173 float freq = atof(desc->maxmhz[i]);
1174
1175 if (freq > cpu_freq)
1176 cpu_freq = freq;
1177 }
1178 }
1179 }
1180 snprintf(buf, bufsz, "%.4f", cpu_freq);
1181 return buf;
1182 }
1183
1184 /* Read overall minimum frequency of cpu */
1185 static char *
1186 cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1187 {
1188 int i;
1189 float cpu_freq = -1.0;
1190 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1191
1192 if (desc->present) {
1193 for (i = 0; i < desc->ncpuspos; i++) {
1194 if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
1195 && desc->minmhz[i]) {
1196 float freq = atof(desc->minmhz[i]);
1197
1198 if (cpu_freq < 0.0 || freq < cpu_freq)
1199 cpu_freq = freq;
1200 }
1201 }
1202 }
1203 snprintf(buf, bufsz, "%.4f", cpu_freq);
1204 return buf;
1205 }
1206
1207
1208 static void
1209 read_max_mhz(struct lscpu_desc *desc, int idx)
1210 {
1211 int num = real_cpu_num(desc, idx);
1212 int mhz;
1213
1214 if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) != 0)
1215 return;
1216 if (!desc->maxmhz)
1217 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1218 xasprintf(&desc->maxmhz[idx], "%.4f", (float) mhz / 1000);
1219 }
1220
1221 static void
1222 read_min_mhz(struct lscpu_desc *desc, int idx)
1223 {
1224 int num = real_cpu_num(desc, idx);
1225 int mhz;
1226
1227 if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) != 0)
1228 return;
1229 if (!desc->minmhz)
1230 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1231 xasprintf(&desc->minmhz[idx], "%.4f", (float) mhz / 1000);
1232 }
1233
1234 static int
1235 cachecmp(const void *a, const void *b)
1236 {
1237 struct cpu_cache *c1 = (struct cpu_cache *) a;
1238 struct cpu_cache *c2 = (struct cpu_cache *) b;
1239
1240 return strcmp(c2->name, c1->name);
1241 }
1242
/* Collect cache information for CPU @idx from
 * /sys/devices/system/cpu/cpuN/cache/indexM.  The first call counts the
 * index directories and allocates desc->caches; every call fills in the
 * static cache attributes (once per cache) and appends the per-CPU
 * shared_cpu_map to the cache's sharedmaps array. */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* count cache index directories; assumes they are numbered
		 * contiguously from index0 */
		while (ul_path_accessf(desc->syscpu, F_OK,
					"cpu%d/cache/index%d",
					num, desc->ncaches) == 0)
			desc->ncaches++;

		if (!desc->ncaches)
			return;
		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		/* this CPU may expose fewer indexes than the first one probed */
		if (ul_path_accessf(desc->syscpu, F_OK,
					"cpu%d/cache/index%d", num, i) != 0)
			continue;
		if (!ca->name) {
			/* static attributes not filled in yet -- do it once */
			int type = 0;

			/* cache type */
			if (ul_path_readf_string(desc->syscpu, &ca->type,
					"cpu%d/cache/index%d/type", num, i) > 0) {
				if (!strcmp(ca->type, "Data"))
					type = 'd';
				else if (!strcmp(ca->type, "Instruction"))
					type = 'i';
			}

			/* cache level */
			ul_path_readf_s32(desc->syscpu, &ca->level,
					"cpu%d/cache/index%d/level", num, i);
			/* build names such as "L1d", "L1i", or "L2" for
			 * unified caches */
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			/* cache ways */
			ul_path_readf_s32(desc->syscpu, &ca->ways,
					"cpu%d/cache/index%d/ways_of_associativity", num, i);

			/* cache size (human-readable string, e.g. "32K") */
			if (ul_path_readf_buffer(desc->syscpu, buf, sizeof(buf),
					"cpu%d/cache/index%d/size", num, i) > 0)
				parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		/* information about how CPUs share different caches */
		ul_path_readf_cpuset(desc->syscpu, &map, maxcpus,
				"cpu%d/cache/index%d/shared_cpu_map", num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		/* NOTE(review): add_cpuset_to_array() presumably takes
		 * ownership of @map or frees duplicates -- confirm in its
		 * definition */
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1310
/* Return non-zero when dirent @d looks like a NUMA node directory,
 * i.e. a directory (where d_type is available) named "node<digits>". */
static inline int is_node_dirent(struct dirent *d)
{
	return
		d &&
#ifdef _DIRENT_HAVE_D_TYPE
		/* DT_UNKNOWN is accepted because some filesystems do not
		 * fill in d_type */
		(d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
		strncmp(d->d_name, "node", 4) == 0 &&
		isdigit_string(d->d_name + 4);
}
1321
/* qsort() comparator for node numbers: ascending numeric order. */
static int
nodecmp(const void *ap, const void *bp)
{
	const int *lhs = (const int *) ap;
	const int *rhs = (const int *) bp;

	return *lhs - *rhs;
}
1328
/* Enumerate NUMA nodes from /sys/devices/system/node: count "nodeN"
 * directories, build the sorted idx2nodenum mapping, and read each
 * node's cpumap into desc->nodemaps.  Leaves desc->nnodes == 0 when the
 * directory is missing or empty. */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	struct path_cxt *sysnode;

	desc->nnodes = 0;

	sysnode = ul_new_path(_PATH_SYS_NODE);
	if (!sysnode)
		err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
	ul_path_set_prefix(sysnode, desc->prefix);

	dir = ul_path_opendir(sysnode, NULL);
	if (!dir)
		goto done;

	/* first pass: count the node directories */
	while ((d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		closedir(dir);
		goto done;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	/* second pass: extract the numeric suffix of each "nodeN" entry */
	rewinddir(dir);
	while ((d = readdir(dir)) && i < desc->nnodes) {
		if (is_node_dirent(d))
			desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
						_("Failed to extract the node number"));
	}
	closedir(dir);
	/* readdir() order is arbitrary -- sort node numbers ascending */
	qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		ul_path_readf_cpuset(sysnode, &desc->nodemaps[i], maxcpus,
				"node%d/cpumap", desc->idx2nodenum[i]);
done:
	ul_unref_path(sysnode);
}
1377
/* Format the value of column @col for the CPU at position @idx into
 * @buf and return @buf.  An empty string means "no data"; NULL is
 * returned only when the CACHE column does not fit into @bufsz.
 * With mod->physical the raw sysfs IDs are printed ("-" when unknown),
 * otherwise logical IDs derived from the cpuset arrays are used. */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CPU_CORE:
		if (mod->physical) {
			if (desc->coreids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->coreids[idx]);
		} else {
			/* logical ID = index of the core cpuset that
			 * contains this CPU */
			if (cpuset_ary_isset(cpu, desc->coremaps,
					     desc->ncores, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_SOCKET:
		if (mod->physical) {
			if (desc->socketids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->socketids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->socketmaps,
					     desc->nsockets, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_NODE:
		/* nodes always print the real node number, not an index */
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_CPU_DRAWER:
		if (mod->physical) {
			if (desc->drawerids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->drawermaps,
					     desc->ndrawers, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_BOOK:
		if (mod->physical) {
			if (desc->bookids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->bookids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->bookmaps,
					     desc->nbooks, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_CACHE:
	{
		/* one sub-field per cache, iterated from the last array
		 * entry down, joined by ',' (compat) or ':' */
		char *p = buf;
		size_t sz = bufsz;
		int j;

		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				/* buffer too small -- signal failure */
				if (x < 0 || (size_t) x >= sz)
					return NULL;
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_CPU_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_CPU_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CPU_CONFIGURED:
		if (!desc->configured)
			break;
		/* parsable mode uses Y/N, readable mode yes/no */
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_CPU_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_CPU_MAXMHZ:
		if (desc->maxmhz && desc->maxmhz[idx])
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_CPU_MINMHZ:
		if (desc->minmhz && desc->minmhz[idx])
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}
1518
/* Format the header of column @col into @buf and return @buf.  The
 * CACHE column gets a composite header built from the cache names
 * (iterated from the last array entry down, joined by ',' or ':');
 * all other columns use the static name from coldescs_cpu.  Returns
 * NULL when the composite header does not fit into @bufsz. */
static char *
get_cell_header(struct lscpu_desc *desc, int col,
	       struct lscpu_modifier *mod,
	       char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CPU_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;
			sz -= x;
			p += x;
			if (i > 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
		/* no caches -- fall through to the generic header below */
	}
	snprintf(buf, bufsz, "%s", coldescs_cpu[col].name);
	return buf;
}
1551
/*
 * [-C] backend
 */
/* Print one libsmartcols row per cache in desc->caches, with the
 * columns selected in @cols.  Caches are emitted from the last array
 * entry down to index 0. */
static void
print_caches_readable(struct lscpu_desc *desc, int cols[], int ncols,
		       struct lscpu_modifier *mod)
{
	int i;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "caches");
	}

	for (i = 0; i < ncols; i++) {
		struct lscpu_coldesc *cd = &coldescs_cache[cols[i]];
		if (!scols_table_new_column(table, cd->name, 0, cd->flags))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	/* reverse iteration defines the row order of the --caches output */
	for (i = desc->ncaches - 1; i >= 0; i--) {
		struct cpu_cache *ca = &desc->caches[i];
		struct libscols_line *line;
		int c;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			char *data = NULL;
			int col = cols[c];

			switch (col) {
			case COL_CACHE_NAME:
				if (ca->name)
					data = xstrdup(ca->name);
				break;
			case COL_CACHE_ONESIZE:
				/* size of a single cache instance */
				if (!ca->size)
					break;
				if (mod->bytes)
					xasprintf(&data, "%" PRIu64, ca->size);
				else
					data = size_to_human_string(SIZE_SUFFIX_1LETTER, ca->size);
				break;
			case COL_CACHE_ALLSIZE:
			{
				/* size summed over all instances system-wide */
				uint64_t sz = 0;

				if (get_cache_full_size(desc, ca, &sz) != 0)
					break;
				if (mod->bytes)
					xasprintf(&data, "%" PRIu64, sz);
				else
					data = size_to_human_string(SIZE_SUFFIX_1LETTER, sz);
				break;
			}
			case COL_CACHE_WAYS:
				if (ca->ways)
					xasprintf(&data, "%d", ca->ways);
				break;
			case COL_CACHE_TYPE:
				if (ca->type)
					data = xstrdup(ca->type);
				break;
			case COL_CACHE_LEVEL:
				if (ca->level)
					xasprintf(&data, "%d", ca->level);
				break;
			}

			/* the line takes ownership of @data on success */
			if (data && scols_line_refer_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1638
1639 /*
1640 * [-p] backend, we support two parsable formats:
1641 *
1642 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1643 * output and it contains fixed set of the columns. The CACHE columns are at
1644 * the end of the line and the CACHE is not printed if the number of the caches
1645 * is zero. The CACHE columns are separated by two commas, for example:
1646 *
1647 * $ lscpu --parse
1648 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1649 * 0,0,0,0,,0,0,0
1650 * 1,1,0,0,,1,1,0
1651 *
 * 2) "user defined output" -- this format always prints all columns without a
 * special prefix for the CACHE column. If there are no CACHEs then the column is
 * empty and the header "Cache" is printed rather than a real name of the cache.
 * The CACHE columns are separated by ':'.
1656 *
1657 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1658 * # CPU,Core,Socket,Node,L1d:L1i:L2
1659 * 0,0,0,0,0:0:0
1660 * 1,1,0,0,1:1:0
1661 */
1662 static void
1663 print_cpus_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1664 struct lscpu_modifier *mod)
1665 {
1666 char buf[BUFSIZ], *data;
1667 int i;
1668
1669 /*
1670 * Header
1671 */
1672 printf(_(
1673 "# The following is the parsable format, which can be fed to other\n"
1674 "# programs. Each different item in every column has an unique ID\n"
1675 "# starting from zero.\n"));
1676
1677 fputs("# ", stdout);
1678 for (i = 0; i < ncols; i++) {
1679 int col = cols[i];
1680
1681 if (col == COL_CPU_CACHE) {
1682 if (mod->compat && !desc->ncaches)
1683 continue;
1684 if (mod->compat && i != 0)
1685 putchar(',');
1686 }
1687 if (i > 0)
1688 putchar(',');
1689
1690 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1691
1692 if (data && * data && col != COL_CPU_CACHE &&
1693 !coldescs_cpu[col].is_abbr) {
1694 /*
1695 * For normal column names use mixed case (e.g. "Socket")
1696 */
1697 char *p = data + 1;
1698
1699 while (p && *p != '\0') {
1700 *p = tolower((unsigned int) *p);
1701 p++;
1702 }
1703 }
1704 fputs(data && *data ? data : "", stdout);
1705 }
1706 putchar('\n');
1707
1708 /*
1709 * Data
1710 */
1711 for (i = 0; i < desc->ncpuspos; i++) {
1712 int c;
1713 int cpu = real_cpu_num(desc, i);
1714
1715 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1716 continue;
1717 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1718 continue;
1719 if (desc->present && !is_cpu_present(desc, cpu))
1720 continue;
1721 for (c = 0; c < ncols; c++) {
1722 if (mod->compat && cols[c] == COL_CPU_CACHE) {
1723 if (!desc->ncaches)
1724 continue;
1725 if (c > 0)
1726 putchar(',');
1727 }
1728 if (c > 0)
1729 putchar(',');
1730
1731 data = get_cell_data(desc, i, cols[c], mod,
1732 buf, sizeof(buf));
1733 fputs(data && *data ? data : "", stdout);
1734 }
1735 putchar('\n');
1736 }
1737 }
1738
/*
 * [-e] backend
 */
/* Print one libsmartcols row per selected CPU with the columns in
 * @cols; cells without data are rendered as "-". */
static void
print_cpus_readable(struct lscpu_desc *desc, int cols[], int ncols,
		   struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "cpus");
	}

	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, data, 0, 0))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		/* apply the -a/-b/-c online/offline filters */
		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";
			/* set_data copies @data (which lives in buf) */
			if (scols_line_set_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1796
1797
/* Append a two-column row to the summary table @tb: @txt in the
 * description column and the printf-formatted @fmt arguments in the
 * data column.  The formatted string is owned by the table line. */
static void __attribute__ ((__format__(printf, 3, 4)))
add_summary_sprint(struct libscols_table *tb,
		   const char *txt,
		   const char *fmt,
		   ...)
{
	struct libscols_line *ln = scols_table_new_line(tb, NULL);
	char *data;
	va_list args;

	if (!ln)
		err(EXIT_FAILURE, _("failed to allocate output line"));

	/* description column */
	scols_line_set_data(ln, 0, txt);

	/* data column; xvasprintf() is assumed to abort on allocation
	 * failure (util-linux x* convention) */
	va_start(args, fmt);
	xvasprintf(&data, fmt, args);
	va_end(args);

	if (data && scols_line_refer_data(ln, 1, data))
		err(EXIT_FAILURE, _("failed to add output data"));
}
1822
/* Convenience wrappers: add an integer or a string data row to the summary */
#define add_summary_n(tb, txt, num)	add_summary_sprint(tb, txt, "%d", num)
#define add_summary_s(tb, txt, str)	add_summary_sprint(tb, txt, "%s", str)
1825
1826 static void
1827 print_cpuset(struct libscols_table *tb,
1828 const char *key, cpu_set_t *set, int hex)
1829 {
1830 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1831 size_t setbuflen = 7 * maxcpus;
1832 char setbuf[setbuflen], *p;
1833
1834 if (hex) {
1835 p = cpumask_create(setbuf, setbuflen, set, setsize);
1836 add_summary_s(tb, key, p);
1837 } else {
1838 p = cpulist_create(setbuf, setbuflen, set, setsize);
1839 add_summary_s(tb, key, p);
1840 }
1841 }
1842
1843 static int get_cache_full_size(struct lscpu_desc *desc,
1844 struct cpu_cache *ca, uint64_t *res)
1845 {
1846 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1847 int i, nshares = 0;
1848
1849 /* Count number of CPUs which shares the cache */
1850 for (i = 0; i < desc->ncpuspos; i++) {
1851 int cpu = real_cpu_num(desc, i);
1852
1853 if (desc->present && !is_cpu_present(desc, cpu))
1854 continue;
1855 if (CPU_ISSET_S(cpu, setsize, ca->sharedmaps[0]))
1856 nshares++;
1857 }
1858
1859 /* Correction for CPU threads */
1860 if (desc->nthreads > desc->ncores)
1861 nshares /= (desc->nthreads / desc->ncores);
1862
1863 *res = (desc->ncores / nshares) * ca->size;
1864 return 0;
1865 }
1866
/*
 * default output
 */
/* Print the default lscpu summary: one "field: data" row per available
 * piece of information, in a fixed order.  Rows whose source data is
 * missing are silently omitted. */
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	char buf[BUFSIZ];
	int i = 0;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	struct libscols_table *tb;

	scols_init_debug(0);

	tb = scols_new_table();
	if (!tb)
		err(EXIT_FAILURE, _("failed to allocate output table"));

	scols_table_enable_noheadings(tb, 1);
	if (mod->json) {
		scols_table_enable_json(tb, 1);
		scols_table_set_name(tb, "lscpu");
	}

	if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
	    scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES) == NULL)
		err(EXIT_FAILURE, _("failed to initialize output column"));

	add_summary_s(tb, _("Architecture:"), desc->arch);
	if (desc->mode) {
		char *p = buf;

		/* build "32-bit, 64-bit" style list; the trailing ", " of
		 * the last entry is cut off below */
		if (desc->mode & MODE_32BIT) {
			strcpy(p, "32-bit, ");
			p += 8;
		}
		if (desc->mode & MODE_64BIT) {
			strcpy(p, "64-bit, ");
			p += 8;
		}
		*(p - 2) = '\0';
		add_summary_s(tb, _("CPU op-mode(s):"), buf);
	}
#if !defined(WORDS_BIGENDIAN)
	add_summary_s(tb, _("Byte Order:"), "Little Endian");
#else
	add_summary_s(tb, _("Byte Order:"), "Big Endian");
#endif

	if (desc->addrsz)
		add_summary_s(tb, _("Address sizes:"), desc->addrsz);

	add_summary_n(tb, _("CPU(s):"), desc->ncpus);

	if (desc->online)
		print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
					    _("On-line CPU(s) list:"),
				desc->online, mod->hex);

	if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
		cpu_set_t *set;

		/* Linux kernel provides cpuset of off-line CPUs that contains
		 * all configured CPUs (see /sys/devices/system/cpu/offline),
		 * but we want to print real (present in system) off-line CPUs only.
		 */
		set = cpuset_alloc(maxcpus, NULL, NULL);
		if (!set)
			err(EXIT_FAILURE, _("failed to callocate cpu set"));
		CPU_ZERO_S(setsize, set);
		for (i = 0; i < desc->ncpuspos; i++) {
			int cpu = real_cpu_num(desc, i);
			if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
				CPU_SET_S(cpu, setsize, set);
		}
		print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
					    _("Off-line CPU(s) list:"),
			     set, mod->hex);
		cpuset_free(set);
	}

	if (desc->nsockets) {
		int threads_per_core, cores_per_socket, sockets_per_book;
		int books_per_drawer, drawers;
		FILE *fd;

		threads_per_core = cores_per_socket = sockets_per_book = 0;
		books_per_drawer = drawers = 0;
		/* s390 detects its cpu topology via /proc/sysinfo, if present.
		 * Using simply the cpu topology masks in sysfs will not give
		 * usable results since everything is virtualized. E.g.
		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
		 * have five cpus.
		 * If the cpu topology is not exported (e.g. 2nd level guest)
		 * fall back to old calculation scheme.
		 */
		if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
			int t0, t1;

			while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
				if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
					   &t0, &t1, &drawers, &books_per_drawer,
					   &sockets_per_book,
					   &cores_per_socket) == 6)
					break;
			}
			if (fd)
				fclose(fd);
		}
		if (desc->mtid)
			threads_per_core = atoi(desc->mtid) + 1;
		/* the `?:' fallbacks compute the topology from the cpuset
		 * counters when /proc/sysinfo did not provide the values */
		add_summary_n(tb, _("Thread(s) per core:"),
			threads_per_core ?: desc->nthreads / desc->ncores);
		add_summary_n(tb, _("Core(s) per socket:"),
			cores_per_socket ?: desc->ncores / desc->nsockets);
		if (desc->nbooks) {
			add_summary_n(tb, _("Socket(s) per book:"),
				sockets_per_book ?: desc->nsockets / desc->nbooks);
			if (desc->ndrawers) {
				add_summary_n(tb, _("Book(s) per drawer:"),
					books_per_drawer ?: desc->nbooks / desc->ndrawers);
				add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
			} else {
				add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
			}
		} else {
			add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
		}
	}
	if (desc->nnodes)
		add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
	if (desc->vendor)
		add_summary_s(tb, _("Vendor ID:"), desc->vendor);
	if (desc->machinetype)
		add_summary_s(tb, _("Machine type:"), desc->machinetype);
	if (desc->family)
		add_summary_s(tb, _("CPU family:"), desc->family);
	if (desc->model || desc->revision)
		add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
	if (desc->modelname || desc->cpu)
		add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
	if (desc->stepping)
		add_summary_s(tb, _("Stepping:"), desc->stepping);
	if (desc->freqboost >= 0)
		add_summary_s(tb, _("Frequency boost:"), desc->freqboost ?
				_("enabled") : _("disabled"));
	if (desc->mhz)
		add_summary_s(tb, _("CPU MHz:"), desc->mhz);
	if (desc->dynamic_mhz)
		add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
	if (desc->static_mhz)
		add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
	if (desc->maxmhz)
		add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
	if (desc->minmhz)
		add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
	if (desc->bogomips)
		add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
	if (desc->virtflag) {
		if (!strcmp(desc->virtflag, "svm"))
			add_summary_s(tb, _("Virtualization:"), "AMD-V");
		else if (!strcmp(desc->virtflag, "vmx"))
			add_summary_s(tb, _("Virtualization:"), "VT-x");
	}
	if (desc->hypervisor)
		add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
	if (desc->hyper) {
		add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
		add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
	}
	if (desc->dispatching >= 0)
		add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
	if (desc->ncaches) {
		/* per-cache system-wide totals, last array entry first */
		for (i = desc->ncaches - 1; i >= 0; i--) {
			uint64_t sz = 0;
			char *tmp;
			struct cpu_cache *ca = &desc->caches[i];

			if (ca->size == 0)
				continue;
			if (get_cache_full_size(desc, ca, &sz) != 0 || sz == 0)
				continue;
			if (mod->bytes)
				xasprintf(&tmp, "%" PRIu64, sz);
			else
				tmp = size_to_human_string(
					SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
					sz);
			snprintf(buf, sizeof(buf), _("%s cache: "), ca->name);
			add_summary_s(tb, buf, tmp);
			free(tmp);
		}
	}
	if (desc->necaches) {
		/* extra caches (e.g. from /proc/cpuinfo), single size only */
		for (i = desc->necaches - 1; i >= 0; i--) {
			char *tmp;
			struct cpu_cache *ca = &desc->ecaches[i];

			if (ca->size == 0)
				continue;
			if (mod->bytes)
				xasprintf(&tmp, "%" PRIu64, ca->size);
			else
				tmp = size_to_human_string(
					SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
					ca->size);
			snprintf(buf, sizeof(buf), _("%s cache: "), ca->name);
			add_summary_s(tb, buf, tmp);
			free(tmp);
		}
	}

	for (i = 0; i < desc->nnodes; i++) {
		snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
		print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
	}

	if (desc->physsockets) {
		add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
		add_summary_n(tb, _("Physical chips:"), desc->physchips);
		add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
	}

	if (desc->flags)
		add_summary_s(tb, _("Flags:"), desc->flags);

	scols_print_table(tb);
	scols_unref_table(tb);
}
2095
2096 static void __attribute__((__noreturn__)) usage(void)
2097 {
2098 FILE *out = stdout;
2099 size_t i;
2100
2101 fputs(USAGE_HEADER, out);
2102 fprintf(out, _(" %s [options]\n"), program_invocation_short_name);
2103
2104 fputs(USAGE_SEPARATOR, out);
2105 fputs(_("Display information about the CPU architecture.\n"), out);
2106
2107 fputs(USAGE_OPTIONS, out);
2108 fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
2109 fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
2110 fputs(_(" -B, --bytes print sizes in bytes rather than in human readable format\n"), out);
2111 fputs(_(" -C, --caches[=<list>] info about caches in extended readable format\n"), out);
2112 fputs(_(" -c, --offline print offline CPUs only\n"), out);
2113 fputs(_(" -J, --json use JSON for default or extended format\n"), out);
2114 fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
2115 fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
2116 fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
2117 fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
2118 fputs(_(" -y, --physical print physical instead of logical IDs\n"), out);
2119 fputs(USAGE_SEPARATOR, out);
2120 printf(USAGE_HELP_OPTIONS(25));
2121
2122 fputs(_("\nAvailable output columns for -e or -p:\n"), out);
2123 for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++)
2124 fprintf(out, " %13s %s\n", coldescs_cpu[i].name, _(coldescs_cpu[i].help));
2125
2126 fputs(_("\nAvailable output columns for -C:\n"), out);
2127 for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++)
2128 fprintf(out, " %13s %s\n", coldescs_cache[i].name, _(coldescs_cache[i].help));
2129
2130 printf(USAGE_MAN_TAIL("lscpu(1)"));
2131
2132 exit(EXIT_SUCCESS);
2133 }
2134
2135 int main(int argc, char *argv[])
2136 {
2137 struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
2138 struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
2139 int c, i, all = 0;
2140 int columns[ARRAY_SIZE(coldescs_cpu)], ncolumns = 0;
2141 int cpu_modifier_specified = 0;
2142 size_t setsize;
2143
2144 enum {
2145 OPT_OUTPUT_ALL = CHAR_MAX + 1,
2146 };
2147 static const struct option longopts[] = {
2148 { "all", no_argument, NULL, 'a' },
2149 { "online", no_argument, NULL, 'b' },
2150 { "bytes", no_argument, NULL, 'B' },
2151 { "caches", optional_argument, NULL, 'C' },
2152 { "offline", no_argument, NULL, 'c' },
2153 { "help", no_argument, NULL, 'h' },
2154 { "extended", optional_argument, NULL, 'e' },
2155 { "json", no_argument, NULL, 'J' },
2156 { "parse", optional_argument, NULL, 'p' },
2157 { "sysroot", required_argument, NULL, 's' },
2158 { "physical", no_argument, NULL, 'y' },
2159 { "hex", no_argument, NULL, 'x' },
2160 { "version", no_argument, NULL, 'V' },
2161 { "output-all", no_argument, NULL, OPT_OUTPUT_ALL },
2162 { NULL, 0, NULL, 0 }
2163 };
2164
2165 static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
2166 { 'a','b','c' },
2167 { 'C','e','p' },
2168 { 0 }
2169 };
2170 int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;
2171
2172 setlocale(LC_ALL, "");
2173 bindtextdomain(PACKAGE, LOCALEDIR);
2174 textdomain(PACKAGE);
2175 atexit(close_stdout);
2176
2177 while ((c = getopt_long(argc, argv, "aBbC::ce::hJp::s:xyV", longopts, NULL)) != -1) {
2178
2179 err_exclusive_options(c, longopts, excl, excl_st);
2180
2181 switch (c) {
2182 case 'a':
2183 mod->online = mod->offline = 1;
2184 cpu_modifier_specified = 1;
2185 break;
2186 case 'B':
2187 mod->bytes = 1;
2188 break;
2189 case 'b':
2190 mod->online = 1;
2191 cpu_modifier_specified = 1;
2192 break;
2193 case 'c':
2194 mod->offline = 1;
2195 cpu_modifier_specified = 1;
2196 break;
2197 case 'C':
2198 if (optarg) {
2199 if (*optarg == '=')
2200 optarg++;
2201 ncolumns = string_to_idarray(optarg,
2202 columns, ARRAY_SIZE(columns),
2203 cache_column_name_to_id);
2204 if (ncolumns < 0)
2205 return EXIT_FAILURE;
2206 }
2207 mod->mode = OUTPUT_CACHES;
2208 break;
2209 case 'h':
2210 usage();
2211 case 'J':
2212 mod->json = 1;
2213 break;
2214 case 'p':
2215 case 'e':
2216 if (optarg) {
2217 if (*optarg == '=')
2218 optarg++;
2219 ncolumns = string_to_idarray(optarg,
2220 columns, ARRAY_SIZE(columns),
2221 cpu_column_name_to_id);
2222 if (ncolumns < 0)
2223 return EXIT_FAILURE;
2224 }
2225 mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
2226 break;
2227 case 's':
2228 desc->prefix = optarg;
2229 mod->system = SYSTEM_SNAPSHOT;
2230 break;
2231 case 'x':
2232 mod->hex = 1;
2233 break;
2234 case 'y':
2235 mod->physical = 1;
2236 break;
2237 case 'V':
2238 printf(UTIL_LINUX_VERSION);
2239 return EXIT_SUCCESS;
2240 case OPT_OUTPUT_ALL:
2241 all = 1;
2242 break;
2243 default:
2244 errtryhelp(EXIT_FAILURE);
2245 }
2246 }
2247
2248 if (all) {
2249 size_t sz, maxsz = mod->mode == OUTPUT_CACHES ?
2250 ARRAY_SIZE(coldescs_cache) :
2251 ARRAY_SIZE(coldescs_cpu);
2252
2253 for (sz = 0; sz < maxsz; sz++)
2254 columns[sz] = 1;
2255 }
2256
2257 if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
2258 fprintf(stderr,
2259 _("%s: options --all, --online and --offline may only "
2260 "be used with options --extended or --parse.\n"),
2261 program_invocation_short_name);
2262 return EXIT_FAILURE;
2263 }
2264
2265 if (argc != optind) {
2266 warnx(_("bad usage"));
2267 errtryhelp(EXIT_FAILURE);
2268 }
2269
2270 /* set default cpu display mode if none was specified */
2271 if (!mod->online && !mod->offline) {
2272 mod->online = 1;
2273 mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
2274 }
2275
2276 ul_path_init_debug();
2277
2278 /* /sys/devices/system/cpu */
2279 desc->syscpu = ul_new_path(_PATH_SYS_CPU);
2280 if (!desc->syscpu)
2281 err(EXIT_FAILURE, _("failed to initialize CPUs sysfs handler"));
2282 if (desc->prefix)
2283 ul_path_set_prefix(desc->syscpu, desc->prefix);
2284
2285 /* /proc */
2286 desc->procfs = ul_new_path("/proc");
2287 if (!desc->procfs)
2288 err(EXIT_FAILURE, _("failed to initialize procfs handler"));
2289 if (desc->prefix)
2290 ul_path_set_prefix(desc->procfs, desc->prefix);
2291
2292 read_basicinfo(desc, mod);
2293
2294 setsize = CPU_ALLOC_SIZE(maxcpus);
2295
2296 for (i = 0; i < desc->ncpuspos; i++) {
2297 /* only consider present CPUs */
2298 if (desc->present &&
2299 !CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present))
2300 continue;
2301 read_topology(desc, i);
2302 read_cache(desc, i);
2303 read_polarization(desc, i);
2304 read_address(desc, i);
2305 read_configured(desc, i);
2306 read_max_mhz(desc, i);
2307 read_min_mhz(desc, i);
2308 }
2309
2310 if (desc->caches)
2311 qsort(desc->caches, desc->ncaches,
2312 sizeof(struct cpu_cache), cachecmp);
2313
2314 if (desc->ecaches)
2315 qsort(desc->ecaches, desc->necaches,
2316 sizeof(struct cpu_cache), cachecmp);
2317
2318 read_nodes(desc);
2319 read_hypervisor(desc, mod);
2320 arm_cpu_decode(desc);
2321
2322 switch(mod->mode) {
2323 case OUTPUT_SUMMARY:
2324 print_summary(desc, mod);
2325 break;
2326 case OUTPUT_CACHES:
2327 if (!ncolumns) {
2328 columns[ncolumns++] = COL_CACHE_NAME;
2329 columns[ncolumns++] = COL_CACHE_ONESIZE;
2330 columns[ncolumns++] = COL_CACHE_ALLSIZE;
2331 columns[ncolumns++] = COL_CACHE_WAYS;
2332 columns[ncolumns++] = COL_CACHE_TYPE;
2333 columns[ncolumns++] = COL_CACHE_LEVEL;
2334 }
2335 print_caches_readable(desc, columns, ncolumns, mod);
2336 break;
2337 case OUTPUT_PARSABLE:
2338 if (!ncolumns) {
2339 columns[ncolumns++] = COL_CPU_CPU;
2340 columns[ncolumns++] = COL_CPU_CORE;
2341 columns[ncolumns++] = COL_CPU_SOCKET;
2342 columns[ncolumns++] = COL_CPU_NODE;
2343 columns[ncolumns++] = COL_CPU_CACHE;
2344 mod->compat = 1;
2345 }
2346 print_cpus_parsable(desc, columns, ncolumns, mod);
2347 break;
2348 case OUTPUT_READABLE:
2349 if (!ncolumns) {
2350 /* No list was given. Just print whatever is there. */
2351 columns[ncolumns++] = COL_CPU_CPU;
2352 if (desc->nodemaps)
2353 columns[ncolumns++] = COL_CPU_NODE;
2354 if (desc->drawermaps)
2355 columns[ncolumns++] = COL_CPU_DRAWER;
2356 if (desc->bookmaps)
2357 columns[ncolumns++] = COL_CPU_BOOK;
2358 if (desc->socketmaps)
2359 columns[ncolumns++] = COL_CPU_SOCKET;
2360 if (desc->coremaps)
2361 columns[ncolumns++] = COL_CPU_CORE;
2362 if (desc->caches)
2363 columns[ncolumns++] = COL_CPU_CACHE;
2364 if (desc->online)
2365 columns[ncolumns++] = COL_CPU_ONLINE;
2366 if (desc->configured)
2367 columns[ncolumns++] = COL_CPU_CONFIGURED;
2368 if (desc->polarization)
2369 columns[ncolumns++] = COL_CPU_POLARIZATION;
2370 if (desc->addresses)
2371 columns[ncolumns++] = COL_CPU_ADDRESS;
2372 if (desc->maxmhz)
2373 columns[ncolumns++] = COL_CPU_MAXMHZ;
2374 if (desc->minmhz)
2375 columns[ncolumns++] = COL_CPU_MINMHZ;
2376 }
2377 print_cpus_readable(desc, columns, ncolumns, mod);
2378 break;
2379 }
2380
2381 ul_unref_path(desc->syscpu);
2382 ul_unref_path(desc->procfs);
2383 return EXIT_SUCCESS;
2384 }