]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu.c
1a03aaf6a7df070f8c47ae8747e406c87d1d7917
[thirdparty/util-linux.git] / sys-utils / lscpu.c
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <sys/personality.h>
37
38 #if (defined(__x86_64__) || defined(__i386__))
39 # if !defined( __SANITIZE_ADDRESS__)
40 # define INCLUDE_VMWARE_BDOOR
41 # else
42 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
43 # endif
44 #endif
45
46 #ifdef INCLUDE_VMWARE_BDOOR
47 # include <stdint.h>
48 # include <signal.h>
49 # include <strings.h>
50 # include <setjmp.h>
51 # ifdef HAVE_SYS_IO_H
52 # include <sys/io.h>
53 # endif
54 #endif
55
56 #if defined(HAVE_LIBRTAS)
57 #include <librtas.h>
58 #endif
59
60 #include <libsmartcols.h>
61
62 #include "closestream.h"
63 #include "optutils.h"
64
65 #include "lscpu.h"
66
67 #define CACHE_MAX 100
68
69 /* /sys paths */
70 #define _PATH_SYS_SYSTEM "/sys/devices/system"
71 #define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
72 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
73 #define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
74
75 /* Xen Domain feature flag used for /sys/hypervisor/properties/features */
76 #define XENFEAT_supervisor_mode_kernel 3
77 #define XENFEAT_mmu_pt_update_preserve_ad 5
78 #define XENFEAT_hvm_callback_vector 8
79
80 #define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
81 #define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
82 | (1U << XENFEAT_hvm_callback_vector) )
83
/* human-readable virtualization type names, indexed by VIRT_* */
static const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

/* hypervisor vendor names, indexed by HYPER_* */
static const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par",
	[HYPER_WSL]	= "Windows Subsystem for Linux"
};

/* PCI vendor ids used by read_hypervisor() to detect a hypervisor from
 * its emulated PCI devices; indexed by HYPER_* (0x0000 = no PCI-based
 * detection for that entry) */
static const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device ids of the hypervisors' emulated graphics adapters,
 * indexed by HYPER_*; matched together with hv_vendor_pci[] */
static const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};


/* dispatching modes (see desc->dispatching), indexed by DISP_* */
static const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* short and long names for CPU polarization values, indexed by POLAR_* */
static struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]		= {"U",  "-"},
	[POLAR_VLOW]		= {"VL", "vert-low"},
	[POLAR_VMEDIUM]		= {"VM", "vert-medium"},
	[POLAR_VHIGH]		= {"VH", "vert-high"},
	[POLAR_HORIZONTAL]	= {"H",  "horizontal"},
};
142
static int maxcpus;		/* size in bits of kernel cpu mask */

/* test whether CPU number @_cpu is set in @_d's online/present mask;
 * evaluates to 0 when the mask has not been read */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ?	\
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* map a continuous index to the real (possibly sparse) CPU number,
 * see the idx2cpunum[] setup in read_basicinfo() */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])
153
/*
 * IDs of the output columns
 */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_DRAWER,
	COL_CACHE,
	COL_POLARIZATION,
	COL_ADDRESS,
	COL_CONFIGURED,
	COL_ONLINE,
	COL_MAXMHZ,
	COL_MINMHZ,
};

/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header, also matched by column_name_to_id() */
	const char *help;	/* translatable help text */

	unsigned int is_abbr:1;	/* name is abbreviation */
};

/* all supported columns, indexed by COL_* */
static struct lscpu_coldesc coldescs[] =
{
	[COL_CPU]          = { "CPU", N_("logical CPU number"), 1 },
	[COL_CORE]         = { "CORE", N_("logical core number") },
	[COL_SOCKET]       = { "SOCKET", N_("logical socket number") },
	[COL_NODE]         = { "NODE", N_("logical NUMA node number") },
	[COL_BOOK]         = { "BOOK", N_("logical book number") },
	[COL_DRAWER]       = { "DRAWER", N_("logical drawer number") },
	[COL_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
	[COL_MAXMHZ]       = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
	[COL_MINMHZ]       = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
};
198
199 static int
200 column_name_to_id(const char *name, size_t namesz)
201 {
202 size_t i;
203
204 for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
205 const char *cn = coldescs[i].name;
206
207 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
208 return i;
209 }
210 warnx(_("unknown column: %s"), name);
211 return -1;
212 }
213
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <value>"
 *
 * Returns 1 and stores a newly allocated copy of the value in *@value
 * on a match; returns 0 (leaving *@value untouched) otherwise.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	size_t len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast for <ctype.h> -- plain char may be signed and
	 * passing a negative value to isspace() is undefined behavior */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* trim trailing white space (including the fgets() newline); start
	 * one past the last character so that a final line without '\n'
	 * does not lose its last character */
	p = line + strlen(line);
	while (p > v && isspace((unsigned char) *(p - 1)))
		p--;
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
256
257 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
258 * part of the cache topology information within the sysfs filesystem.
259 * This is true for all shared caches on e.g. s390. When there are layers of
260 * hypervisors in between it is not knows which CPUs share which caches.
261 * Therefore information about shared caches is only available in
262 * /proc/cpuinfo.
263 * Format is:
264 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
265 */
266 static int
267 lookup_cache(char *line, struct lscpu_desc *desc)
268 {
269 struct cpu_cache *cache;
270 long long size;
271 char *p, type;
272 int level;
273
274 /* Make sure line starts with "cache<nr> :" */
275 if (strncmp(line, "cache", 5))
276 return 0;
277 for (p = line + 5; isdigit(*p); p++);
278 for (; isspace(*p); p++);
279 if (*p != ':')
280 return 0;
281
282 p = strstr(line, "scope=") + 6;
283 /* Skip private caches, also present in sysfs */
284 if (!p || strncmp(p, "Private", 7) == 0)
285 return 0;
286 p = strstr(line, "level=");
287 if (!p || sscanf(p, "level=%d", &level) != 1)
288 return 0;
289 p = strstr(line, "type=") + 5;
290 if (!p || !*p)
291 return 0;
292 type = 0;
293 if (strncmp(p, "Data", 4) == 0)
294 type = 'd';
295 if (strncmp(p, "Instruction", 11) == 0)
296 type = 'i';
297 p = strstr(line, "size=");
298 if (!p || sscanf(p, "size=%lld", &size) != 1)
299 return 0;
300
301 desc->necaches++;
302 desc->ecaches = xrealloc(desc->ecaches,
303 desc->necaches * sizeof(struct cpu_cache));
304 cache = &desc->ecaches[desc->necaches - 1];
305 memset(cache, 0 , sizeof(*cache));
306 if (type)
307 xasprintf(&cache->name, "L%d%c", level, type);
308 else
309 xasprintf(&cache->name, "L%d", level);
310 xasprintf(&cache->size, "%lldK", size);
311 return 1;
312 }
313
/* Don't init the mode for platforms where we are not able to
 * detect that CPU supports 64-bit mode.
 *
 * Returns a bitmask of MODE_32BIT/MODE_64BIT describing the CPU modes
 * the machine we are running on supports, or 0 when nothing can be
 * said (snapshot mode or unknown platform).
 */
static int
init_mode(struct lscpu_modifier *mod)
{
	int m = 0;

	if (mod->system == SYSTEM_SNAPSHOT)
		/* reading info from any /{sys,proc} dump, don't mix it with
		 * information about our real CPU */
		return 0;

#if defined(__alpha__) || defined(__ia64__)
	m |= MODE_64BIT;	/* 64bit platforms only */
#endif
	/* platforms with 64bit flag in /proc/cpuinfo, define
	 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
	m |= MODE_32BIT;
#endif

#if defined(__aarch64__)
	{
		/* personality() is the most reliable way (since 4.7)
		 * to determine aarch32 support */
		int pers = personality(PER_LINUX32);
		if (pers != -1) {
			/* probe succeeded: restore the previous personality
			 * before recording aarch32 support */
			personality(pers);
			m |= MODE_32BIT;
		}
		m |= MODE_64BIT;
	}
#endif
	return m;
}
351
352 #if defined(HAVE_LIBRTAS)
353 #define PROCESSOR_MODULE_INFO 43
/* Decode the big-endian 16-bit value stored at buf[offset].
 *
 * The bytes must be read as unsigned: through a plain (possibly signed)
 * char, a byte >= 0x80 sign-extends and corrupts the result (e.g.
 * {0x80,0x01} decoded to a negative number instead of 0x8001).
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
358
/* Query the physical processor-module topology through the RTAS
 * PROCESSOR_MODULE_INFO system parameter (PowerPC firmware) and fill
 * desc->physsockets/physchips/physcoresperchip.
 * On any failure the three counters are left at zero. */
static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	/* first big-endian 16-bit word is the payload length */
	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	/* number of module types described in the buffer */
	ntypes = strbe16toh(buf, 2);

	assert(ntypes <= 1);
	if (!ntypes)
		return;

	/* NOTE(review): offset 8..9 is read although only len >= 8 was
	 * checked above -- presumably the firmware always provides the
	 * full record; verify against the RTAS spec */
	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
384 #else
/* built without librtas: physical PowerPC topology is not available */
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
389 #endif
390
391
/* Read the basic CPU description into @desc: the architecture from
 * uname(2), the key/value details from /proc/cpuinfo, the CPU masks
 * (possible/present/online) and dispatching/boost state from sysfs,
 * and the machine type from /proc/sysinfo when present.
 * Exits the program when /proc/cpuinfo or the "possible" mask cannot
 * be read.
 */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp;
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;
	cpu_set_t *cpuset = NULL;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));

	fp = ul_path_fopen(desc->procfs, "r", "cpuinfo");
	if (!fp)
		err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
	desc->arch = xstrdup(utsbuf.machine);

	/* details -- lookup() fills each target only once, so for keys
	 * with several spellings the first matching line wins */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "CPU implementer", &desc->vendor)) ; /* ARM and aarch64 */
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "CPU part", &desc->model)) ; /* ARM and aarch64 */
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "CPU variant", &desc->stepping)) ; /* aarch64 */
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
		else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
		else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
		else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
		else if (lookup(buf, "Features", &desc->flags)) ; /* aarch64 */
		else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ; /* aarch64 */
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
		else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
		else if (lookup(buf, "address sizes", &desc->addrsz)) ; /* x86 */
		else if (lookup_cache(buf, desc)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* pad with spaces so every flag can be matched as " flag " */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT; /* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT; /* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT; /* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (ul_path_read_s32(desc->syscpu, &maxcpus, "kernel_max") == 0)
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus += 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	if (ul_path_readf_cpulist(desc->syscpu, &cpuset, maxcpus, "possible") == 0) {
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, cpuset);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		/* map continuous indexes to the (possibly sparse) real CPU
		 * numbers, see real_cpu_num() */
		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET_S(num, setsize, cpuset))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(cpuset);
		cpuset = NULL;
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->present, maxcpus, "present") == 0)
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);

	/* get mask for online CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->online, maxcpus, "online") == 0)
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);

	/* get dispatching mode (-1 when the sysfs file is not available) */
	if (ul_path_read_s32(desc->syscpu, &desc->dispatching, "dispatching") != 0)
		desc->dispatching = -1;

	/* get cpufreq boost mode (-1 when not available) */
	if (ul_path_read_s32(desc->syscpu, &desc->freqboost, "cpufreq/boost") != 0)
		desc->freqboost = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);

	/* machine type from /proc/sysinfo (presumably s390 -- the file
	 * is also used by the PR/SM detection in read_hypervisor()) */
	if ((fp = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		while (fgets(buf, sizeof(buf), fp) != NULL && !desc->machinetype)
			lookup(buf, "Type", &desc->machinetype);
		fclose(fp);
	}
}
524
525 static int
526 has_pci_device(struct lscpu_desc *desc, unsigned int vendor, unsigned int device)
527 {
528 FILE *f;
529 unsigned int num, fn, ven, dev;
530 int res = 1;
531
532 f = ul_path_fopen(desc->procfs, "r", "bus/pci/devices");
533 if (!f)
534 return 0;
535
536 /* for more details about bus/pci/devices format see
537 * drivers/pci/proc.c in linux kernel
538 */
539 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
540 &num, &fn, &ven, &dev) == 4) {
541
542 if (ven == vendor && dev == device)
543 goto found;
544 }
545
546 res = 0;
547 found:
548 fclose(f);
549 return res;
550 }
551
552 #if defined(__x86_64__) || defined(__i386__)
553
554 /*
555 * This CPUID leaf returns the information about the hypervisor.
556 * EAX : maximum input value for CPUID supported by the hypervisor.
557 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
558 */
559 #define HYPERVISOR_INFO_LEAF 0x40000000
560
/* Execute the CPUID instruction for leaf @op (sub-leaf 0) and return
 * the four result registers through the pointer arguments. */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches */
	"xchg %%ebx, %%esi;"
	"cpuid;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"cpuid;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	/* "1" ties op to the EAX output; ECX (sub-leaf) is forced to 0 */
	: "1" (op), "c"(0));
}
581
582 static void
583 read_hypervisor_cpuid(struct lscpu_desc *desc)
584 {
585 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
586 char hyper_vendor_id[13];
587
588 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
589
590 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
591 memcpy(hyper_vendor_id + 0, &ebx, 4);
592 memcpy(hyper_vendor_id + 4, &ecx, 4);
593 memcpy(hyper_vendor_id + 8, &edx, 4);
594 hyper_vendor_id[12] = '\0';
595
596 if (!hyper_vendor_id[0])
597 return;
598
599 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
600 desc->hyper = HYPER_XEN;
601 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
602 desc->hyper = HYPER_KVM;
603 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
604 desc->hyper = HYPER_MSHV;
605 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
606 desc->hyper = HYPER_VMWARE;
607 else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
608 desc->hyper = HYPER_SPAR;
609 }
610
611 #else /* ! (__x86_64__ || __i386__) */
/* CPUID is x86-only; no hypervisor signature available elsewhere */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
616 #endif
617
618 static int is_devtree_compatible(struct lscpu_desc *desc, const char *str)
619 {
620 FILE *fd = ul_path_fopen(desc->procfs, "r", "device-tree/compatible");
621
622 if (fd) {
623 char buf[256];
624 size_t i, len;
625
626 memset(buf, 0, sizeof(buf));
627 len = fread(buf, 1, sizeof(buf) - 1, fd);
628 fclose(fd);
629
630 for (i = 0; i < len;) {
631 if (!strcmp(&buf[i], str))
632 return 1;
633 i += strlen(&buf[i]);
634 i++;
635 }
636 }
637
638 return 0;
639 }
640
/* PowerPC-specific hypervisor detection based on /proc contents and the
 * device tree.  Sets desc->hyper and desc->virtype when a platform is
 * recognized; returns desc->hyper (HYPER_NONE/0 when nothing matched).
 */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (ul_path_access(desc->procfs, F_OK, "iSeries") == 0) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_devtree_compatible(desc, "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (ul_path_access(desc->procfs, F_OK, "device-tree/ibm,partition-name") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/hmc-managed?") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {

		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;

		/* a partition named "full" is treated as not virtualized */
		fd = ul_path_fopen(desc->procfs, "r", "device-tree/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_devtree_compatible(desc, "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
680
681 #ifdef INCLUDE_VMWARE_BDOOR
682
683 #define VMWARE_BDOOR_MAGIC 0x564D5868
684 #define VMWARE_BDOOR_PORT 0x5658
685 #define VMWARE_BDOOR_CMD_GETVERSION 10
686
/* Call the VMware backdoor I/O-port interface (GETVERSION command).
 * Under VMware the magic value comes back in EBX; on real hardware the
 * port read typically faults, which the caller catches via a SIGSEGV
 * handler (see is_vmware_platform()). */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
	/* x86 PIC cannot clobber ebx -- gcc bitches */
	"xchg %%ebx, %%esi;"
	"inl (%%dx), %%eax;"
	"xchg %%esi, %%ebx;"
	: "=S" (*ebx),
#else
	"inl (%%dx), %%eax;"
	: "=b" (*ebx),
#endif
	  "=a" (*eax),
	  "=c" (*ecx),
	  "=d" (*edx)
	: "0" (VMWARE_BDOOR_MAGIC),
	  "1" (VMWARE_BDOOR_CMD_GETVERSION),
	  "2" (VMWARE_BDOOR_PORT),
	  "3" (0)
	: "memory");
}
710
/* return point for the SIGSEGV handler below */
static jmp_buf segv_handler_env;

/* SIGSEGV handler: jump back to the sigsetjmp() in is_vmware_platform()
 * when the backdoor probe faults on non-VMware hardware. */
static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}
720
/* Returns 1 when the VMware backdoor responds (i.e. we run under
 * VMware), 0 otherwise.  Only probes when running as root. */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		/* we land here via siglongjmp() -> the probe faulted */
		return 0;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	/* restore the original SIGSEGV disposition */
	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
761
762 #else /* ! INCLUDE_VMWARE_BDOOR */
763
/* VMware backdoor probe not built in (non-x86 or AddressSanitizer) */
static int
is_vmware_platform(void)
{
	return 0;
}
769
770 #endif /* INCLUDE_VMWARE_BDOOR */
771
/* Detect the hypervisor vendor (desc->hyper) and virtualization type
 * (desc->virtype), trying in order: WSL, CPUID leaf, DMI, the VMware
 * backdoor, PowerPC heuristics, Xen /proc and /sys files, known
 * emulated PCI devices, /proc/sysinfo (PR/SM or KVM), OpenVZ, known
 * vendor strings, UML, and Linux-VServer.
 */
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */

	if ((fd = ul_path_fopen(desc->procfs, "r", "sys/kernel/osrelease"))) {
		char buf[256];

		if (fgets(buf, sizeof(buf), fd) != NULL) {
			if (strstr(buf, "Microsoft")) {
				desc->hyper = HYPER_WSL;
				desc->virtype = VIRT_CONT;
			}
		}
		fclose(fd);
		if (desc->virtype)
			return;
	}

	if (mod->system != SYSTEM_SNAPSHOT) {
		/* these probe the CPU we actually run on, so they are
		 * skipped when analyzing a /sys//proc snapshot */
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper) {
		desc->virtype = VIRT_FULL;

		if (desc->hyper == HYPER_XEN) {
			uint32_t features;

			fd = ul_prefix_fopen(desc->prefix, "r", _PATH_SYS_HYP_FEATURES);

			if (fd && fscanf(fd, "%x", &features) == 1) {
				/* Xen PV domain */
				if (features & XEN_FEATURES_PV_MASK)
					desc->virtype = VIRT_PARA;
				/* Xen PVH domain */
				else if ((features & XEN_FEATURES_PVH_MASK)
								== XEN_FEATURES_PVH_MASK)
					desc->virtype = VIRT_PARA;
			}
			if (fd)
				fclose(fd);
		}
	} else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (ul_path_access(desc->procfs, F_OK, "xen") == 0) {
		int dom0 = 0;

		fd = ul_path_fopen(desc->procfs, "r", "xen/capabilities");
		if (fd) {
			char buf[256];

			/* the "control_d" capability marks dom0 */
			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		char buf[BUFSIZ];

		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			char *str, *p;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			p = strchr(buf, ':');
			if (!p)
				continue;
			xasprintf(&str, "%s", p + 1);

			/* remove leading, trailing and repeating whitespace
			 * (note: str is advanced past the start of the
			 * allocation, so the string is deliberately never
			 * freed -- desc->hypervisor keeps pointing into it) */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			while ((str = strstr(desc->hypervisor, "  ")))
				memmove(str, str + 1, strlen(str));
			break;
		}
		fclose(fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 * /proc/bc should not */
	else if (ul_path_access(desc->procfs, F_OK, "vz") == 0 &&
		 ul_path_access(desc->procfs, F_OK, "bc") != 0) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		   (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		    strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "self/status"))) {
		char buf[BUFSIZ];
		char *val = NULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID means a VServer guest */
		if (val) {
			char *org = val;

			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
			free(org);
		}
	}
}
928
929 /* add @set to the @ary, unnecessary set is deallocated. */
930 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
931 {
932 int i;
933 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
934
935 if (!ary)
936 return -1;
937
938 for (i = 0; i < *items; i++) {
939 if (CPU_EQUAL_S(setsize, set, ary[i]))
940 break;
941 }
942 if (i == *items) {
943 ary[*items] = set;
944 ++*items;
945 return 0;
946 }
947 CPU_FREE(set);
948 return 1;
949 }
950
/* Read the sysfs topology files (thread/core/book/drawer siblings and
 * the corresponding ids) for the CPU at index @idx into @desc.  On the
 * first call the per-CPU maps/ids arrays are allocated and the topology
 * counters estimated.
 */
static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings;
	cpu_set_t *book_siblings, *drawer_siblings;
	int coreid, socketid, bookid, drawerid;
	int i, num = real_cpu_num(desc, idx);

	if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/topology/thread_siblings", num) != 0)
		return;

	ul_path_readf_cpuset(desc->syscpu, &thread_siblings, maxcpus,
					"cpu%d/topology/thread_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &core_siblings, maxcpus,
					"cpu%d/topology/core_siblings", num);
	/* book/drawer levels are optional -- the sets presumably stay NULL
	 * when the sysfs files are missing (see the NULL tests below) */
	ul_path_readf_cpuset(desc->syscpu, &book_siblings, maxcpus,
					"cpu%d/topology/book_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &drawer_siblings, maxcpus,
					"cpu%d/topology/drawer_siblings", num);

	/* ids default to -1 when sysfs does not provide them */
	if (ul_path_readf_s32(desc->syscpu, &coreid, "cpu%d/topology/core_id", num) != 0)
		coreid = -1;

	if (ul_path_readf_s32(desc->syscpu, &socketid, "cpu%d/topology/physical_package_id", num) != 0)
		socketid = -1;

	if (ul_path_readf_s32(desc->syscpu, &bookid, "cpu%d/topology/book_id", num) != 0)
		bookid = -1;

	if (ul_path_readf_s32(desc->syscpu, &drawerid, "cpu%d/topology/drawer_id", num) != 0)
		drawerid = -1;

	if (!desc->coremaps) {
		/* first call: estimate counters and allocate the arrays */
		int ndrawers, nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book. Because of odd /
		 * non-present cpu maps and to keep calculation easy we make
		 * sure that nsockets and nbooks is at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* number of drawers */
		ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
		if (!ndrawers)
			ndrawers = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
		desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
		for (i = 0; i < desc->ncpuspos; i++)
			desc->coreids[i] = desc->socketids[i] = -1;
		if (book_siblings) {
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
			desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
			for (i = 0; i < desc->ncpuspos; i++)
				desc->bookids[i] = -1;
		}
		if (drawer_siblings) {
			desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
			desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
			for (i = 0; i < desc->ncpuspos; i++)
				desc->drawerids[i] = -1;
		}
	}

	/* add_cpuset_to_array() frees the set when an equal one is
	 * already stored */
	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	desc->coreids[idx] = coreid;
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	desc->socketids[idx] = socketid;
	if (book_siblings) {
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
		desc->bookids[idx] = bookid;
	}
	if (drawer_siblings) {
		add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
		desc->drawerids[idx] = drawerid;
	}
}
1061
1062 static void
1063 read_polarization(struct lscpu_desc *desc, int idx)
1064 {
1065 char mode[64];
1066 int num = real_cpu_num(desc, idx);
1067
1068 if (desc->dispatching < 0)
1069 return;
1070 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/polarization", num) != 0)
1071 return;
1072 if (!desc->polarization)
1073 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1074
1075 ul_path_readf_buffer(desc->syscpu, mode, sizeof(mode), "cpu%d/polarization", num);
1076
1077 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1078 desc->polarization[idx] = POLAR_VLOW;
1079 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1080 desc->polarization[idx] = POLAR_VMEDIUM;
1081 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1082 desc->polarization[idx] = POLAR_VHIGH;
1083 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1084 desc->polarization[idx] = POLAR_HORIZONTAL;
1085 else
1086 desc->polarization[idx] = POLAR_UNKNOWN;
1087 }
1088
1089 static void
1090 read_address(struct lscpu_desc *desc, int idx)
1091 {
1092 int num = real_cpu_num(desc, idx);
1093
1094 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/address", num) != 0)
1095 return;
1096 if (!desc->addresses)
1097 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1098 ul_path_readf_s32(desc->syscpu, &desc->addresses[idx], "cpu%d/address", num);
1099 }
1100
1101 static void
1102 read_configured(struct lscpu_desc *desc, int idx)
1103 {
1104 int num = real_cpu_num(desc, idx);
1105
1106 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/configure", num) != 0)
1107 return;
1108 if (!desc->configured)
1109 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1110 ul_path_readf_s32(desc->syscpu, &desc->configured[idx], "cpu%d/configure", num);
1111 }
1112
1113 /* Read overall maximum frequency of cpu */
1114 static char *
1115 cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1116 {
1117 int i;
1118 float cpu_freq = 0.0;
1119 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1120
1121 if (desc->present) {
1122 for (i = 0; i < desc->ncpuspos; i++) {
1123 if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
1124 && desc->maxmhz[i]) {
1125 float freq = atof(desc->maxmhz[i]);
1126
1127 if (freq > cpu_freq)
1128 cpu_freq = freq;
1129 }
1130 }
1131 }
1132 snprintf(buf, bufsz, "%.4f", cpu_freq);
1133 return buf;
1134 }
1135
1136 /* Read overall minimum frequency of cpu */
1137 static char *
1138 cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1139 {
1140 int i;
1141 float cpu_freq = -1.0;
1142 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1143
1144 if (desc->present) {
1145 for (i = 0; i < desc->ncpuspos; i++) {
1146 if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
1147 && desc->minmhz[i]) {
1148 float freq = atof(desc->minmhz[i]);
1149
1150 if (cpu_freq < 0.0 || freq < cpu_freq)
1151 cpu_freq = freq;
1152 }
1153 }
1154 }
1155 snprintf(buf, bufsz, "%.4f", cpu_freq);
1156 return buf;
1157 }
1158
1159
1160 static void
1161 read_max_mhz(struct lscpu_desc *desc, int idx)
1162 {
1163 int num = real_cpu_num(desc, idx);
1164 int mhz;
1165
1166 if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) != 0)
1167 return;
1168 if (!desc->maxmhz)
1169 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1170 xasprintf(&desc->maxmhz[idx], "%.4f", (float) mhz / 1000);
1171 }
1172
1173 static void
1174 read_min_mhz(struct lscpu_desc *desc, int idx)
1175 {
1176 int num = real_cpu_num(desc, idx);
1177 int mhz;
1178
1179 if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) != 0)
1180 return;
1181 if (!desc->minmhz)
1182 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1183 xasprintf(&desc->minmhz[idx], "%.4f", (float) mhz / 1000);
1184 }
1185
1186 static int
1187 cachecmp(const void *a, const void *b)
1188 {
1189 struct cpu_cache *c1 = (struct cpu_cache *) a;
1190 struct cpu_cache *c2 = (struct cpu_cache *) b;
1191
1192 return strcmp(c2->name, c1->name);
1193 }
1194
1195 static void
1196 read_cache(struct lscpu_desc *desc, int idx)
1197 {
1198 char buf[256];
1199 int i;
1200 int num = real_cpu_num(desc, idx);
1201
1202 if (!desc->ncaches) {
1203 while (ul_path_accessf(desc->syscpu, F_OK,
1204 "cpu%d/cache/index%d",
1205 num, desc->ncaches) == 0)
1206 desc->ncaches++;
1207
1208 if (!desc->ncaches)
1209 return;
1210 desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
1211 }
1212 for (i = 0; i < desc->ncaches; i++) {
1213 struct cpu_cache *ca = &desc->caches[i];
1214 cpu_set_t *map;
1215
1216 if (ul_path_accessf(desc->syscpu, F_OK,
1217 "cpu%d/cache/index%d", num, i) != 0)
1218 continue;
1219 if (!ca->name) {
1220 int type = 0, level;
1221
1222 /* cache type */
1223 if (ul_path_readf_buffer(desc->syscpu, buf, sizeof(buf),
1224 "cpu%d/cache/index%d/type", num, i) > 0) {
1225 if (!strcmp(buf, "Data"))
1226 type = 'd';
1227 else if (!strcmp(buf, "Instruction"))
1228 type = 'i';
1229 }
1230
1231 /* cache level */
1232 ul_path_readf_s32(desc->syscpu, &level,
1233 "cpu%d/cache/index%d/level", num, i);
1234 if (type)
1235 snprintf(buf, sizeof(buf), "L%d%c", level, type);
1236 else
1237 snprintf(buf, sizeof(buf), "L%d", level);
1238
1239 ca->name = xstrdup(buf);
1240
1241 /* cache size */
1242 if (ul_path_readf_string(desc->syscpu, &ca->size,
1243 "cpu%d/cache/index%d/size", num, i) < 0)
1244 ca->size = xstrdup("unknown size");
1245 }
1246
1247 /* information about how CPUs share different caches */
1248 ul_path_readf_cpuset(desc->syscpu, &map, maxcpus,
1249 "cpu%d/cache/index%d/shared_cpu_map", num, i);
1250
1251 if (!ca->sharedmaps)
1252 ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1253 add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
1254 }
1255 }
1256
1257 static inline int is_node_dirent(struct dirent *d)
1258 {
1259 return
1260 d &&
1261 #ifdef _DIRENT_HAVE_D_TYPE
1262 (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
1263 #endif
1264 strncmp(d->d_name, "node", 4) == 0 &&
1265 isdigit_string(d->d_name + 4);
1266 }
1267
/*
 * qsort() comparator for node numbers (ascending).
 *
 * Uses the (a > b) - (a < b) idiom instead of "*a - *b": the subtraction
 * can overflow for extreme int values, which is undefined behavior.
 */
static int
nodecmp(const void *ap, const void *bp)
{
	int a = *(const int *) ap, b = *(const int *) bp;

	return (a > b) - (a < b);
}
1274
/*
 * Reads NUMA topology from /sys/devices/system/node.
 *
 * First pass over the directory counts "node<N>" entries; second pass
 * collects the node numbers, which are then sorted ascending.  Finally the
 * per-node "cpumap" attributes are read into desc->nodemaps.
 */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	struct path_cxt *sysnode;

	desc->nnodes = 0;

	sysnode = ul_new_path(_PATH_SYS_NODE);
	if (!sysnode)
		err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
	ul_path_set_prefix(sysnode, desc->prefix);

	dir = ul_path_opendir(sysnode, NULL);
	if (!dir)
		goto done;	/* no NUMA information available */

	/* first pass: count node directories */
	while ((d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		closedir(dir);
		goto done;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	/* second pass: extract the number from each "node<N>" name;
	 * the i < nnodes bound guards against the directory changing
	 * between the two passes */
	rewinddir(dir);
	while ((d = readdir(dir)) && i < desc->nnodes) {
		if (is_node_dirent(d))
			desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
						_("Failed to extract the node number"));
	}
	closedir(dir);
	qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		ul_path_readf_cpuset(sysnode, &desc->nodemaps[i], maxcpus,
				"node%d/cpumap", desc->idx2nodenum[i]);
done:
	ul_unref_path(sysnode);
}
1323
/*
 * Formats one table cell for CPU @idx / column @col into @buf.
 *
 * With mod->physical the raw per-CPU topology IDs are printed ("-" when
 * unknown, i.e. id == -1); otherwise logical IDs are derived from the
 * position of the CPU's cpuset within the corresponding map array.
 * Returns @buf, or NULL when the CACHE string would not fit.
 */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CORE:
		if (mod->physical) {
			if (desc->coreids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->coreids[idx]);
		} else {
			/* logical core id = index of the matching coremap */
			if (cpuset_ary_isset(cpu, desc->coremaps,
					     desc->ncores, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_SOCKET:
		if (mod->physical) {
			if (desc->socketids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->socketids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->socketmaps,
					     desc->nsockets, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_NODE:
		/* nodes always print the real node number, never a logical id */
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_DRAWER:
		if (mod->physical) {
			if (desc->drawerids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->drawermaps,
					     desc->ndrawers, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_BOOK:
		if (mod->physical) {
			if (desc->bookids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->bookids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->bookmaps,
					     desc->nbooks, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CACHE:
	{
		/* one logical id per cache, highest level first, joined by
		 * ',' (compat mode) or ':'; NULL on buffer overflow */
		char *p = buf;
		size_t sz = bufsz;
		int j;

		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				if (x < 0 || (size_t) x >= sz)
					return NULL;
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CONFIGURED:
		if (!desc->configured)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_MAXMHZ:
		if (desc->maxmhz && desc->maxmhz[idx])
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_MINMHZ:
		if (desc->minmhz && desc->minmhz[idx])
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}
1464
/*
 * Formats the header for column @col into @buf.
 *
 * For COL_CACHE the header is the list of cache names (highest level
 * first) joined by ',' (compat mode) or ':'; if no caches are known the
 * generic column name is used instead.  Returns @buf, or NULL when the
 * cache-name list would not fit into @bufsz.
 */
static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
		char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;	/* truncated */
			sz -= x;
			p += x;
			if (i > 0) {
				/* need room for separator + terminator */
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
	}
	/* fall back to the generic column name */
	snprintf(buf, bufsz, "%s", coldescs[col].name);
	return buf;
}
1497
1498 /*
1499 * [-p] backend, we support two parsable formats:
1500 *
1501 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1502 * output and it contains fixed set of the columns. The CACHE columns are at
1503 * the end of the line and the CACHE is not printed if the number of the caches
1504 * is zero. The CACHE columns are separated by two commas, for example:
1505 *
1506 * $ lscpu --parse
1507 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1508 * 0,0,0,0,,0,0,0
1509 * 1,1,0,0,,1,1,0
1510 *
 * 2) "user defined output" -- this format always prints all columns without a
 * special prefix for the CACHE column. If there are no caches then the column is
 * empty and the header "Cache" is printed rather than a real name of the cache.
1514 * The CACHE columns are separated by ':'.
1515 *
1516 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1517 * # CPU,Core,Socket,Node,L1d:L1i:L2
1518 * 0,0,0,0,0:0:0
1519 * 1,1,0,0,1:1:0
1520 */
1521 static void
1522 print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1523 struct lscpu_modifier *mod)
1524 {
1525 char buf[BUFSIZ], *data;
1526 int i;
1527
1528 /*
1529 * Header
1530 */
1531 printf(_(
1532 "# The following is the parsable format, which can be fed to other\n"
1533 "# programs. Each different item in every column has an unique ID\n"
1534 "# starting from zero.\n"));
1535
1536 fputs("# ", stdout);
1537 for (i = 0; i < ncols; i++) {
1538 int col = cols[i];
1539
1540 if (col == COL_CACHE) {
1541 if (mod->compat && !desc->ncaches)
1542 continue;
1543 if (mod->compat && i != 0)
1544 putchar(',');
1545 }
1546 if (i > 0)
1547 putchar(',');
1548
1549 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1550
1551 if (data && * data && col != COL_CACHE &&
1552 !coldescs[col].is_abbr) {
1553 /*
1554 * For normal column names use mixed case (e.g. "Socket")
1555 */
1556 char *p = data + 1;
1557
1558 while (p && *p != '\0') {
1559 *p = tolower((unsigned int) *p);
1560 p++;
1561 }
1562 }
1563 fputs(data && *data ? data : "", stdout);
1564 }
1565 putchar('\n');
1566
1567 /*
1568 * Data
1569 */
1570 for (i = 0; i < desc->ncpuspos; i++) {
1571 int c;
1572 int cpu = real_cpu_num(desc, i);
1573
1574 if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
1575 continue;
1576 if (!mod->online && desc->online && is_cpu_online(desc, cpu))
1577 continue;
1578 if (desc->present && !is_cpu_present(desc, cpu))
1579 continue;
1580 for (c = 0; c < ncols; c++) {
1581 if (mod->compat && cols[c] == COL_CACHE) {
1582 if (!desc->ncaches)
1583 continue;
1584 if (c > 0)
1585 putchar(',');
1586 }
1587 if (c > 0)
1588 putchar(',');
1589
1590 data = get_cell_data(desc, i, cols[c], mod,
1591 buf, sizeof(buf));
1592 fputs(data && *data ? data : "", stdout);
1593 }
1594 putchar('\n');
1595 }
1596 }
1597
1598 /*
1599 * [-e] backend
1600 */
/*
 * Prints the -e/--extended output: a libsmartcols table with one row per
 * selected CPU and one column per entry of @cols.  Empty cells are shown
 * as "-".
 */
static void
print_readable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "cpus");
	}

	/* one table column per requested output column */
	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, data, 0, 0))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		/* honour the --all/--online/--offline selection */
		if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
			continue;
		if (!mod->online && desc->online && is_cpu_online(desc, cpu))
			continue;
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";	/* placeholder for empty cells */
			if (scols_line_set_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1655
1656
/*
 * Appends a "<txt> <formatted data>" row to the summary table @tb.
 * The data string is built printf-style from @fmt and ownership of it is
 * transferred to the table via scols_line_refer_data().
 */
static void __attribute__ ((__format__(printf, 3, 4)))
add_summary_sprint(struct libscols_table *tb,
		   const char *txt,
		   const char *fmt,
		   ...)
{
	struct libscols_line *ln = scols_table_new_line(tb, NULL);
	char *data;
	va_list args;

	if (!ln)
		err(EXIT_FAILURE, _("failed to allocate output line"));

	/* description column */
	scols_line_set_data(ln, 0, txt);

	/* data column */
	va_start(args, fmt);
	xvasprintf(&data, fmt, args);
	va_end(args);

	if (data && scols_line_refer_data(ln, 1, data))
		err(EXIT_FAILURE, _("failed to add output data"));
}

/* convenience wrappers for the common int / string data cases */
#define add_summary_n(tb, txt, num)	add_summary_sprint(tb, txt, "%d", num)
#define add_summary_s(tb, txt, str)	add_summary_sprint(tb, txt, "%s", str)
1684
1685 static void
1686 print_cpuset(struct libscols_table *tb,
1687 const char *key, cpu_set_t *set, int hex)
1688 {
1689 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1690 size_t setbuflen = 7 * maxcpus;
1691 char setbuf[setbuflen], *p;
1692
1693 if (hex) {
1694 p = cpumask_create(setbuf, setbuflen, set, setsize);
1695 add_summary_s(tb, key, p);
1696 } else {
1697 p = cpulist_create(setbuf, setbuflen, set, setsize);
1698 add_summary_s(tb, key, p);
1699 }
1700 }
1701
1702 static int get_cache_full_size(struct lscpu_desc *desc, int idx, uint64_t *res)
1703 {
1704 struct cpu_cache *ca = &desc->caches[idx];
1705 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1706 int i, nshares = 0, rc;
1707 uint64_t sz;
1708
1709 /* Convert size to number */
1710 rc = parse_size(ca->size, &sz, NULL);
1711 if (rc)
1712 return rc;
1713
1714 /* Count number of CPUs which shares the cache */
1715 for (i = 0; i < desc->ncpuspos; i++) {
1716 int cpu = real_cpu_num(desc, i);
1717
1718 if (desc->present && !is_cpu_present(desc, cpu))
1719 continue;
1720 if (CPU_ISSET_S(cpu, setsize, ca->sharedmaps[0]))
1721 nshares++;
1722 }
1723
1724 /* Correction for CPU threads */
1725 if (desc->nthreads > desc->ncores)
1726 nshares /= (desc->nthreads / desc->ncores);
1727
1728 *res = (desc->ncores / nshares) * sz;
1729 return 0;
1730 }
1731
1732 /*
1733 * default output
1734 */
/*
 * Prints the default (no -e/-p) two-column summary table: one row per
 * available piece of information, in a fixed order.  Rows whose data is
 * unknown are simply skipped.
 */
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	char buf[BUFSIZ];
	int i = 0;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	struct libscols_table *tb;

	scols_init_debug(0);

	tb = scols_new_table();
	if (!tb)
		err(EXIT_FAILURE, _("failed to allocate output table"));

	scols_table_enable_noheadings(tb, 1);
	if (mod->json) {
		scols_table_enable_json(tb, 1);
		scols_table_set_name(tb, "lscpu");
	}

	if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
	    scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES) == NULL)
		err(EXIT_FAILURE, _("failed to initialize output column"));

	add_summary_s(tb, _("Architecture:"), desc->arch);
	if (desc->mode) {
		/* compose "32-bit, 64-bit" style list; the final
		 * *(p - 2) = '\0' chops the trailing ", " separator */
		char *p = buf;

		if (desc->mode & MODE_32BIT) {
			strcpy(p, "32-bit, ");
			p += 8;
		}
		if (desc->mode & MODE_64BIT) {
			strcpy(p, "64-bit, ");
			p += 8;
		}
		*(p - 2) = '\0';
		add_summary_s(tb, _("CPU op-mode(s):"), buf);
	}
#if !defined(WORDS_BIGENDIAN)
	add_summary_s(tb, _("Byte Order:"), "Little Endian");
#else
	add_summary_s(tb, _("Byte Order:"), "Big Endian");
#endif

	if (desc->addrsz)
		add_summary_s(tb, _("Address sizes:"), desc->addrsz);

	add_summary_n(tb, _("CPU(s):"), desc->ncpus);

	if (desc->online)
		print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
					    _("On-line CPU(s) list:"),
				desc->online, mod->hex);

	if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
		cpu_set_t *set;

		/* Linux kernel provides cpuset of off-line CPUs that contains
		 * all configured CPUs (see /sys/devices/system/cpu/offline),
		 * but want to print real (present in system) off-line CPUs only.
		 */
		set = cpuset_alloc(maxcpus, NULL, NULL);
		if (!set)
			err(EXIT_FAILURE, _("failed to callocate cpu set"));
		CPU_ZERO_S(setsize, set);
		for (i = 0; i < desc->ncpuspos; i++) {
			int cpu = real_cpu_num(desc, i);
			if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
				CPU_SET_S(cpu, setsize, set);
		}
		print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
					    _("Off-line CPU(s) list:"),
			     set, mod->hex);
		cpuset_free(set);
	}

	if (desc->nsockets) {
		int threads_per_core, cores_per_socket, sockets_per_book;
		int books_per_drawer, drawers;
		FILE *fd;

		threads_per_core = cores_per_socket = sockets_per_book = 0;
		books_per_drawer = drawers = 0;
		/* s390 detects its cpu topology via /proc/sysinfo, if present.
		 * Using simply the cpu topology masks in sysfs will not give
		 * usable results since everything is virtualized. E.g.
		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
		 * five cpus.
		 * If the cpu topology is not exported (e.g. 2nd level guest)
		 * fall back to old calculation scheme.
		 */
		if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
			int t0, t1;

			while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
				if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
					   &t0, &t1, &drawers, &books_per_drawer,
					   &sockets_per_book,
					   &cores_per_socket) == 6)
					break;
			}
			if (fd)
				fclose(fd);
		}
		if (desc->mtid)
			threads_per_core = atoi(desc->mtid) + 1;
		/* "x ?: y" (GNU extension) prefers the /proc/sysinfo value
		 * and falls back to the sysfs-derived ratio when it is 0 */
		add_summary_n(tb, _("Thread(s) per core:"),
			threads_per_core ?: desc->nthreads / desc->ncores);
		add_summary_n(tb, _("Core(s) per socket:"),
			cores_per_socket ?: desc->ncores / desc->nsockets);
		if (desc->nbooks) {
			add_summary_n(tb, _("Socket(s) per book:"),
				sockets_per_book ?: desc->nsockets / desc->nbooks);
			if (desc->ndrawers) {
				add_summary_n(tb, _("Book(s) per drawer:"),
					books_per_drawer ?: desc->nbooks / desc->ndrawers);
				add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
			} else {
				add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
			}
		} else {
			add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
		}
	}
	if (desc->nnodes)
		add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
	if (desc->vendor)
		add_summary_s(tb, _("Vendor ID:"), desc->vendor);
	if (desc->machinetype)
		add_summary_s(tb, _("Machine type:"), desc->machinetype);
	if (desc->family)
		add_summary_s(tb, _("CPU family:"), desc->family);
	if (desc->model || desc->revision)
		add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
	if (desc->modelname || desc->cpu)
		add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
	if (desc->stepping)
		add_summary_s(tb, _("Stepping:"), desc->stepping);
	if (desc->freqboost >= 0)
		add_summary_s(tb, _("Frequency boost:"), desc->freqboost ?
				_("enabled") : _("disabled"));
	if (desc->mhz)
		add_summary_s(tb, _("CPU MHz:"), desc->mhz);
	if (desc->dynamic_mhz)
		add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
	if (desc->static_mhz)
		add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
	if (desc->maxmhz)
		add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
	if (desc->minmhz)
		add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
	if (desc->bogomips)
		add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
	if (desc->virtflag) {
		/* cpuinfo flag tells which hardware virtualization is present */
		if (!strcmp(desc->virtflag, "svm"))
			add_summary_s(tb, _("Virtualization:"), "AMD-V");
		else if (!strcmp(desc->virtflag, "vmx"))
			add_summary_s(tb, _("Virtualization:"), "VT-x");
	}
	if (desc->hypervisor)
		add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
	if (desc->hyper) {
		add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
		add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
	}
	if (desc->dispatching >= 0)
		add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
	if (desc->ncaches) {
		/* machine-wide cache totals, highest level first */
		for (i = desc->ncaches - 1; i >= 0; i--) {
			uint64_t sz = 0;
			char *tmp;

			if (get_cache_full_size(desc, i, &sz) != 0)
				continue;
			tmp = size_to_human_string(
					SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
					sz);
			snprintf(buf, sizeof(buf),
					_("%s cache: "), desc->caches[i].name);
			add_summary_s(tb, buf, tmp);
			free(tmp);
		}
	}
	if (desc->necaches) {
		/* extra caches (from /proc/cpuinfo, not sysfs) */
		for (i = desc->necaches - 1; i >= 0; i--) {
			snprintf(buf, sizeof(buf),
					_("%s cache:"), desc->ecaches[i].name);
			add_summary_s(tb, buf, desc->ecaches[i].size);
		}
	}

	for (i = 0; i < desc->nnodes; i++) {
		snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
		print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
	}

	if (desc->physsockets) {
		add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
		add_summary_n(tb, _("Physical chips:"), desc->physchips);
		add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
	}

	if (desc->flags)
		add_summary_s(tb, _("Flags:"), desc->flags);

	scols_print_table(tb);
	scols_unref_table(tb);
}
1944
/* Prints the --help text (options and available columns) and exits. */
static void __attribute__((__noreturn__)) usage(void)
{
	FILE *out = stdout;
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_SEPARATOR, out);
	fputs(_("Display information about the CPU architecture.\n"), out);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all               print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online            print online CPUs only (default for -p)\n"), out);
	fputs(_(" -c, --offline           print offline CPUs only\n"), out);
	fputs(_(" -J, --json              use JSON for default or extended format\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>]    print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir>     use specified directory as system root\n"), out);
	fputs(_(" -x, --hex               print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(_(" -y, --physical          print physical instead of logical IDs\n"), out);
	fputs(USAGE_SEPARATOR, out);
	printf(USAGE_HELP_OPTIONS(25));

	/* list every column usable with -e/-p */
	fputs(USAGE_COLUMNS, out);
	for (i = 0; i < ARRAY_SIZE(coldescs); i++)
		fprintf(out, " %13s  %s\n", coldescs[i].name, _(coldescs[i].help));

	printf(USAGE_MAN_TAIL("lscpu(1)"));

	exit(EXIT_SUCCESS);
}
1977
1978 int main(int argc, char *argv[])
1979 {
1980 struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
1981 struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
1982 int c, i;
1983 int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
1984 int cpu_modifier_specified = 0;
1985 size_t setsize;
1986
1987 enum {
1988 OPT_OUTPUT_ALL = CHAR_MAX + 1,
1989 };
1990 static const struct option longopts[] = {
1991 { "all", no_argument, NULL, 'a' },
1992 { "online", no_argument, NULL, 'b' },
1993 { "offline", no_argument, NULL, 'c' },
1994 { "help", no_argument, NULL, 'h' },
1995 { "extended", optional_argument, NULL, 'e' },
1996 { "json", no_argument, NULL, 'J' },
1997 { "parse", optional_argument, NULL, 'p' },
1998 { "sysroot", required_argument, NULL, 's' },
1999 { "physical", no_argument, NULL, 'y' },
2000 { "hex", no_argument, NULL, 'x' },
2001 { "version", no_argument, NULL, 'V' },
2002 { "output-all", no_argument, NULL, OPT_OUTPUT_ALL },
2003 { NULL, 0, NULL, 0 }
2004 };
2005
2006 static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
2007 { 'a','b','c' },
2008 { 'e','p' },
2009 { 0 }
2010 };
2011 int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;
2012
2013 setlocale(LC_ALL, "");
2014 bindtextdomain(PACKAGE, LOCALEDIR);
2015 textdomain(PACKAGE);
2016 atexit(close_stdout);
2017
2018 while ((c = getopt_long(argc, argv, "abce::hJp::s:xyV", longopts, NULL)) != -1) {
2019
2020 err_exclusive_options(c, longopts, excl, excl_st);
2021
2022 switch (c) {
2023 case 'a':
2024 mod->online = mod->offline = 1;
2025 cpu_modifier_specified = 1;
2026 break;
2027 case 'b':
2028 mod->online = 1;
2029 cpu_modifier_specified = 1;
2030 break;
2031 case 'c':
2032 mod->offline = 1;
2033 cpu_modifier_specified = 1;
2034 break;
2035 case 'h':
2036 usage();
2037 case 'J':
2038 mod->json = 1;
2039 break;
2040 case 'p':
2041 case 'e':
2042 if (optarg) {
2043 if (*optarg == '=')
2044 optarg++;
2045 ncolumns = string_to_idarray(optarg,
2046 columns, ARRAY_SIZE(columns),
2047 column_name_to_id);
2048 if (ncolumns < 0)
2049 return EXIT_FAILURE;
2050 }
2051 mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
2052 break;
2053 case 's':
2054 desc->prefix = optarg;
2055 mod->system = SYSTEM_SNAPSHOT;
2056 break;
2057 case 'x':
2058 mod->hex = 1;
2059 break;
2060 case 'y':
2061 mod->physical = 1;
2062 break;
2063 case 'V':
2064 printf(UTIL_LINUX_VERSION);
2065 return EXIT_SUCCESS;
2066 case OPT_OUTPUT_ALL:
2067 {
2068 size_t sz;
2069 for (sz = 0; sz < ARRAY_SIZE(coldescs); sz++)
2070 columns[sz] = 1;
2071 break;
2072 }
2073 default:
2074 errtryhelp(EXIT_FAILURE);
2075 }
2076 }
2077
2078 if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
2079 fprintf(stderr,
2080 _("%s: options --all, --online and --offline may only "
2081 "be used with options --extended or --parse.\n"),
2082 program_invocation_short_name);
2083 return EXIT_FAILURE;
2084 }
2085
2086 if (argc != optind) {
2087 warnx(_("bad usage"));
2088 errtryhelp(EXIT_FAILURE);
2089 }
2090
2091 /* set default cpu display mode if none was specified */
2092 if (!mod->online && !mod->offline) {
2093 mod->online = 1;
2094 mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
2095 }
2096
2097 ul_path_init_debug();
2098
2099 /* /sys/devices/system/cpu */
2100 desc->syscpu = ul_new_path(_PATH_SYS_CPU);
2101 if (!desc->syscpu)
2102 err(EXIT_FAILURE, _("failed to initialize CPUs sysfs handler"));
2103 if (desc->prefix)
2104 ul_path_set_prefix(desc->syscpu, desc->prefix);
2105
2106 /* /proc */
2107 desc->procfs = ul_new_path("/proc");
2108 if (!desc->procfs)
2109 err(EXIT_FAILURE, _("failed to initialize procfs handler"));
2110 if (desc->prefix)
2111 ul_path_set_prefix(desc->procfs, desc->prefix);
2112
2113 read_basicinfo(desc, mod);
2114
2115 setsize = CPU_ALLOC_SIZE(maxcpus);
2116
2117 for (i = 0; i < desc->ncpuspos; i++) {
2118 /* only consider present CPUs */
2119 if (desc->present &&
2120 !CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present))
2121 continue;
2122 read_topology(desc, i);
2123 read_cache(desc, i);
2124 read_polarization(desc, i);
2125 read_address(desc, i);
2126 read_configured(desc, i);
2127 read_max_mhz(desc, i);
2128 read_min_mhz(desc, i);
2129 }
2130
2131 if (desc->caches)
2132 qsort(desc->caches, desc->ncaches,
2133 sizeof(struct cpu_cache), cachecmp);
2134
2135 if (desc->ecaches)
2136 qsort(desc->ecaches, desc->necaches,
2137 sizeof(struct cpu_cache), cachecmp);
2138
2139 read_nodes(desc);
2140 read_hypervisor(desc, mod);
2141 arm_cpu_decode(desc);
2142
2143 switch(mod->mode) {
2144 case OUTPUT_SUMMARY:
2145 print_summary(desc, mod);
2146 break;
2147 case OUTPUT_PARSABLE:
2148 if (!ncolumns) {
2149 columns[ncolumns++] = COL_CPU;
2150 columns[ncolumns++] = COL_CORE;
2151 columns[ncolumns++] = COL_SOCKET;
2152 columns[ncolumns++] = COL_NODE;
2153 columns[ncolumns++] = COL_CACHE;
2154 mod->compat = 1;
2155 }
2156 print_parsable(desc, columns, ncolumns, mod);
2157 break;
2158 case OUTPUT_READABLE:
2159 if (!ncolumns) {
2160 /* No list was given. Just print whatever is there. */
2161 columns[ncolumns++] = COL_CPU;
2162 if (desc->nodemaps)
2163 columns[ncolumns++] = COL_NODE;
2164 if (desc->drawermaps)
2165 columns[ncolumns++] = COL_DRAWER;
2166 if (desc->bookmaps)
2167 columns[ncolumns++] = COL_BOOK;
2168 if (desc->socketmaps)
2169 columns[ncolumns++] = COL_SOCKET;
2170 if (desc->coremaps)
2171 columns[ncolumns++] = COL_CORE;
2172 if (desc->caches)
2173 columns[ncolumns++] = COL_CACHE;
2174 if (desc->online)
2175 columns[ncolumns++] = COL_ONLINE;
2176 if (desc->configured)
2177 columns[ncolumns++] = COL_CONFIGURED;
2178 if (desc->polarization)
2179 columns[ncolumns++] = COL_POLARIZATION;
2180 if (desc->addresses)
2181 columns[ncolumns++] = COL_ADDRESS;
2182 if (desc->maxmhz)
2183 columns[ncolumns++] = COL_MAXMHZ;
2184 if (desc->minmhz)
2185 columns[ncolumns++] = COL_MINMHZ;
2186 }
2187 print_readable(desc, columns, ncolumns, mod);
2188 break;
2189 }
2190
2191 ul_unref_path(desc->syscpu);
2192 ul_unref_path(desc->procfs);
2193 return EXIT_SUCCESS;
2194 }