/*
 * lscpu - CPU architecture information helper
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/personality.h>

#if (defined(__x86_64__) || defined(__i386__))
# if !defined(__SANITIZE_ADDRESS__)
#  define INCLUDE_VMWARE_BDOOR
# else
#  warning VMWARE detection disabled by __SANITIZE_ADDRESS__
# endif
#endif

#ifdef INCLUDE_VMWARE_BDOOR
# include <stdint.h>
# include <signal.h>
# include <strings.h>
# include <setjmp.h>
# ifdef HAVE_SYS_IO_H
#  include <sys/io.h>
# endif
#endif

#if defined(HAVE_LIBRTAS)
#include <librtas.h>
#endif

#include <libsmartcols.h>

#include "closestream.h"
#include "optutils.h"
#include "fileutils.h"

#include "lscpu.h"

#define CACHE_MAX 100

/* /sys paths */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_HYP_FEATURES	"/sys/hypervisor/properties/features"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE		_PATH_SYS_SYSTEM "/node"
/* Xen Domain feature flags used for /sys/hypervisor/properties/features */
#define XENFEAT_supervisor_mode_kernel		3
#define XENFEAT_mmu_pt_update_preserve_ad	5
#define XENFEAT_hvm_callback_vector		8

#define XEN_FEATURES_PV_MASK	(1U << XENFEAT_mmu_pt_update_preserve_ad)
#define XEN_FEATURES_PVH_MASK	( (1U << XENFEAT_supervisor_mode_kernel) \
				| (1U << XENFEAT_hvm_callback_vector) )
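
/*
 * Illustrative reading (not from the source): the features file contains a
 * hex bitmask, so a value of e.g. 0x20 (bit 5 set) matches
 * XEN_FEATURES_PV_MASK and is classified as a PV domain, while a PVH domain
 * must have both bits of XEN_FEATURES_PVH_MASK (0x108) set.
 */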

static const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

static const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par",
	[HYPER_WSL]	= "Windows Subsystem for Linux"
};

static const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

static const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};


/* dispatching modes */
static const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

static struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]		= {"U",  "-"},
	[POLAR_VLOW]		= {"VL", "vert-low"},
	[POLAR_VMEDIUM]		= {"VM", "vert-medium"},
	[POLAR_VHIGH]		= {"VH", "vert-high"},
	[POLAR_HORIZONTAL]	= {"H",  "horizontal"},
};

static int maxcpus;		/* size in bits of kernel cpu mask */

#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])

/*
 * IDs
 */
enum {
	COL_CPU_CPU,
	COL_CPU_CORE,
	COL_CPU_SOCKET,
	COL_CPU_NODE,
	COL_CPU_BOOK,
	COL_CPU_DRAWER,
	COL_CPU_CACHE,
	COL_CPU_POLARIZATION,
	COL_CPU_ADDRESS,
	COL_CPU_CONFIGURED,
	COL_CPU_ONLINE,
	COL_CPU_MAXMHZ,
	COL_CPU_MINMHZ,
};

enum {
	COL_CACHE_ALLSIZE,
	COL_CACHE_LEVEL,
	COL_CACHE_NAME,
	COL_CACHE_ONESIZE,
	COL_CACHE_TYPE,
	COL_CACHE_WAYS,
	COL_CACHE_ALLOCPOL,
	COL_CACHE_WRITEPOL,
	COL_CACHE_PHYLINE,
	COL_CACHE_SETS,
	COL_CACHE_COHERENCYSIZE
};


/* column description
 */
struct lscpu_coldesc {
	const char *name;
	const char *help;

	int flags;
	unsigned int is_abbr:1;	/* name is abbreviation */
};

static struct lscpu_coldesc coldescs_cpu[] =
{
	[COL_CPU_CPU]          = { "CPU", N_("logical CPU number"), SCOLS_FL_RIGHT, 1 },
	[COL_CPU_CORE]         = { "CORE", N_("logical core number"), SCOLS_FL_RIGHT },
	[COL_CPU_SOCKET]       = { "SOCKET", N_("logical socket number"), SCOLS_FL_RIGHT },
	[COL_CPU_NODE]         = { "NODE", N_("logical NUMA node number"), SCOLS_FL_RIGHT },
	[COL_CPU_BOOK]         = { "BOOK", N_("logical book number"), SCOLS_FL_RIGHT },
	[COL_CPU_DRAWER]       = { "DRAWER", N_("logical drawer number"), SCOLS_FL_RIGHT },
	[COL_CPU_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_CPU_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_CPU_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CPU_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_CPU_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU"), SCOLS_FL_RIGHT },
	[COL_CPU_MAXMHZ]       = { "MAXMHZ", N_("shows the maximum MHz of the CPU"), SCOLS_FL_RIGHT },
	[COL_CPU_MINMHZ]       = { "MINMHZ", N_("shows the minimum MHz of the CPU"), SCOLS_FL_RIGHT }
};

static struct lscpu_coldesc coldescs_cache[] =
{
	[COL_CACHE_ALLSIZE]    = { "ALL-SIZE", N_("size of all system caches"), SCOLS_FL_RIGHT },
	[COL_CACHE_LEVEL]      = { "LEVEL", N_("cache level"), SCOLS_FL_RIGHT },
	[COL_CACHE_NAME]       = { "NAME", N_("cache name") },
	[COL_CACHE_ONESIZE]    = { "ONE-SIZE", N_("size of one cache"), SCOLS_FL_RIGHT },
	[COL_CACHE_TYPE]       = { "TYPE", N_("cache type") },
	[COL_CACHE_WAYS]       = { "WAYS", N_("ways of associativity"), SCOLS_FL_RIGHT },
	[COL_CACHE_ALLOCPOL]   = { "ALLOC-POLICY", N_("allocation policy") },
	[COL_CACHE_WRITEPOL]   = { "WRITE-POLICY", N_("write policy") },
	[COL_CACHE_PHYLINE]    = { "PHY-LINE", N_("number of physical cache lines per cache tag"), SCOLS_FL_RIGHT },
	[COL_CACHE_SETS]       = { "SETS", N_("number of sets in the cache; set lines have the same cache index"), SCOLS_FL_RIGHT },
	[COL_CACHE_COHERENCYSIZE] = { "COHERENCY-SIZE", N_("minimum amount of data in bytes transferred from memory to cache"), SCOLS_FL_RIGHT }
};


static int get_cache_full_size(struct lscpu_desc *desc, struct cpu_cache *ca, uint64_t *res);

static int
cpu_column_name_to_id(const char *name, size_t namesz)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++) {
		const char *cn = coldescs_cpu[i].name;

		if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
			return i;
	}
	warnx(_("unknown column: %s"), name);
	return -1;
}

static int
cache_column_name_to_id(const char *name, size_t namesz)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++) {
		const char *cn = coldescs_cache[i].name;

		if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
			return i;
	}
	warnx(_("unknown column: %s"), name);
	return -1;
}

/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <value>"
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces */
	for (p = line + len; isspace(*p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace(*p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value */
	len = strlen(line) - 1;
	for (p = line + len; isspace(*(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
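
/*
 * Worked example (hypothetical /proc/cpuinfo content): for the line
 * "model name\t: AMD EPYC 7551" and pattern "model name", lookup() stores
 * a newly allocated "AMD EPYC 7551" (trailing whitespace trimmed) in
 * *value and returns 1.
 */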

/* Parse extra cache lines contained within /proc/cpuinfo but which are not
 * part of the cache topology information within the sysfs filesystem.
 * This is true for all shared caches on e.g. s390. When there are layers of
 * hypervisors in between, it is not known which CPUs share which caches.
 * Therefore information about shared caches is only available in
 * /proc/cpuinfo.
 * Format is:
 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
 */
static int
lookup_cache(char *line, struct lscpu_desc *desc)
{
	struct cpu_cache *cache;
	long long size;
	char *p, type;
	int level;

	/* Make sure line starts with "cache<nr> :" */
	if (strncmp(line, "cache", 5))
		return 0;
	for (p = line + 5; isdigit(*p); p++);
	for (; isspace(*p); p++);
	if (*p != ':')
		return 0;

	/* strstr() may return NULL, so check it before applying an offset */
	p = strstr(line, "scope=");
	/* Skip private caches, also present in sysfs */
	if (!p || strncmp(p + 6, "Private", 7) == 0)
		return 0;
	p = strstr(line, "level=");
	if (!p || sscanf(p, "level=%d", &level) != 1)
		return 0;
	p = strstr(line, "type=");
	if (!p || !*(p + 5))
		return 0;
	p += 5;
	type = 0;
	if (strncmp(p, "Data", 4) == 0)
		type = 'd';
	else if (strncmp(p, "Instruction", 11) == 0)
		type = 'i';
	else if (strncmp(p, "Unified", 7) == 0)
		type = 'u';
	p = strstr(line, "size=");
	if (!p || sscanf(p, "size=%lld", &size) != 1)
		return 0;

	desc->necaches++;
	desc->ecaches = xrealloc(desc->ecaches,
				 desc->necaches * sizeof(struct cpu_cache));
	cache = &desc->ecaches[desc->necaches - 1];
	memset(cache, 0, sizeof(*cache));

	if (type == 'i' || type == 'd')
		xasprintf(&cache->name, "L%d%c", level, type);
	else
		xasprintf(&cache->name, "L%d", level);

	cache->level = level;
	cache->size = size * 1024;

	cache->type = type == 'i' ? xstrdup("Instruction") :
		      type == 'd' ? xstrdup("Data") :
		      type == 'u' ? xstrdup("Unified") : NULL;
	return 1;
}
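
/*
 * Example (hypothetical s390 cpuinfo line): parsing
 * "cache3 : level=4 type=Unified scope=Shared size=491520K line_size=256 associativity=30"
 * appends an extra cache named "L4" with level 4, type "Unified" and
 * size 491520 * 1024 bytes; the "size=" field is interpreted as KiB.
 */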

/* Don't init the mode for platforms where we are not able to
 * detect that the CPU supports 64-bit mode.
 */
static int
init_mode(struct lscpu_modifier *mod)
{
	int m = 0;

	if (mod->system == SYSTEM_SNAPSHOT)
		/* reading info from any /{sys,proc} dump, don't mix it with
		 * information about our real CPU */
		return 0;

#if defined(__alpha__) || defined(__ia64__)
	m |= MODE_64BIT;	/* 64bit platforms only */
#endif
	/* platforms with 64bit flag in /proc/cpuinfo, define
	 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
	m |= MODE_32BIT;
#endif

#if defined(__aarch64__)
	{
		/* personality() is the most reliable way (since 4.7)
		 * to determine aarch32 support */
		int pers = personality(PER_LINUX32);
		if (pers != -1) {
			personality(pers);
			m |= MODE_32BIT;
		}
		m |= MODE_64BIT;
	}
#endif
	return m;
}
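
/*
 * For example, on a typical x86_64 system init_mode() returns MODE_32BIT,
 * and read_basicinfo() below adds MODE_64BIT when it sees the "lm" flag,
 * so the summary ends up printing "32-bit, 64-bit" for CPU op-mode(s).
 */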

#if defined(HAVE_LIBRTAS)
#define PROCESSOR_MODULE_INFO	43
static int strbe16toh(const char *buf, int offset)
{
	return (buf[offset] << 8) + buf[offset+1];
}
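
/* e.g. the big-endian byte pair { 0x01, 0x2c } decodes as 0x012c == 300 */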

static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	ntypes = strbe16toh(buf, 2);

	assert(ntypes <= 1);
	if (!ntypes)
		return;

	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
#else
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif

static int cmp_vulnerability_name(const void *a0, const void *b0)
{
	const struct cpu_vulnerability *a = (const struct cpu_vulnerability *) a0,
				       *b = (const struct cpu_vulnerability *) b0;
	return strcmp(a->name, b->name);
}

static void read_vulnerabilities(struct lscpu_desc *desc)
{
	struct dirent *d;
	DIR *dir = ul_path_opendir(desc->syscpu, "vulnerabilities");
	int n = 0;

	if (!dir)
		return;

	desc->nvuls = n = 0;

	while (xreaddir(dir))
		n++;
	if (!n) {
		closedir(dir);
		return;
	}

	rewinddir(dir);
	desc->vuls = xcalloc(n, sizeof(struct cpu_vulnerability));

	while (desc->nvuls < n && (d = xreaddir(dir))) {
		char *str, *p;
		struct cpu_vulnerability *vu;

#ifdef _DIRENT_HAVE_D_TYPE
		if (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN)
			continue;
#endif
		if (ul_path_readf_string(desc->syscpu, &str,
					"vulnerabilities/%s", d->d_name) <= 0)
			continue;

		vu = &desc->vuls[desc->nvuls++];

		/* Name */
		vu->name = xstrdup(d->d_name);
		*vu->name = toupper(*vu->name);
		strrep(vu->name, '_', ' ');

		/* Description */
		vu->text = str;
		p = (char *) startswith(vu->text, "Mitigation");
		if (p) {
			*p = ';';
			strrem(vu->text, ':');
		}
	}
	closedir(dir);

	qsort(desc->vuls, desc->nvuls,
	      sizeof(struct cpu_vulnerability), cmp_vulnerability_name);
}

static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp;
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;
	cpu_set_t *cpuset = NULL;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));

	fp = ul_path_fopen(desc->procfs, "r", "cpuinfo");
	if (!fp)
		err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
	desc->arch = xstrdup(utsbuf.machine);

	/* details */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "CPU implementer", &desc->vendor)) ;	/* ARM and aarch64 */
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "CPU part", &desc->model)) ;	/* ARM and aarch64 */
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "CPU variant", &desc->stepping)) ;	/* aarch64 */
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ;	/* s390 */
		else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ;	/* s390 */
		else if (lookup(buf, "flags", &desc->flags)) ;		/* x86 */
		else if (lookup(buf, "features", &desc->flags)) ;	/* s390 */
		else if (lookup(buf, "Features", &desc->flags)) ;	/* aarch64 */
		else if (lookup(buf, "type", &desc->flags)) ;		/* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ;	/* aarch64 */
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ;	/* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else if (lookup(buf, "CPU revision", &desc->revision)) ;	/* aarch64 */
		else if (lookup(buf, "max thread id", &desc->mtid)) ;	/* s390 */
		else if (lookup(buf, "address sizes", &desc->addrsz)) ;	/* x86 */
		else if (lookup_cache(buf, desc)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (ul_path_read_s32(desc->syscpu, &maxcpus, "kernel_max") == 0)
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus += 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	if (ul_path_readf_cpulist(desc->syscpu, &cpuset, maxcpus, "possible") == 0) {
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, cpuset);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET_S(num, setsize, cpuset))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(cpuset);
		cpuset = NULL;
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->present, maxcpus, "present") == 0)
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);

	/* get mask for online CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->online, maxcpus, "online") == 0)
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);

	/* get dispatching mode */
	if (ul_path_read_s32(desc->syscpu, &desc->dispatching, "dispatching") != 0)
		desc->dispatching = -1;

	/* get cpufreq boost mode */
	if (ul_path_read_s32(desc->syscpu, &desc->freqboost, "cpufreq/boost") != 0)
		desc->freqboost = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);

	if ((fp = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		while (fgets(buf, sizeof(buf), fp) != NULL) {
			if (lookup(buf, "Type", &desc->machinetype))
				break;
		}
		fclose(fp);
	}

	/* vulnerabilities */
	if (ul_path_access(desc->syscpu, F_OK, "vulnerabilities") == 0)
		read_vulnerabilities(desc);
}

static int
has_pci_device(struct lscpu_desc *desc, unsigned int vendor, unsigned int device)
{
	FILE *f;
	unsigned int num, fn, ven, dev;
	int res = 1;

	f = ul_path_fopen(desc->procfs, "r", "bus/pci/devices");
	if (!f)
		return 0;

	/* for more details about the bus/pci/devices format see
	 * drivers/pci/proc.c in the Linux kernel
	 */
	while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
			&num, &fn, &ven, &dev) == 4) {

		if (ven == vendor && dev == device)
			goto found;
	}

	res = 0;
found:
	fclose(f);
	return res;
}

#if defined(__x86_64__) || defined(__i386__)

/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF	0x40000000

static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}
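
/*
 * The 12-byte vendor signature is just EBX, ECX and EDX glued together;
 * e.g. under VMware the leaf returns EBX="VMwa", ECX="reVM", EDX="ware",
 * i.e. the "VMwareVMware" string tested below.
 */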

static void
read_hypervisor_cpuid(struct lscpu_desc *desc)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char hyper_vendor_id[13];

	memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));

	cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
	memcpy(hyper_vendor_id + 0, &ebx, 4);
	memcpy(hyper_vendor_id + 4, &ecx, 4);
	memcpy(hyper_vendor_id + 8, &edx, 4);
	hyper_vendor_id[12] = '\0';

	if (!hyper_vendor_id[0])
		return;

	if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
		desc->hyper = HYPER_XEN;
	else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
		desc->hyper = HYPER_KVM;
	else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
		desc->hyper = HYPER_MSHV;
	else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
		desc->hyper = HYPER_VMWARE;
	else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
		desc->hyper = HYPER_SPAR;
}

#else	/* ! (__x86_64__ || __i386__) */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif

static int is_devtree_compatible(struct lscpu_desc *desc, const char *str)
{
	FILE *fd = ul_path_fopen(desc->procfs, "r", "device-tree/compatible");

	if (fd) {
		char buf[256];
		size_t i, len;

		memset(buf, 0, sizeof(buf));
		len = fread(buf, 1, sizeof(buf) - 1, fd);
		fclose(fd);

		for (i = 0; i < len;) {
			if (!strcmp(&buf[i], str))
				return 1;
			i += strlen(&buf[i]);
			i++;
		}
	}

	return 0;
}
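
/*
 * The device-tree "compatible" property is a sequence of NUL-terminated
 * strings, e.g. "qemu,pseries\0ibm,pseries\0" (illustrative), which is why
 * the loop above advances by strlen() + 1 after each comparison.
 */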

static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (ul_path_access(desc->procfs, F_OK, "iSeries") == 0) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_devtree_compatible(desc, "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (ul_path_access(desc->procfs, F_OK, "device-tree/ibm,partition-name") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/hmc-managed?") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {

		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;

		fd = ul_path_fopen(desc->procfs, "r", "device-tree/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_devtree_compatible(desc, "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}

#ifdef INCLUDE_VMWARE_BDOOR

#define VMWARE_BDOOR_MAGIC		0x564D5868
#define VMWARE_BDOOR_PORT		0x5658
#define VMWARE_BDOOR_CMD_GETVERSION	10

static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}

static jmp_buf segv_handler_env;

static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}

static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for VMware detection works
	 * fine under VMware, even if run as a regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install a SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}

#else /* ! INCLUDE_VMWARE_BDOOR */

static int
is_vmware_platform(void)
{
	return 0;
}

#endif /* INCLUDE_VMWARE_BDOOR */

static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */

	if ((fd = ul_path_fopen(desc->procfs, "r", "sys/kernel/osrelease"))) {
		char buf[256];

		if (fgets(buf, sizeof(buf), fd) != NULL) {
			if (strstr(buf, "Microsoft")) {
				desc->hyper = HYPER_WSL;
				desc->virtype = VIRT_CONT;
			}
		}
		fclose(fd);
		if (desc->virtype)
			return;
	}

	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper) {
		desc->virtype = VIRT_FULL;

		if (desc->hyper == HYPER_XEN) {
			uint32_t features;

			fd = ul_prefix_fopen(desc->prefix, "r", _PATH_SYS_HYP_FEATURES);

			if (fd && fscanf(fd, "%x", &features) == 1) {
				/* Xen PV domain */
				if (features & XEN_FEATURES_PV_MASK)
					desc->virtype = VIRT_PARA;
				/* Xen PVH domain */
				else if ((features & XEN_FEATURES_PVH_MASK)
							== XEN_FEATURES_PVH_MASK)
					desc->virtype = VIRT_PARA;
			}
			if (fd)
				fclose(fd);
		}
	} else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (ul_path_access(desc->procfs, F_OK, "xen") == 0) {
		int dom0 = 0;

		fd = ul_path_fopen(desc->procfs, "r", "xen/capabilities");
		if (fd) {
			char buf[256];

			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		char buf[BUFSIZ];

		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			char *str, *p;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			p = strchr(buf, ':');
			if (!p)
				continue;
			xasprintf(&str, "%s", p + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			while ((str = strstr(desc->hypervisor, "  ")))
				memmove(str, str + 1, strlen(str));
			break;
		}
		fclose(fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist,
	 * /proc/bc should not */
	else if (ul_path_access(desc->procfs, F_OK, "vz") == 0 &&
		 ul_path_access(desc->procfs, F_OK, "bc") != 0) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "self/status"))) {
		char buf[BUFSIZ];
		char *val = NULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		if (val) {
			char *org = val;

			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
			free(org);
		}
	}
}
/* add @set to the @ary, an unnecessary (duplicate) set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
{
	int i;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	if (!ary)
		return -1;

	for (i = 0; i < *items; i++) {
		if (CPU_EQUAL_S(setsize, set, ary[i]))
			break;
	}
	if (i == *items) {
		ary[*items] = set;
		++*items;
		return 0;
	}
	CPU_FREE(set);
	return 1;
}
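
/*
 * Typical use (see read_topology() below): feeding every CPU's
 * core_siblings mask into add_cpuset_to_array(desc->socketmaps,
 * &desc->nsockets, set) leaves exactly one mask per socket, because
 * duplicate sibling masks are freed rather than stored.
 */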

static void
read_topology(struct lscpu_desc *desc, int idx)
{
	cpu_set_t *thread_siblings, *core_siblings;
	cpu_set_t *book_siblings, *drawer_siblings;
	int coreid, socketid, bookid, drawerid;
	int i, num = real_cpu_num(desc, idx);

	if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/topology/thread_siblings", num) != 0)
		return;

	ul_path_readf_cpuset(desc->syscpu, &thread_siblings, maxcpus,
				"cpu%d/topology/thread_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &core_siblings, maxcpus,
				"cpu%d/topology/core_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &book_siblings, maxcpus,
				"cpu%d/topology/book_siblings", num);
	ul_path_readf_cpuset(desc->syscpu, &drawer_siblings, maxcpus,
				"cpu%d/topology/drawer_siblings", num);

	if (ul_path_readf_s32(desc->syscpu, &coreid, "cpu%d/topology/core_id", num) != 0)
		coreid = -1;

	if (ul_path_readf_s32(desc->syscpu, &socketid, "cpu%d/topology/physical_package_id", num) != 0)
		socketid = -1;

	if (ul_path_readf_s32(desc->syscpu, &bookid, "cpu%d/topology/book_id", num) != 0)
		bookid = -1;

	if (ul_path_readf_s32(desc->syscpu, &drawerid, "cpu%d/topology/drawer_id", num) != 0)
		drawerid = -1;

	if (!desc->coremaps) {
		int ndrawers, nbooks, nsockets, ncores, nthreads;
		size_t setsize = CPU_ALLOC_SIZE(maxcpus);

		/* threads within one core */
		nthreads = CPU_COUNT_S(setsize, thread_siblings);
		if (!nthreads)
			nthreads = 1;

		/* cores within one socket */
		ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
		if (!ncores)
			ncores = 1;

		/* number of sockets within one book.  Because of odd /
		 * non-present cpu maps and to keep the calculation easy we make
		 * sure that nsockets and nbooks are at least 1.
		 */
		nsockets = desc->ncpus / nthreads / ncores;
		if (!nsockets)
			nsockets = 1;

		/* number of books */
		nbooks = desc->ncpus / nthreads / ncores / nsockets;
		if (!nbooks)
			nbooks = 1;

		/* number of drawers */
		ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
		if (!ndrawers)
			ndrawers = 1;

		/* all threads, see also read_basicinfo()
		 * -- fallback for kernels without
		 *    /sys/devices/system/cpu/online.
		 */
		if (!desc->nthreads)
			desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;

		/* For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
		desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
		for (i = 0; i < desc->ncpuspos; i++)
			desc->coreids[i] = desc->socketids[i] = -1;
		if (book_siblings) {
			desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
			desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
			for (i = 0; i < desc->ncpuspos; i++)
				desc->bookids[i] = -1;
		}
		if (drawer_siblings) {
			desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
			desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
			for (i = 0; i < desc->ncpuspos; i++)
				desc->drawerids[i] = -1;
		}
	}

	add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
	desc->coreids[idx] = coreid;
	add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
	desc->socketids[idx] = socketid;
	if (book_siblings) {
		add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
		desc->bookids[idx] = bookid;
	}
	if (drawer_siblings) {
		add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
		desc->drawerids[idx] = drawerid;
	}
}

static void
read_polarization(struct lscpu_desc *desc, int idx)
{
	char mode[64];
	int num = real_cpu_num(desc, idx);

	if (desc->dispatching < 0)
		return;
	if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/polarization", num) != 0)
		return;
	if (!desc->polarization)
		desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));

	ul_path_readf_buffer(desc->syscpu, mode, sizeof(mode), "cpu%d/polarization", num);

	if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_VLOW;
	else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_VMEDIUM;
	else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_VHIGH;
	else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
		desc->polarization[idx] = POLAR_HORIZONTAL;
	else
		desc->polarization[idx] = POLAR_UNKNOWN;
}

static void
read_address(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);

	if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/address", num) != 0)
		return;
	if (!desc->addresses)
		desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
	ul_path_readf_s32(desc->syscpu, &desc->addresses[idx], "cpu%d/address", num);
}

static void
read_configured(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);

	if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/configure", num) != 0)
		return;
	if (!desc->configured)
		desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
	ul_path_readf_s32(desc->syscpu, &desc->configured[idx], "cpu%d/configure", num);
}

/* Read overall maximum frequency of cpu */
static char *
cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
{
	int i;
	float cpu_freq = 0.0;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	if (desc->present) {
		for (i = 0; i < desc->ncpuspos; i++) {
			if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
			    && desc->maxmhz[i]) {
				float freq = atof(desc->maxmhz[i]);

				if (freq > cpu_freq)
					cpu_freq = freq;
			}
		}
	}
	snprintf(buf, bufsz, "%.4f", cpu_freq);
	return buf;
}

/* Read overall minimum frequency of cpu */
static char *
cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
{
	int i;
	float cpu_freq = -1.0;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);

	if (desc->present) {
		for (i = 0; i < desc->ncpuspos; i++) {
			if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
			    && desc->minmhz[i]) {
				float freq = atof(desc->minmhz[i]);

				if (cpu_freq < 0.0 || freq < cpu_freq)
					cpu_freq = freq;
			}
		}
	}
	snprintf(buf, bufsz, "%.4f", cpu_freq);
	return buf;
}


static void
read_max_mhz(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);
	int mhz;

	if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) != 0)
		return;
	if (!desc->maxmhz)
		desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
	xasprintf(&desc->maxmhz[idx], "%.4f", (float) mhz / 1000);
}

static void
read_min_mhz(struct lscpu_desc *desc, int idx)
{
	int num = real_cpu_num(desc, idx);
	int mhz;

	if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) != 0)
		return;
	if (!desc->minmhz)
		desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
	xasprintf(&desc->minmhz[idx], "%.4f", (float) mhz / 1000);
}

static int
cachecmp(const void *a, const void *b)
{
	struct cpu_cache *c1 = (struct cpu_cache *) a;
	struct cpu_cache *c2 = (struct cpu_cache *) b;

	return strcmp(c2->name, c1->name);
}
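
/*
 * Note the reversed strcmp() arguments: the caches array ends up sorted in
 * descending name order (e.g. "L2", "L1i", "L1d"), and the printing code
 * walks it backwards so the lowest level is emitted first.
 */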

static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		while (ul_path_accessf(desc->syscpu, F_OK,
					"cpu%d/cache/index%d",
					num, desc->ncaches) == 0)
			desc->ncaches++;

		if (!desc->ncaches)
			return;
		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (ul_path_accessf(desc->syscpu, F_OK,
					"cpu%d/cache/index%d", num, i) != 0)
			continue;
		if (!ca->name) {
			int type = 0;

			/* cache type */
			if (ul_path_readf_string(desc->syscpu, &ca->type,
					"cpu%d/cache/index%d/type", num, i) > 0) {
				if (!strcmp(ca->type, "Data"))
					type = 'd';
				else if (!strcmp(ca->type, "Instruction"))
					type = 'i';
			}

			/* cache level */
			ul_path_readf_s32(desc->syscpu, &ca->level,
					"cpu%d/cache/index%d/level", num, i);
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			ul_path_readf_u32(desc->syscpu, &ca->ways_of_associativity,
					"cpu%d/cache/index%d/ways_of_associativity", num, i);
			ul_path_readf_u32(desc->syscpu, &ca->physical_line_partition,
					"cpu%d/cache/index%d/physical_line_partition", num, i);
			ul_path_readf_u32(desc->syscpu, &ca->number_of_sets,
					"cpu%d/cache/index%d/number_of_sets", num, i);
			ul_path_readf_u32(desc->syscpu, &ca->coherency_line_size,
					"cpu%d/cache/index%d/coherency_line_size", num, i);

			ul_path_readf_string(desc->syscpu, &ca->allocation_policy,
					"cpu%d/cache/index%d/allocation_policy", num, i);
			ul_path_readf_string(desc->syscpu, &ca->write_policy,
					"cpu%d/cache/index%d/write_policy", num, i);

			/* cache size */
			if (ul_path_readf_buffer(desc->syscpu, buf, sizeof(buf),
					"cpu%d/cache/index%d/size", num, i) > 0)
				parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		/* information about how CPUs share different caches */
		ul_path_readf_cpuset(desc->syscpu, &map, maxcpus,
				"cpu%d/cache/index%d/shared_cpu_map", num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}

static inline int is_node_dirent(struct dirent *d)
{
	return
		d &&
#ifdef _DIRENT_HAVE_D_TYPE
		(d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
		strncmp(d->d_name, "node", 4) == 0 &&
		isdigit_string(d->d_name + 4);
}

static int
nodecmp(const void *ap, const void *bp)
{
	int *a = (int *) ap, *b = (int *) bp;
	return *a - *b;
}

static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	struct path_cxt *sysnode;

	desc->nnodes = 0;

	sysnode = ul_new_path(_PATH_SYS_NODE);
	if (!sysnode)
		err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
	ul_path_set_prefix(sysnode, desc->prefix);

	dir = ul_path_opendir(sysnode, NULL);
	if (!dir)
		goto done;

	while ((d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		closedir(dir);
		goto done;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	rewinddir(dir);
	while ((d = readdir(dir)) && i < desc->nnodes) {
		if (is_node_dirent(d))
			desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
						_("Failed to extract the node number"));
	}
	closedir(dir);
	qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		ul_path_readf_cpuset(sysnode, &desc->nodemaps[i], maxcpus,
				"node%d/cpumap", desc->idx2nodenum[i]);
done:
	ul_unref_path(sysnode);
}

static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CPU_CORE:
		if (mod->physical) {
			if (desc->coreids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->coreids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->coremaps,
					     desc->ncores, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_SOCKET:
		if (mod->physical) {
			if (desc->socketids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->socketids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->socketmaps,
					     desc->nsockets, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_NODE:
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_CPU_DRAWER:
		if (mod->physical) {
			if (desc->drawerids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->drawermaps,
					     desc->ndrawers, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_BOOK:
		if (mod->physical) {
			if (desc->bookids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->bookids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->bookmaps,
					     desc->nbooks, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_CACHE:
	{
		char *p = buf;
		size_t sz = bufsz;
		int j;

		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				if (x < 0 || (size_t) x >= sz)
					return NULL;
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_CPU_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_CPU_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CPU_CONFIGURED:
		if (!desc->configured)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_CPU_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_CPU_MAXMHZ:
		if (desc->maxmhz && desc->maxmhz[idx])
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_CPU_MINMHZ:
		if (desc->minmhz && desc->minmhz[idx])
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}

static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
		char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CPU_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;
			sz -= x;
			p += x;
			if (i > 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
	}
	snprintf(buf, bufsz, "%s", coldescs_cpu[col].name);
	return buf;
}

/*
 * [-C] backend
 */
static void
print_caches_readable(struct lscpu_desc *desc, int cols[], int ncols,
		      struct lscpu_modifier *mod)
{
	int i;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "caches");
	}

	for (i = 0; i < ncols; i++) {
		struct lscpu_coldesc *cd = &coldescs_cache[cols[i]];
		if (!scols_table_new_column(table, cd->name, 0, cd->flags))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	for (i = desc->ncaches - 1; i >= 0; i--) {
		struct cpu_cache *ca = &desc->caches[i];
		struct libscols_line *line;
		int c;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			char *data = NULL;
			int col = cols[c];

			switch (col) {
			case COL_CACHE_NAME:
				if (ca->name)
					data = xstrdup(ca->name);
				break;
			case COL_CACHE_ONESIZE:
				if (!ca->size)
					break;
				if (mod->bytes)
					xasprintf(&data, "%" PRIu64, ca->size);
				else
					data = size_to_human_string(SIZE_SUFFIX_1LETTER, ca->size);
				break;
			case COL_CACHE_ALLSIZE:
			{
				uint64_t sz = 0;

				if (get_cache_full_size(desc, ca, &sz) != 0)
					break;
				if (mod->bytes)
					xasprintf(&data, "%" PRIu64, sz);
				else
					data = size_to_human_string(SIZE_SUFFIX_1LETTER, sz);
				break;
			}
			case COL_CACHE_WAYS:
				if (ca->ways_of_associativity)
					xasprintf(&data, "%u", ca->ways_of_associativity);
				break;

			case COL_CACHE_TYPE:
				if (ca->type)
					data = xstrdup(ca->type);
				break;
			case COL_CACHE_LEVEL:
				if (ca->level)
					xasprintf(&data, "%d", ca->level);
				break;
			case COL_CACHE_ALLOCPOL:
				if (ca->allocation_policy)
					data = xstrdup(ca->allocation_policy);
				break;
			case COL_CACHE_WRITEPOL:
				if (ca->write_policy)
					data = xstrdup(ca->write_policy);
				break;
			case COL_CACHE_PHYLINE:
				if (ca->physical_line_partition)
					xasprintf(&data, "%u", ca->physical_line_partition);
				break;
			case COL_CACHE_SETS:
				if (ca->number_of_sets)
					xasprintf(&data, "%u", ca->number_of_sets);
				break;
			case COL_CACHE_COHERENCYSIZE:
				if (ca->coherency_line_size)
					xasprintf(&data, "%u", ca->coherency_line_size);
				break;
			}

			if (data && scols_line_refer_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}

/*
 * [-p] backend, we support two parsable formats:
 *
 * 1) "compatible" -- this format is compatible with the original lscpu(1)
 * output and contains a fixed set of columns. The CACHE columns are at
 * the end of the line and the CACHE part is not printed at all if the
 * number of caches is zero. The CACHE columns are separated by two commas,
 * for example:
 *
 *    $ lscpu --parse
 *    # CPU,Core,Socket,Node,,L1d,L1i,L2
 *    0,0,0,0,,0,0,0
 *    1,1,0,0,,1,1,0
 *
 * 2) "user defined output" -- this format always prints all columns without
 * a special prefix for the CACHE column. If there are no caches then the
 * column is empty and the header "Cache" is printed rather than the real
 * cache name. The CACHE columns are separated by ':'.
 *
 *    $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
 *    # CPU,Core,Socket,Node,L1d:L1i:L2
 *    0,0,0,0,0:0:0
 *    1,1,0,0,1:1:0
 */
static void
print_cpus_parsable(struct lscpu_desc *desc, int cols[], int ncols,
		    struct lscpu_modifier *mod)
{
	char buf[BUFSIZ], *data;
	int i;

	/*
	 * Header
	 */
	printf(_(
	"# The following is the parsable format, which can be fed to other\n"
	"# programs. Each different item in every column has a unique ID\n"
	"# starting from zero.\n"));

	fputs("# ", stdout);
	for (i = 0; i < ncols; i++) {
		int col = cols[i];

		if (col == COL_CPU_CACHE) {
			if (mod->compat && !desc->ncaches)
				continue;
			if (mod->compat && i != 0)
				putchar(',');
		}
		if (i > 0)
			putchar(',');

		data = get_cell_header(desc, col, mod, buf, sizeof(buf));

		if (data && *data && col != COL_CPU_CACHE &&
		    !coldescs_cpu[col].is_abbr) {
			/*
			 * For normal column names use mixed case (e.g. "Socket")
			 */
			char *p = data + 1;

			while (p && *p != '\0') {
				*p = tolower((unsigned int) *p);
				p++;
			}
		}
		fputs(data && *data ? data : "", stdout);
	}
	putchar('\n');

	/*
	 * Data
	 */
	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		int cpu = real_cpu_num(desc, i);

		if (desc->online) {
			if (!mod->offline && !is_cpu_online(desc, cpu))
				continue;
			if (!mod->online && is_cpu_online(desc, cpu))
				continue;
		}
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;
		for (c = 0; c < ncols; c++) {
			if (mod->compat && cols[c] == COL_CPU_CACHE) {
				if (!desc->ncaches)
					continue;
				if (c > 0)
					putchar(',');
			}
			if (c > 0)
				putchar(',');

			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			fputs(data && *data ? data : "", stdout);
		}
		putchar('\n');
	}
}

/*
 * [-e] backend
 */
static void
print_cpus_readable(struct lscpu_desc *desc, int cols[], int ncols,
		    struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "cpus");
	}

	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, data, 0, coldescs_cpu[cols[i]].flags))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		if (desc->online) {
			if (!mod->offline && !is_cpu_online(desc, cpu))
				continue;
			if (!mod->online && is_cpu_online(desc, cpu))
				continue;
		}
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";
			if (scols_line_set_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}


static void __attribute__ ((__format__(printf, 3, 4)))
add_summary_sprint(struct libscols_table *tb,
		   const char *txt,
		   const char *fmt,
		   ...)
{
	struct libscols_line *ln = scols_table_new_line(tb, NULL);
	char *data;
	va_list args;

	if (!ln)
		err(EXIT_FAILURE, _("failed to allocate output line"));

	/* description column */
	if (txt && scols_line_set_data(ln, 0, txt))
		err(EXIT_FAILURE, _("failed to add output data"));

	/* data column */
	va_start(args, fmt);
	xvasprintf(&data, fmt, args);
	va_end(args);

	if (data && scols_line_refer_data(ln, 1, data))
		err(EXIT_FAILURE, _("failed to add output data"));
}

#define add_summary_n(tb, txt, num)	add_summary_sprint(tb, txt, "%d", num)
#define add_summary_s(tb, txt, str)	add_summary_sprint(tb, txt, "%s", str)

static void
print_cpuset(struct libscols_table *tb,
	     const char *key, cpu_set_t *set, int hex)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t setbuflen = 7 * maxcpus;
	char setbuf[setbuflen], *p;

	if (hex) {
		p = cpumask_create(setbuf, setbuflen, set, setsize);
		add_summary_s(tb, key, p);
	} else {
		p = cpulist_create(setbuf, setbuflen, set, setsize);
		add_summary_s(tb, key, p);
	}
}
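
/*
 * For example, with CPUs 0-3 online the list form prints "0-3", while the
 * --hex form prints a bitmask such as "f" (the exact formatting is up to
 * cpumask_create()).
 */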

static int get_cache_full_size(struct lscpu_desc *desc,
		struct cpu_cache *ca, uint64_t *res)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	int i, nshares = 0;

1963 /* Count the number of CPUs that share this cache */
1964 for (i = 0; i < desc->ncpuspos; i++) {
1965 int cpu = real_cpu_num(desc, i);
1966
1967 if (desc->present && !is_cpu_present(desc, cpu))
1968 continue;
1969 if (CPU_ISSET_S(cpu, setsize, ca->sharedmaps[0]))
1970 nshares++;
1971 }
1972
1973 /* Correction for CPU threads */
1974 if (desc->nthreads > desc->ncores)
1975 nshares /= (desc->nthreads / desc->ncores);
1976 if (nshares < 1)
1977 nshares = 1;
1978
1979 *res = (desc->ncores / nshares) * ca->size;
1980 return 0;
1981 }
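/*
 * Worked example with hypothetical numbers: an L3 instance of size 16M
 * shared by 8 logical CPUs on a box with 8 cores and 16 threads gives
 * nshares = 8 / (16 / 8) = 4, so the machine has 8 / 4 = 2 such
 * instances and *res = 2 * 16M = 32M.
 */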
1982
1983 /*
1984 * default output
1985 */
1986 static void
1987 print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
1988 {
1989 char buf[BUFSIZ];
1990 int i = 0;
1991 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1992 struct libscols_table *tb;
1993
1994 scols_init_debug(0);
1995
1996 tb = scols_new_table();
1997 if (!tb)
1998 err(EXIT_FAILURE, _("failed to allocate output table"));
1999
2000 scols_table_enable_noheadings(tb, 1);
2001 if (mod->json) {
2002 scols_table_enable_json(tb, 1);
2003 scols_table_set_name(tb, "lscpu");
2004 }
2005
2006 if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
2007 scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES | SCOLS_FL_WRAP) == NULL)
2008 err(EXIT_FAILURE, _("failed to initialize output column"));
2009
2010 add_summary_s(tb, _("Architecture:"), desc->arch);
2011 if (desc->mode) {
2012 char *p = buf;
2013
2014 if (desc->mode & MODE_32BIT) {
2015 strcpy(p, "32-bit, ");
2016 p += 8;
2017 }
2018 if (desc->mode & MODE_64BIT) {
2019 strcpy(p, "64-bit, ");
2020 p += 8;
2021 }
2022 *(p - 2) = '\0';
2023 add_summary_s(tb, _("CPU op-mode(s):"), buf);
2024 }
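/* e.g. a CPU supporting both modes yields "32-bit, 64-bit" here; the
 * trailing ", " is cut off by the *(p - 2) = '\0' assignment above. */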
2025 #if !defined(WORDS_BIGENDIAN)
2026 add_summary_s(tb, _("Byte Order:"), "Little Endian");
2027 #else
2028 add_summary_s(tb, _("Byte Order:"), "Big Endian");
2029 #endif
2030
2031 if (desc->addrsz)
2032 add_summary_s(tb, _("Address sizes:"), desc->addrsz);
2033
2034 add_summary_n(tb, _("CPU(s):"), desc->ncpus);
2035
2036 if (desc->online)
2037 print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
2038 _("On-line CPU(s) list:"),
2039 desc->online, mod->hex);
2040
2041 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
2042 cpu_set_t *set;
2043
2044 /* The Linux kernel provides a cpuset of off-line CPUs that contains
2045 * all configured CPUs (see /sys/devices/system/cpu/offline), but we
2046 * want to print only the off-line CPUs actually present in the system.
2047 */
2048 set = cpuset_alloc(maxcpus, NULL, NULL);
2049 if (!set)
2050 err(EXIT_FAILURE, _("failed to allocate cpu set"));
2051 CPU_ZERO_S(setsize, set);
2052 for (i = 0; i < desc->ncpuspos; i++) {
2053 int cpu = real_cpu_num(desc, i);
2054 if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
2055 CPU_SET_S(cpu, setsize, set);
2056 }
2057 print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
2058 _("Off-line CPU(s) list:"),
2059 set, mod->hex);
2060 cpuset_free(set);
2061 }
2062
2063 if (desc->nsockets) {
2064 int threads_per_core, cores_per_socket, sockets_per_book;
2065 int books_per_drawer, drawers;
2066 FILE *fd;
2067
2068 threads_per_core = cores_per_socket = sockets_per_book = 0;
2069 books_per_drawer = drawers = 0;
2070 /* s390 detects its CPU topology via /proc/sysinfo, if present.
2071 * Simply using the CPU topology masks in sysfs will not give
2072 * usable results, since everything is virtualized. E.g.
2073 * virtual core 0 may have only one CPU, while virtual core 2
2074 * may have five CPUs.
2075 * If the CPU topology is not exported (e.g. in a 2nd level
2076 * guest), fall back to the old calculation scheme.
2077 */
2078 if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
2079 int t0, t1;
2080
2081 while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
2082 if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
2083 &t0, &t1, &drawers, &books_per_drawer,
2084 &sockets_per_book,
2085 &cores_per_socket) == 6)
2086 break;
2087 }
2088 if (fd)
2089 fclose(fd);
2090 }
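/* The matched /proc/sysinfo line looks roughly like this (made-up
 * values):
 *
 *	CPU Topology SW:      0 0 4 2 3 10
 *
 * i.e. after the two ignored fields: 4 drawers, 2 books per drawer,
 * 3 sockets per book and 10 cores per socket.
 */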
2091 if (desc->mtid)
2092 threads_per_core = atoi(desc->mtid) + 1;
2093 add_summary_n(tb, _("Thread(s) per core:"),
2094 threads_per_core ?: desc->nthreads / desc->ncores);
2095 add_summary_n(tb, _("Core(s) per socket:"),
2096 cores_per_socket ?: desc->ncores / desc->nsockets);
2097 if (desc->nbooks) {
2098 add_summary_n(tb, _("Socket(s) per book:"),
2099 sockets_per_book ?: desc->nsockets / desc->nbooks);
2100 if (desc->ndrawers) {
2101 add_summary_n(tb, _("Book(s) per drawer:"),
2102 books_per_drawer ?: desc->nbooks / desc->ndrawers);
2103 add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
2104 } else {
2105 add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
2106 }
2107 } else {
2108 add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
2109 }
2110 }
2111 if (desc->nnodes)
2112 add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
2113 if (desc->vendor)
2114 add_summary_s(tb, _("Vendor ID:"), desc->vendor);
2115 if (desc->machinetype)
2116 add_summary_s(tb, _("Machine type:"), desc->machinetype);
2117 if (desc->family)
2118 add_summary_s(tb, _("CPU family:"), desc->family);
2119 if (desc->model || desc->revision)
2120 add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
2121 if (desc->modelname || desc->cpu)
2122 add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
2123 if (desc->stepping)
2124 add_summary_s(tb, _("Stepping:"), desc->stepping);
2125 if (desc->freqboost >= 0)
2126 add_summary_s(tb, _("Frequency boost:"), desc->freqboost ?
2127 _("enabled") : _("disabled"));
2128 if (desc->mhz)
2129 add_summary_s(tb, _("CPU MHz:"), desc->mhz);
2130 if (desc->dynamic_mhz)
2131 add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
2132 if (desc->static_mhz)
2133 add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
2134 if (desc->maxmhz)
2135 add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
2136 if (desc->minmhz)
2137 add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
2138 if (desc->bogomips)
2139 add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
2140 if (desc->virtflag) {
2141 if (!strcmp(desc->virtflag, "svm"))
2142 add_summary_s(tb, _("Virtualization:"), "AMD-V");
2143 else if (!strcmp(desc->virtflag, "vmx"))
2144 add_summary_s(tb, _("Virtualization:"), "VT-x");
2145 }
2146 if (desc->hypervisor)
2147 add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
2148 if (desc->hyper) {
2149 add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
2150 add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
2151 }
2152 if (desc->dispatching >= 0)
2153 add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
2154 if (desc->ncaches) {
2155 for (i = desc->ncaches - 1; i >= 0; i--) {
2156 uint64_t sz = 0;
2157 char *tmp;
2158 struct cpu_cache *ca = &desc->caches[i];
2159
2160 if (ca->size == 0)
2161 continue;
2162 if (get_cache_full_size(desc, ca, &sz) != 0 || sz == 0)
2163 continue;
2164 if (mod->bytes)
2165 xasprintf(&tmp, "%" PRIu64, sz);
2166 else
2167 tmp = size_to_human_string(
2168 SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
2169 sz);
2170 snprintf(buf, sizeof(buf), _("%s cache:"), ca->name);
2171 add_summary_s(tb, buf, tmp);
2172 free(tmp);
2173 }
2174 }
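/* Illustrative result: a 32K per-core L1d cache on a 4-core machine is
 * summarized as "L1d cache: 128 KiB"; with --bytes the same line would
 * read "L1d cache: 131072". */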
2175 if (desc->necaches) {
2176 for (i = desc->necaches - 1; i >= 0; i--) {
2177 char *tmp;
2178 struct cpu_cache *ca = &desc->ecaches[i];
2179
2180 if (ca->size == 0)
2181 continue;
2182 if (mod->bytes)
2183 xasprintf(&tmp, "%" PRIu64, ca->size);
2184 else
2185 tmp = size_to_human_string(
2186 SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
2187 ca->size);
2188 snprintf(buf, sizeof(buf), _("%s cache:"), ca->name);
2189 add_summary_s(tb, buf, tmp);
2190 free(tmp);
2191 }
2192 }
2193
2194 for (i = 0; i < desc->nnodes; i++) {
2195 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
2196 print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
2197 }
2198
2199 if (desc->physsockets) {
2200 add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
2201 add_summary_n(tb, _("Physical chips:"), desc->physchips);
2202 add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
2203 }
2204
2205 if (desc->vuls) {
2206 for (i = 0; i < desc->nvuls; i++) {
2207 snprintf(buf, sizeof(buf), _("Vulnerability %s:"), desc->vuls[i].name);
2208 add_summary_s(tb, buf, desc->vuls[i].text);
2209 }
2210 }
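/* e.g. a typical rendered row, with the text taken verbatim from
 * /sys/devices/system/cpu/vulnerabilities/meltdown:
 *
 *	Vulnerability Meltdown:  Mitigation: PTI
 */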
2211
2212 if (desc->flags)
2213 add_summary_s(tb, _("Flags:"), desc->flags);
2214
2215 scols_print_table(tb);
2216 scols_unref_table(tb);
2217 }
2218
2219 static void __attribute__((__noreturn__)) usage(void)
2220 {
2221 FILE *out = stdout;
2222 size_t i;
2223
2224 fputs(USAGE_HEADER, out);
2225 fprintf(out, _(" %s [options]\n"), program_invocation_short_name);
2226
2227 fputs(USAGE_SEPARATOR, out);
2228 fputs(_("Display information about the CPU architecture.\n"), out);
2229
2230 fputs(USAGE_OPTIONS, out);
2231 fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
2232 fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
2233 fputs(_(" -B, --bytes print sizes in bytes rather than in human readable format\n"), out);
2234 fputs(_(" -C, --caches[=<list>] info about caches in extended readable format\n"), out);
2235 fputs(_(" -c, --offline print offline CPUs only\n"), out);
2236 fputs(_(" -J, --json use JSON for default or extended format\n"), out);
2237 fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
2238 fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
2239 fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
2240 fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
2241 fputs(_(" -y, --physical print physical instead of logical IDs\n"), out);
2242 fputs(_(" --output-all print all available columns for -e, -p or -C\n"), out);
2243 fputs(USAGE_SEPARATOR, out);
2244 printf(USAGE_HELP_OPTIONS(25));
2245
2246 fputs(_("\nAvailable output columns for -e or -p:\n"), out);
2247 for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++)
2248 fprintf(out, " %13s %s\n", coldescs_cpu[i].name, _(coldescs_cpu[i].help));
2249
2250 fputs(_("\nAvailable output columns for -C:\n"), out);
2251 for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++)
2252 fprintf(out, " %13s %s\n", coldescs_cache[i].name, _(coldescs_cache[i].help));
2253
2254 printf(USAGE_MAN_TAIL("lscpu(1)"));
2255
2256 exit(EXIT_SUCCESS);
2257 }
2258
2259 int main(int argc, char *argv[])
2260 {
2261 struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
2262 struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
2263 int c, i, all = 0;
2264 int columns[ARRAY_SIZE(coldescs_cpu)], ncolumns = 0;
2265 int cpu_modifier_specified = 0;
2266 size_t setsize;
2267
2268 enum {
2269 OPT_OUTPUT_ALL = CHAR_MAX + 1,
2270 };
2271 static const struct option longopts[] = {
2272 { "all", no_argument, NULL, 'a' },
2273 { "online", no_argument, NULL, 'b' },
2274 { "bytes", no_argument, NULL, 'B' },
2275 { "caches", optional_argument, NULL, 'C' },
2276 { "offline", no_argument, NULL, 'c' },
2277 { "help", no_argument, NULL, 'h' },
2278 { "extended", optional_argument, NULL, 'e' },
2279 { "json", no_argument, NULL, 'J' },
2280 { "parse", optional_argument, NULL, 'p' },
2281 { "sysroot", required_argument, NULL, 's' },
2282 { "physical", no_argument, NULL, 'y' },
2283 { "hex", no_argument, NULL, 'x' },
2284 { "version", no_argument, NULL, 'V' },
2285 { "output-all", no_argument, NULL, OPT_OUTPUT_ALL },
2286 { NULL, 0, NULL, 0 }
2287 };
2288
2289 static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
2290 { 'C','e','p' },
2291 { 'a','b','c' },
2292 { 0 }
2293 };
2294 int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;
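/* e.g. "lscpu -e -p" or "lscpu -a -b" terminate with an error via
 * err_exclusive_options() below, since each group above mixes mutually
 * exclusive output modes or CPU filters. */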
2295
2296 setlocale(LC_ALL, "");
2297 bindtextdomain(PACKAGE, LOCALEDIR);
2298 textdomain(PACKAGE);
2299 atexit(close_stdout);
2300
2301 while ((c = getopt_long(argc, argv, "aBbC::ce::hJp::s:xyV", longopts, NULL)) != -1) {
2302
2303 err_exclusive_options(c, longopts, excl, excl_st);
2304
2305 switch (c) {
2306 case 'a':
2307 mod->online = mod->offline = 1;
2308 cpu_modifier_specified = 1;
2309 break;
2310 case 'B':
2311 mod->bytes = 1;
2312 break;
2313 case 'b':
2314 mod->online = 1;
2315 cpu_modifier_specified = 1;
2316 break;
2317 case 'c':
2318 mod->offline = 1;
2319 cpu_modifier_specified = 1;
2320 break;
2321 case 'C':
2322 if (optarg) {
2323 if (*optarg == '=')
2324 optarg++;
2325 ncolumns = string_to_idarray(optarg,
2326 columns, ARRAY_SIZE(columns),
2327 cache_column_name_to_id);
2328 if (ncolumns < 0)
2329 return EXIT_FAILURE;
2330 }
2331 mod->mode = OUTPUT_CACHES;
2332 break;
2333 case 'J':
2334 mod->json = 1;
2335 break;
2336 case 'p':
2337 case 'e':
2338 if (optarg) {
2339 if (*optarg == '=')
2340 optarg++;
2341 ncolumns = string_to_idarray(optarg,
2342 columns, ARRAY_SIZE(columns),
2343 cpu_column_name_to_id);
2344 if (ncolumns < 0)
2345 return EXIT_FAILURE;
2346 }
2347 mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
2348 break;
2349 case 's':
2350 desc->prefix = optarg;
2351 mod->system = SYSTEM_SNAPSHOT;
2352 break;
2353 case 'x':
2354 mod->hex = 1;
2355 break;
2356 case 'y':
2357 mod->physical = 1;
2358 break;
2359 case OPT_OUTPUT_ALL:
2360 all = 1;
2361 break;
2362
2363 case 'h':
2364 usage();
2365 case 'V':
2366 print_version(EXIT_SUCCESS);
2367 default:
2368 errtryhelp(EXIT_FAILURE);
2369 }
2370 }
2371
2372 if (all && ncolumns == 0) {
2373 size_t sz, maxsz = mod->mode == OUTPUT_CACHES ?
2374 ARRAY_SIZE(coldescs_cache) :
2375 ARRAY_SIZE(coldescs_cpu);
2376
2377 for (sz = 0; sz < maxsz; sz++)
2378 columns[ncolumns++] = sz;
2379 }
2380
2381 if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
2382 fprintf(stderr,
2383 _("%s: options --all, --online and --offline may only "
2384 "be used with options --extended or --parse.\n"),
2385 program_invocation_short_name);
2386 return EXIT_FAILURE;
2387 }
2388
2389 if (argc != optind) {
2390 warnx(_("bad usage"));
2391 errtryhelp(EXIT_FAILURE);
2392 }
2393
2394 /* set default cpu display mode if none was specified */
2395 if (!mod->online && !mod->offline) {
2396 mod->online = 1;
2397 mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
2398 }
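/* i.e. a bare "lscpu -p" lists on-line CPUs only, while a bare
 * "lscpu -e" lists both on-line and off-line CPUs -- matching the
 * "(default for -e)" / "(default for -p)" notes in usage(). */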
2399
2400 ul_path_init_debug();
2401
2402 /* /sys/devices/system/cpu */
2403 desc->syscpu = ul_new_path(_PATH_SYS_CPU);
2404 if (!desc->syscpu)
2405 err(EXIT_FAILURE, _("failed to initialize CPUs sysfs handler"));
2406 if (desc->prefix)
2407 ul_path_set_prefix(desc->syscpu, desc->prefix);
2408
2409 /* /proc */
2410 desc->procfs = ul_new_path("/proc");
2411 if (!desc->procfs)
2412 err(EXIT_FAILURE, _("failed to initialize procfs handler"));
2413 if (desc->prefix)
2414 ul_path_set_prefix(desc->procfs, desc->prefix);
2415
2416 read_basicinfo(desc, mod);
2417
2418 setsize = CPU_ALLOC_SIZE(maxcpus);
2419
2420 for (i = 0; i < desc->ncpuspos; i++) {
2421 /* only consider present CPUs */
2422 if (desc->present &&
2423 !CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present))
2424 continue;
2425 read_topology(desc, i);
2426 read_cache(desc, i);
2427 read_polarization(desc, i);
2428 read_address(desc, i);
2429 read_configured(desc, i);
2430 read_max_mhz(desc, i);
2431 read_min_mhz(desc, i);
2432 }
2433
2434 if (desc->caches)
2435 qsort(desc->caches, desc->ncaches,
2436 sizeof(struct cpu_cache), cachecmp);
2437
2438 if (desc->ecaches)
2439 qsort(desc->ecaches, desc->necaches,
2440 sizeof(struct cpu_cache), cachecmp);
2441
2442 read_nodes(desc);
2443 read_hypervisor(desc, mod);
2444 arm_cpu_decode(desc);
2445
2446 switch(mod->mode) {
2447 case OUTPUT_SUMMARY:
2448 print_summary(desc, mod);
2449 break;
2450 case OUTPUT_CACHES:
2451 if (!ncolumns) {
2452 columns[ncolumns++] = COL_CACHE_NAME;
2453 columns[ncolumns++] = COL_CACHE_ONESIZE;
2454 columns[ncolumns++] = COL_CACHE_ALLSIZE;
2455 columns[ncolumns++] = COL_CACHE_WAYS;
2456 columns[ncolumns++] = COL_CACHE_TYPE;
2457 columns[ncolumns++] = COL_CACHE_LEVEL;
2458 columns[ncolumns++] = COL_CACHE_SETS;
2459 columns[ncolumns++] = COL_CACHE_PHYLINE;
2460 columns[ncolumns++] = COL_CACHE_COHERENCYSIZE;
2461 }
2462 print_caches_readable(desc, columns, ncolumns, mod);
2463 break;
2464 case OUTPUT_PARSABLE:
2465 if (!ncolumns) {
2466 columns[ncolumns++] = COL_CPU_CPU;
2467 columns[ncolumns++] = COL_CPU_CORE;
2468 columns[ncolumns++] = COL_CPU_SOCKET;
2469 columns[ncolumns++] = COL_CPU_NODE;
2470 columns[ncolumns++] = COL_CPU_CACHE;
2471 mod->compat = 1;
2472 }
2473 print_cpus_parsable(desc, columns, ncolumns, mod);
2474 break;
2475 case OUTPUT_READABLE:
2476 if (!ncolumns) {
2477 /* No list was given. Just print whatever is there. */
2478 columns[ncolumns++] = COL_CPU_CPU;
2479 if (desc->nodemaps)
2480 columns[ncolumns++] = COL_CPU_NODE;
2481 if (desc->drawermaps)
2482 columns[ncolumns++] = COL_CPU_DRAWER;
2483 if (desc->bookmaps)
2484 columns[ncolumns++] = COL_CPU_BOOK;
2485 if (desc->socketmaps)
2486 columns[ncolumns++] = COL_CPU_SOCKET;
2487 if (desc->coremaps)
2488 columns[ncolumns++] = COL_CPU_CORE;
2489 if (desc->caches)
2490 columns[ncolumns++] = COL_CPU_CACHE;
2491 if (desc->online)
2492 columns[ncolumns++] = COL_CPU_ONLINE;
2493 if (desc->configured)
2494 columns[ncolumns++] = COL_CPU_CONFIGURED;
2495 if (desc->polarization)
2496 columns[ncolumns++] = COL_CPU_POLARIZATION;
2497 if (desc->addresses)
2498 columns[ncolumns++] = COL_CPU_ADDRESS;
2499 if (desc->maxmhz)
2500 columns[ncolumns++] = COL_CPU_MAXMHZ;
2501 if (desc->minmhz)
2502 columns[ncolumns++] = COL_CPU_MINMHZ;
2503 }
2504 print_cpus_readable(desc, columns, ncolumns, mod);
2505 break;
2506 }
2507
2508 ul_unref_path(desc->syscpu);
2509 ul_unref_path(desc->procfs);
2510 return EXIT_SUCCESS;
2511 }