]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu.c
lscpu: make lookup() use more consistent [coverity scan]
[thirdparty/util-linux.git] / sys-utils / lscpu.c
1 /*
2 * lscpu - CPU architecture information helper
3 *
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #include <assert.h>
23 #include <ctype.h>
24 #include <dirent.h>
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <getopt.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/utsname.h>
32 #include <unistd.h>
33 #include <stdarg.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <sys/personality.h>
37
38 #if (defined(__x86_64__) || defined(__i386__))
39 # if !defined( __SANITIZE_ADDRESS__)
40 # define INCLUDE_VMWARE_BDOOR
41 # else
42 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
43 # endif
44 #endif
45
46 #ifdef INCLUDE_VMWARE_BDOOR
47 # include <stdint.h>
48 # include <signal.h>
49 # include <strings.h>
50 # include <setjmp.h>
51 # ifdef HAVE_SYS_IO_H
52 # include <sys/io.h>
53 # endif
54 #endif
55
56 #if defined(HAVE_LIBRTAS)
57 #include <librtas.h>
58 #endif
59
60 #include <libsmartcols.h>
61
62 #include "closestream.h"
63 #include "optutils.h"
64 #include "fileutils.h"
65
66 #include "lscpu.h"
67
#define CACHE_MAX 100

/* /sys paths */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_HYP_FEATURES	"/sys/hypervisor/properties/features"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE		_PATH_SYS_SYSTEM "/node"

/* Xen Domain feature flag used for /sys/hypervisor/properties/features */
#define XENFEAT_supervisor_mode_kernel		3
#define XENFEAT_mmu_pt_update_preserve_ad	5
#define XENFEAT_hvm_callback_vector		8

/* PV domains expose mmu_pt_update_preserve_ad; PVH domains expose both
 * supervisor_mode_kernel and hvm_callback_vector (see read_hypervisor()) */
#define XEN_FEATURES_PV_MASK	(1U << XENFEAT_mmu_pt_update_preserve_ad)
#define XEN_FEATURES_PVH_MASK	( (1U << XENFEAT_supervisor_mode_kernel) \
				| (1U << XENFEAT_hvm_callback_vector) )

/* human-readable names for the VIRT_* virtualization types (lscpu.h),
 * translated when printed */
static const char *virt_types[] = {
	[VIRT_NONE]	= N_("none"),
	[VIRT_PARA]	= N_("para"),
	[VIRT_FULL]	= N_("full"),
	[VIRT_CONT]	= N_("container"),
};

/* vendor string printed for each HYPER_* hypervisor id; NULL for none */
static const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]  = "VMware",
	[HYPER_IBM]	= "IBM",
	[HYPER_VSERVER]	= "Linux-VServer",
	[HYPER_UML]	= "User-mode Linux",
	[HYPER_INNOTEK]	= "Innotek GmbH",
	[HYPER_HITACHI]	= "Hitachi",
	[HYPER_PARALLELS] = "Parallels",
	[HYPER_VBOX]	= "Oracle",
	[HYPER_OS400]	= "OS/400",
	[HYPER_PHYP]	= "pHyp",
	[HYPER_SPAR]	= "Unisys s-Par",
	[HYPER_WSL]	= "Windows Subsystem for Linux"
};

/* PCI vendor ids used as a detection fallback in read_hypervisor();
 * 0x0000 where no PCI-based detection exists for that hypervisor */
static const int hv_vendor_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x5853,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x1414,
	[HYPER_VMWARE]	= 0x15ad,
	[HYPER_VBOX]	= 0x80ee,
};

/* PCI device ids of the hypervisors' emulated graphics adapters, matched
 * together with hv_vendor_pci[] by has_pci_device() */
static const int hv_graphics_pci[] = {
	[HYPER_NONE]	= 0x0000,
	[HYPER_XEN]	= 0x0001,
	[HYPER_KVM]	= 0x0000,
	[HYPER_MSHV]	= 0x5353,
	[HYPER_VMWARE]	= 0x0710,
	[HYPER_VBOX]	= 0xbeef,
};


/* dispatching modes (DISP_* ids, see desc->dispatching) */
static const char *disp_modes[] = {
	[DISP_HORIZONTAL]	= N_("horizontal"),
	[DISP_VERTICAL]		= N_("vertical")
};

/* short and long labels for the POLAR_* CPU polarization values */
static struct polarization_modes polar_modes[] = {
	[POLAR_UNKNOWN]	   = {"U",  "-"},
	[POLAR_VLOW]	   = {"VL", "vert-low"},
	[POLAR_VMEDIUM]	   = {"VM", "vert-medium"},
	[POLAR_VHIGH]	   = {"VH", "vert-high"},
	[POLAR_HORIZONTAL] = {"H",  "horizontal"},
};

static int maxcpus;		/* size in bits of kernel cpu mask */

/* test whether CPU number @_cpu is set in the online/present mask of
 * lscpu_desc @_d; evaluates to 0 when the mask has not been read */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
#define is_cpu_present(_d, _cpu) \
	((_d) && (_d)->present ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)

/* map a dense "possible CPU" index to the real (possibly sparse) CPU number */
#define real_cpu_num(_d, _i)	((_d)->idx2cpunum[(_i)])
154
/*
 * Column IDs
 */

/* columns of the CPU table (--extended/--parse); indexes into coldescs_cpu[] */
enum {
	COL_CPU_CPU,
	COL_CPU_CORE,
	COL_CPU_SOCKET,
	COL_CPU_NODE,
	COL_CPU_BOOK,
	COL_CPU_DRAWER,
	COL_CPU_CACHE,
	COL_CPU_POLARIZATION,
	COL_CPU_ADDRESS,
	COL_CPU_CONFIGURED,
	COL_CPU_ONLINE,
	COL_CPU_MAXMHZ,
	COL_CPU_MINMHZ,
};

/* columns of the caches table (--caches); indexes into coldescs_cache[] */
enum {
	COL_CACHE_ALLSIZE,
	COL_CACHE_LEVEL,
	COL_CACHE_NAME,
	COL_CACHE_ONESIZE,
	COL_CACHE_TYPE,
	COL_CACHE_WAYS,
};
182
183
/* column description
 */
struct lscpu_coldesc {
	const char *name;	/* column header, also accepted on the command line */
	const char *help;	/* description shown in --help */

	int flags;		/* SCOLS_FL_* flags for libsmartcols */
	unsigned int  is_abbr:1; /* name is abbreviation */
};

/* descriptions of the CPU table columns, indexed by COL_CPU_* */
static struct lscpu_coldesc coldescs_cpu[] =
{
	[COL_CPU_CPU]          = { "CPU", N_("logical CPU number"), SCOLS_FL_RIGHT, 1 },
	[COL_CPU_CORE]         = { "CORE", N_("logical core number"), SCOLS_FL_RIGHT },
	[COL_CPU_SOCKET]       = { "SOCKET", N_("logical socket number"), SCOLS_FL_RIGHT },
	[COL_CPU_NODE]         = { "NODE", N_("logical NUMA node number"), SCOLS_FL_RIGHT },
	[COL_CPU_BOOK]         = { "BOOK", N_("logical book number"), SCOLS_FL_RIGHT },
	[COL_CPU_DRAWER]       = { "DRAWER", N_("logical drawer number"), SCOLS_FL_RIGHT },
	[COL_CPU_CACHE]        = { "CACHE", N_("shows how caches are shared between CPUs") },
	[COL_CPU_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
	[COL_CPU_ADDRESS]      = { "ADDRESS", N_("physical address of a CPU") },
	[COL_CPU_CONFIGURED]   = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
	[COL_CPU_ONLINE]       = { "ONLINE", N_("shows if Linux currently makes use of the CPU"), SCOLS_FL_RIGHT },
	[COL_CPU_MAXMHZ]       = { "MAXMHZ", N_("shows the maximum MHz of the CPU"), SCOLS_FL_RIGHT },
	[COL_CPU_MINMHZ]       = { "MINMHZ", N_("shows the minimum MHz of the CPU"), SCOLS_FL_RIGHT }
};

/* descriptions of the caches table columns, indexed by COL_CACHE_* */
static struct lscpu_coldesc coldescs_cache[] =
{
	[COL_CACHE_ALLSIZE]    = { "ALL-SIZE", N_("size of all system caches"), SCOLS_FL_RIGHT },
	[COL_CACHE_LEVEL]      = { "LEVEL", N_("cache level"), SCOLS_FL_RIGHT },
	[COL_CACHE_NAME]       = { "NAME", N_("cache name") },
	[COL_CACHE_ONESIZE]    = { "ONE-SIZE", N_("size of one cache"), SCOLS_FL_RIGHT },
	[COL_CACHE_TYPE]       = { "TYPE", N_("cache type") },
	[COL_CACHE_WAYS]       = { "WAYS", N_("ways of associativity"), SCOLS_FL_RIGHT }
};
220
221
222 static int get_cache_full_size(struct lscpu_desc *desc, struct cpu_cache *ca, uint64_t *res);
223
224 static int
225 cpu_column_name_to_id(const char *name, size_t namesz)
226 {
227 size_t i;
228
229 for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++) {
230 const char *cn = coldescs_cpu[i].name;
231
232 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
233 return i;
234 }
235 warnx(_("unknown column: %s"), name);
236 return -1;
237 }
238
239 static int
240 cache_column_name_to_id(const char *name, size_t namesz)
241 {
242 size_t i;
243
244 for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++) {
245 const char *cn = coldescs_cache[i].name;
246
247 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
248 return i;
249 }
250 warnx(_("unknown column: %s"), name);
251 return -1;
252 }
253
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>	: <value>"
 *
 * Returns 1 and stores a newly allocated copy of the value (trailing
 * blanks stripped) in *value; returns 0 when the line does not match.
 * A previously found value is never overwritten -- the first match wins.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast for ctype.h -- plain char may be signed and
	 * negative values are undefined behavior for isspace() */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value; *v is non-blank, so this loop cannot run past it */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
296
297 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
298 * part of the cache topology information within the sysfs filesystem.
299 * This is true for all shared caches on e.g. s390. When there are layers of
300 * hypervisors in between it is not knows which CPUs share which caches.
301 * Therefore information about shared caches is only available in
302 * /proc/cpuinfo.
303 * Format is:
304 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
305 */
306 static int
307 lookup_cache(char *line, struct lscpu_desc *desc)
308 {
309 struct cpu_cache *cache;
310 long long size;
311 char *p, type;
312 int level;
313
314 /* Make sure line starts with "cache<nr> :" */
315 if (strncmp(line, "cache", 5))
316 return 0;
317 for (p = line + 5; isdigit(*p); p++);
318 for (; isspace(*p); p++);
319 if (*p != ':')
320 return 0;
321
322 p = strstr(line, "scope=") + 6;
323 /* Skip private caches, also present in sysfs */
324 if (!p || strncmp(p, "Private", 7) == 0)
325 return 0;
326 p = strstr(line, "level=");
327 if (!p || sscanf(p, "level=%d", &level) != 1)
328 return 0;
329 p = strstr(line, "type=") + 5;
330 if (!p || !*p)
331 return 0;
332 type = 0;
333 if (strncmp(p, "Data", 4) == 0)
334 type = 'd';
335 else if (strncmp(p, "Instruction", 11) == 0)
336 type = 'i';
337 else if (strncmp(p, "Unified", 7) == 0)
338 type = 'u';
339 p = strstr(line, "size=");
340 if (!p || sscanf(p, "size=%lld", &size) != 1)
341 return 0;
342
343 desc->necaches++;
344 desc->ecaches = xrealloc(desc->ecaches,
345 desc->necaches * sizeof(struct cpu_cache));
346 cache = &desc->ecaches[desc->necaches - 1];
347 memset(cache, 0 , sizeof(*cache));
348
349 if (type == 'i' || type == 'd')
350 xasprintf(&cache->name, "L%d%c", level, type);
351 else
352 xasprintf(&cache->name, "L%d", level);
353
354 cache->level = level;
355 cache->size = size * 1024;
356
357 cache->type = type == 'i' ? xstrdup("Instruction") :
358 type == 'd' ? xstrdup("Data") :
359 type == 'u' ? xstrdup("Unified") : NULL;
360 return 1;
361 }
362
/* Don't init the mode for platforms where we are not able to
 * detect that CPU supports 64-bit mode.
 *
 * Returns a bitmask of MODE_32BIT/MODE_64BIT for the running CPU, or 0
 * when reading a snapshot (the snapshot may come from another machine).
 */
static int
init_mode(struct lscpu_modifier *mod)
{
	int m = 0;

	if (mod->system == SYSTEM_SNAPSHOT)
		/* reading info from any /{sys,proc} dump, don't mix it with
		 * information about our real CPU */
		return 0;

#if defined(__alpha__) || defined(__ia64__)
	m |= MODE_64BIT;	/* 64bit platforms only */
#endif
	/* platforms with 64bit flag in /proc/cpuinfo, define
	 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
	m |= MODE_32BIT;
#endif

#if defined(__aarch64__)
	{
		/* personality() is the most reliable way (since 4.7)
		 * to determine aarch32 support */
		int pers = personality(PER_LINUX32);
		if (pers != -1) {
			/* probe succeeded -- restore the previous
			 * personality right away */
			personality(pers);
			m |= MODE_32BIT;
		}
		m |= MODE_64BIT;
	}
#endif
	return m;
}
400
#if defined(HAVE_LIBRTAS)
#define PROCESSOR_MODULE_INFO	43

/* Decode a big-endian 16-bit field from the RTAS sysparm buffer.  The
 * bytes must be read as unsigned: plain 'char' may be signed, and a byte
 * >= 0x80 would sign-extend and corrupt the result. */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char)buf[offset] << 8) + (unsigned char)buf[offset + 1];
}

/* Query PowerPC firmware (RTAS PROCESSOR_MODULE_INFO sysparm) for the
 * physical socket/chip/core-per-chip counts; on any failure the counts
 * are left at zero. */
static void read_physical_info_powerpc(struct lscpu_desc *desc)
{
	char buf[BUFSIZ];
	int rc, len, ntypes;

	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;

	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
	if (rc < 0)
		return;

	/* buf[0..1]: payload length; at least 8 bytes are needed below */
	len = strbe16toh(buf, 0);
	if (len < 8)
		return;

	ntypes = strbe16toh(buf, 2);

	assert(ntypes <= 1);
	if (!ntypes)
		return;

	desc->physsockets = strbe16toh(buf, 4);
	desc->physchips = strbe16toh(buf, 6);
	desc->physcoresperchip = strbe16toh(buf, 8);
}
#else
/* no librtas -- physical topology info is unavailable */
static void read_physical_info_powerpc(
		struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
439
440 static int cmp_vulnerability_name(const void *a0, const void *b0)
441 {
442 const struct cpu_vulnerability *a = (const struct cpu_vulnerability *) a0,
443 *b = (const struct cpu_vulnerability *) b0;
444 return strcmp(a->name, b->name);
445 }
446
447 static void read_vulnerabilities(struct lscpu_desc *desc)
448 {
449 struct dirent *d;
450 DIR *dir = ul_path_opendir(desc->syscpu, "vulnerabilities");
451 int n = 0;
452
453 if (!dir)
454 return;
455
456 desc->nvuls = n = 0;
457
458 while (xreaddir(dir))
459 n++;
460 if (!n)
461 return;
462
463 rewinddir(dir);
464 desc->vuls = xcalloc(n, sizeof(struct cpu_vulnerability));
465
466 while (desc->nvuls < n && (d = xreaddir(dir))) {
467 char *str, *p;
468 struct cpu_vulnerability *vu;
469
470 #ifdef _DIRENT_HAVE_D_TYPE
471 if (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN)
472 continue;
473 #endif
474 if (ul_path_readf_string(desc->syscpu, &str,
475 "vulnerabilities/%s", d->d_name) <= 0)
476 continue;
477
478 vu = &desc->vuls[desc->nvuls++];
479
480 /* Name */
481 vu->name = xstrdup(d->d_name);
482 *vu->name = toupper(*vu->name);
483 strrep(vu->name, '_', ' ');
484
485 /* Description */
486 vu->text = str;
487 p = (char *) startswith(vu->text, "Mitigation");
488 if (p) {
489 *p = ';';
490 strrem(vu->text, ':');
491 }
492 }
493 closedir(dir);
494
495 qsort(desc->vuls, desc->nvuls,
496 sizeof(struct cpu_vulnerability), cmp_vulnerability_name);
497 }
498
499
500
501
/* Fill the lscpu_desc with the basic system information: architecture
 * (uname), /proc/cpuinfo key/value pairs, CPU modes, the kernel cpumask
 * size (maxcpus), the possible/present/online CPU masks, dispatching and
 * cpufreq-boost modes, s390 machine type and CPU vulnerabilities.
 * Any fatal problem (unreadable cpuinfo, unknown CPU count) exits. */
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fp;
	char buf[BUFSIZ];
	struct utsname utsbuf;
	size_t setsize;
	cpu_set_t *cpuset = NULL;

	/* architecture */
	if (uname(&utsbuf) == -1)
		err(EXIT_FAILURE, _("error: uname failed"));

	fp = ul_path_fopen(desc->procfs, "r", "cpuinfo");
	if (!fp)
		err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
	desc->arch = xstrdup(utsbuf.machine);

	/* details; lookup() keeps the first match, so more specific keys
	 * should not be shadowed by earlier, shorter ones */
	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if (lookup(buf, "vendor", &desc->vendor)) ;
		else if (lookup(buf, "vendor_id", &desc->vendor)) ;
		else if (lookup(buf, "CPU implementer", &desc->vendor)) ; /* ARM and aarch64 */
		else if (lookup(buf, "family", &desc->family)) ;
		else if (lookup(buf, "cpu family", &desc->family)) ;
		else if (lookup(buf, "model", &desc->model)) ;
		else if (lookup(buf, "CPU part", &desc->model)) ; /* ARM and aarch64 */
		else if (lookup(buf, "model name", &desc->modelname)) ;
		else if (lookup(buf, "stepping", &desc->stepping)) ;
		else if (lookup(buf, "CPU variant", &desc->stepping)) ; /* aarch64 */
		else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
		else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
		else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
		else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
		else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
		else if (lookup(buf, "Features", &desc->flags)) ; /* aarch64 */
		else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
		else if (lookup(buf, "bogomips", &desc->bogomips)) ;
		else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ; /* aarch64 */
		else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
		else if (lookup(buf, "cpu", &desc->cpu)) ;
		else if (lookup(buf, "revision", &desc->revision)) ;
		else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
		else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
		else if (lookup(buf, "address sizes", &desc->addrsz)) ; /* x86 */
		else if (lookup_cache(buf, desc)) ;
		else
			continue;
	}

	desc->mode = init_mode(mod);

	if (desc->flags) {
		/* pad with spaces so every flag can be matched as " flag " */
		snprintf(buf, sizeof(buf), " %s ", desc->flags);
		if (strstr(buf, " svm "))
			desc->virtflag = xstrdup("svm");
		else if (strstr(buf, " vmx "))
			desc->virtflag = xstrdup("vmx");
		if (strstr(buf, " lm "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* x86_64 */
		if (strstr(buf, " zarch "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* s390x */
		if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
			desc->mode |= MODE_32BIT | MODE_64BIT;	/* sparc64 */
	}

	if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
		if (strcmp(desc->arch, "ppc64") == 0)
			desc->mode |= MODE_32BIT | MODE_64BIT;
		else if (strcmp(desc->arch, "ppc") == 0)
			desc->mode |= MODE_32BIT;
	}

	fclose(fp);

	if (ul_path_read_s32(desc->syscpu, &maxcpus, "kernel_max") == 0)
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		maxcpus += 1;

	else if (mod->system == SYSTEM_LIVE)
		/* the root is '/' so we are working with data from the current kernel */
		maxcpus = get_max_number_of_cpus();

	if (maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		maxcpus = 2048;

	setsize = CPU_ALLOC_SIZE(maxcpus);

	/* "possible" defines the index -> real CPU number mapping */
	if (ul_path_readf_cpulist(desc->syscpu, &cpuset, maxcpus, "possible") == 0) {
		int num, idx;

		desc->ncpuspos = CPU_COUNT_S(setsize, cpuset);
		desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));

		for (num = 0, idx = 0; num < maxcpus; num++) {
			if (CPU_ISSET_S(num, setsize, cpuset))
				desc->idx2cpunum[idx++] = num;
		}
		cpuset_free(cpuset);
		cpuset = NULL;
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
		    _PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->present, maxcpus, "present") == 0)
		desc->ncpus = CPU_COUNT_S(setsize, desc->present);

	/* get mask for online CPUs */
	if (ul_path_readf_cpulist(desc->syscpu, &desc->online, maxcpus, "online") == 0)
		desc->nthreads = CPU_COUNT_S(setsize, desc->online);

	/* get dispatching mode */
	if (ul_path_read_s32(desc->syscpu, &desc->dispatching, "dispatching") != 0)
		desc->dispatching = -1;

	/* get cpufreq boost mode */
	if (ul_path_read_s32(desc->syscpu, &desc->freqboost, "cpufreq/boost") != 0)
		desc->freqboost = -1;

	if (mod->system == SYSTEM_LIVE)
		read_physical_info_powerpc(desc);

	/* s390 machine type from /proc/sysinfo */
	if ((fp = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		while (fgets(buf, sizeof(buf), fp) != NULL) {
			if (lookup(buf, "Type", &desc->machinetype))
				break;
		}
		fclose(fp);
	}

	/* vulnerabilities */
	if (ul_path_access(desc->syscpu, F_OK, "vulnerabilities") == 0)
		read_vulnerabilities(desc);
}
640
641 static int
642 has_pci_device(struct lscpu_desc *desc, unsigned int vendor, unsigned int device)
643 {
644 FILE *f;
645 unsigned int num, fn, ven, dev;
646 int res = 1;
647
648 f = ul_path_fopen(desc->procfs, "r", "bus/pci/devices");
649 if (!f)
650 return 0;
651
652 /* for more details about bus/pci/devices format see
653 * drivers/pci/proc.c in linux kernel
654 */
655 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
656 &num, &fn, &ven, &dev) == 4) {
657
658 if (ven == vendor && dev == device)
659 goto found;
660 }
661
662 res = 0;
663 found:
664 fclose(f);
665 return res;
666 }
667
#if defined(__x86_64__) || defined(__i386__)

/*
 * This CPUID leaf returns the information about the hypervisor.
 * EAX : maximum input value for CPUID supported by the hypervisor.
 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
 */
#define HYPERVISOR_INFO_LEAF   0x40000000

/* Execute the CPUID instruction for leaf @op (with ECX=0) and return the
 * four result registers through the pointer arguments. */
static inline void
cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"cpuid;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"cpuid;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "1" (op), "c"(0));
}
697
698 static void
699 read_hypervisor_cpuid(struct lscpu_desc *desc)
700 {
701 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
702 char hyper_vendor_id[13];
703
704 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
705
706 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
707 memcpy(hyper_vendor_id + 0, &ebx, 4);
708 memcpy(hyper_vendor_id + 4, &ecx, 4);
709 memcpy(hyper_vendor_id + 8, &edx, 4);
710 hyper_vendor_id[12] = '\0';
711
712 if (!hyper_vendor_id[0])
713 return;
714
715 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
716 desc->hyper = HYPER_XEN;
717 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
718 desc->hyper = HYPER_KVM;
719 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
720 desc->hyper = HYPER_MSHV;
721 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
722 desc->hyper = HYPER_VMWARE;
723 else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
724 desc->hyper = HYPER_SPAR;
725 }
726
#else /* ! (__x86_64__ || __i386__) */
/* no CPUID instruction on this architecture -- detection is a no-op */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
733
734 static int is_devtree_compatible(struct lscpu_desc *desc, const char *str)
735 {
736 FILE *fd = ul_path_fopen(desc->procfs, "r", "device-tree/compatible");
737
738 if (fd) {
739 char buf[256];
740 size_t i, len;
741
742 memset(buf, 0, sizeof(buf));
743 len = fread(buf, 1, sizeof(buf) - 1, fd);
744 fclose(fd);
745
746 for (i = 0; i < len;) {
747 if (!strcmp(&buf[i], str))
748 return 1;
749 i += strlen(&buf[i]);
750 i++;
751 }
752 }
753
754 return 0;
755 }
756
/* PowerPC hypervisor detection from procfs and the device tree; fills
 * desc->hyper and desc->virtype and returns the detected desc->hyper
 * (HYPER_NONE means bare metal or nothing recognized). */
static int
read_hypervisor_powerpc(struct lscpu_desc *desc)
{
	assert(!desc->hyper);

	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
	if (ul_path_access(desc->procfs, F_OK, "iSeries") == 0) {
		desc->hyper = HYPER_OS400;
		desc->virtype = VIRT_PARA;

	/* PowerNV (POWER Non-Virtualized, bare-metal) */
	} else if (is_devtree_compatible(desc, "ibm,powernv")) {
		desc->hyper = HYPER_NONE;
		desc->virtype = VIRT_NONE;

	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
	} else if (ul_path_access(desc->procfs, F_OK, "device-tree/ibm,partition-name") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/hmc-managed?") == 0
		   && ul_path_access(desc->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {

		FILE *fd;
		desc->hyper = HYPER_PHYP;
		desc->virtype = VIRT_PARA;

		/* a partition named "full" marks a non-virtualized machine */
		fd = ul_path_fopen(desc->procfs, "r", "device-tree/ibm,partition-name");
		if (fd) {
			char buf[256];
			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
				desc->virtype = VIRT_NONE;
			fclose(fd);
		}

	/* Qemu */
	} else if (is_devtree_compatible(desc, "qemu,pseries")) {
		desc->hyper = HYPER_KVM;
		desc->virtype = VIRT_PARA;
	}
	return desc->hyper;
}
796
#ifdef INCLUDE_VMWARE_BDOOR

#define VMWARE_BDOOR_MAGIC          0x564D5868
#define VMWARE_BDOOR_PORT           0x5658
#define VMWARE_BDOOR_CMD_GETVERSION 10

/* Issue the VMware "backdoor" GETVERSION command via an I/O-port read.
 * Under VMware the hypervisor intercepts the inl; elsewhere the access
 * typically faults -- see is_vmware_platform() for the SIGSEGV handling. */
static UL_ASAN_BLACKLIST
void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__(
#if defined(__PIC__) && defined(__i386__)
		/* x86 PIC cannot clobber ebx -- gcc bitches */
		"xchg %%ebx, %%esi;"
		"inl (%%dx), %%eax;"
		"xchg %%esi, %%ebx;"
		: "=S" (*ebx),
#else
		"inl (%%dx), %%eax;"
		: "=b" (*ebx),
#endif
		  "=a" (*eax),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (VMWARE_BDOOR_MAGIC),
		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
		  "2" (VMWARE_BDOOR_PORT),
		  "3" (0)
		: "memory");
}

/* jump target for the temporary SIGSEGV handler installed below */
static jmp_buf segv_handler_env;

static void
segv_handler(__attribute__((__unused__)) int sig,
	     __attribute__((__unused__)) siginfo_t *info,
	     __attribute__((__unused__)) void *ignored)
{
	siglongjmp(segv_handler_env, 1);
}

/* Return 1 when the VMware backdoor responds (i.e. we run under VMware),
 * 0 otherwise; only attempted for root. */
static int
is_vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	struct sigaction act, oact;

	/*
	 * FIXME: Not reliable for non-root users. Note it works as expected if
	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
	 * on 32bit x86 systems. See lscpu git log for more details (commit
	 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
	 */
	if (getuid() != 0)
		return 0;

	/*
	 * The assembly routine for vmware detection works
	 * fine under vmware, even if ran as regular user. But
	 * on real HW or under other hypervisors, it segfaults (which is
	 * expected). So we temporarily install SIGSEGV handler to catch
	 * the signal. All this magic is needed because lscpu
	 * isn't supposed to require root privileges.
	 */
	if (sigsetjmp(segv_handler_env, 1))
		return 0;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = segv_handler;
	act.sa_flags = SA_SIGINFO;

	if (sigaction(SIGSEGV, &act, &oact))
		err(EXIT_FAILURE, _("cannot set signal handler"));

	vmware_bdoor(&eax, &ebx, &ecx, &edx);

	if (sigaction(SIGSEGV, &oact, NULL))
		err(EXIT_FAILURE, _("cannot restore signal handler"));

	/* a real VMware backdoor echoes the magic value back in EBX */
	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}

#else /* ! INCLUDE_VMWARE_BDOOR */

static int
is_vmware_platform(void)
{
	return 0;
}

#endif /* INCLUDE_VMWARE_BDOOR */
887
/* Determine desc->hyper and desc->virtype by trying, in order: WSL,
 * CPUID/DMI/VMware backdoor (live systems only), Xen sysfs features,
 * PowerPC firmware, Xen procfs, PCI-id probes, s390 /proc/sysinfo,
 * OpenVZ, IBM vendor strings, UML and Linux-VServer. */
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	FILE *fd;

	/* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */

	if ((fd = ul_path_fopen(desc->procfs, "r", "sys/kernel/osrelease"))) {
		char buf[256];

		if (fgets(buf, sizeof(buf), fd) != NULL) {
			if (strstr(buf, "Microsoft")) {
				desc->hyper = HYPER_WSL;
				desc->virtype = VIRT_CONT;
			}
		}
		fclose(fd);
		if (desc->virtype)
			return;
	}

	if (mod->system != SYSTEM_SNAPSHOT) {
		read_hypervisor_cpuid(desc);
		if (!desc->hyper)
			desc->hyper = read_hypervisor_dmi();
		if (!desc->hyper && is_vmware_platform())
			desc->hyper = HYPER_VMWARE;
	}

	if (desc->hyper) {
		desc->virtype = VIRT_FULL;

		if (desc->hyper == HYPER_XEN) {
			uint32_t features;

			fd = ul_prefix_fopen(desc->prefix, "r", _PATH_SYS_HYP_FEATURES);

			if (fd && fscanf(fd, "%x", &features) == 1) {
				/* Xen PV domain */
				if (features & XEN_FEATURES_PV_MASK)
					desc->virtype = VIRT_PARA;
				/* Xen PVH domain */
				else if ((features & XEN_FEATURES_PVH_MASK)
								== XEN_FEATURES_PVH_MASK)
					desc->virtype = VIRT_PARA;
			}
			if (fd)
				fclose(fd);
		}
	} else if (read_hypervisor_powerpc(desc) > 0) {}

	/* Xen para-virt or dom0 */
	else if (ul_path_access(desc->procfs, F_OK, "xen") == 0) {
		int dom0 = 0;

		/* "control_d" in the capabilities marks the dom0 domain */
		fd = ul_path_fopen(desc->procfs, "r", "xen/capabilities");
		if (fd) {
			char buf[256];

			if (fscanf(fd, "%255s", buf) == 1 &&
			    !strcmp(buf, "control_d"))
				dom0 = 1;
			fclose(fd);
		}
		desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
		desc->hyper = HYPER_XEN;

	/* Xen full-virt on non-x86_64 */
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
		desc->hyper = HYPER_XEN;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
		desc->hyper = HYPER_VMWARE;
		desc->virtype = VIRT_FULL;
	} else if (has_pci_device(desc, hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
		desc->hyper = HYPER_VBOX;
		desc->virtype = VIRT_FULL;

	/* IBM PR/SM */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
		char buf[BUFSIZ];

		desc->hyper = HYPER_IBM;
		desc->hypervisor = "PR/SM";
		desc->virtype = VIRT_FULL;
		while (fgets(buf, sizeof(buf), fd) != NULL) {
			char *str, *p;

			if (!strstr(buf, "Control Program:"))
				continue;
			if (!strstr(buf, "KVM"))
				desc->hyper = HYPER_IBM;
			else
				desc->hyper = HYPER_KVM;
			p = strchr(buf, ':');
			if (!p)
				continue;
			xasprintf(&str, "%s", p + 1);

			/* remove leading, trailing and repeating whitespace */
			while (*str == ' ')
				str++;
			desc->hypervisor = str;
			str += strlen(str) - 1;
			while ((*str == '\n') || (*str == ' '))
				*(str--) = '\0';
			while ((str = strstr(desc->hypervisor, " ")))
				memmove(str, str + 1, strlen(str));
			break;
		}
		fclose(fd);
	}

	/* OpenVZ/Virtuozzo - /proc/vz dir should exist
	 * /proc/bc should not */
	else if (ul_path_access(desc->procfs, F_OK, "vz") == 0 &&
		 ul_path_access(desc->procfs, F_OK, "bc") != 0) {
		desc->hyper = HYPER_PARALLELS;
		desc->virtype = VIRT_CONT;

	/* IBM */
	} else if (desc->vendor &&
		 (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
		  strcmp(desc->vendor, "IBM/S390") == 0)) {
		desc->hyper = HYPER_IBM;
		desc->virtype = VIRT_FULL;

	/* User-mode-linux */
	} else if (desc->modelname && strstr(desc->modelname, "UML")) {
		desc->hyper = HYPER_UML;
		desc->virtype = VIRT_PARA;

	/* Linux-VServer */
	} else if ((fd = ul_path_fopen(desc->procfs, "r", "self/status"))) {
		char buf[BUFSIZ];
		char *val = NULL;

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (lookup(buf, "VxID", &val))
				break;
		}
		fclose(fd);

		/* a purely numeric VxID marks a VServer guest */
		if (val) {
			char *org = val;

			while (isdigit(*val))
				++val;
			if (!*val) {
				desc->hyper = HYPER_VSERVER;
				desc->virtype = VIRT_CONT;
			}
			free(org);
		}
	}
}
1044
1045 /* add @set to the @ary, unnecessary set is deallocated. */
1046 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
1047 {
1048 int i;
1049 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1050
1051 if (!ary)
1052 return -1;
1053
1054 for (i = 0; i < *items; i++) {
1055 if (CPU_EQUAL_S(setsize, set, ary[i]))
1056 break;
1057 }
1058 if (i == *items) {
1059 ary[*items] = set;
1060 ++*items;
1061 return 0;
1062 }
1063 CPU_FREE(set);
1064 return 1;
1065 }
1066
1067 static void
1068 read_topology(struct lscpu_desc *desc, int idx)
1069 {
1070 cpu_set_t *thread_siblings, *core_siblings;
1071 cpu_set_t *book_siblings, *drawer_siblings;
1072 int coreid, socketid, bookid, drawerid;
1073 int i, num = real_cpu_num(desc, idx);
1074
1075 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/topology/thread_siblings", num) != 0)
1076 return;
1077
1078 ul_path_readf_cpuset(desc->syscpu, &thread_siblings, maxcpus,
1079 "cpu%d/topology/thread_siblings", num);
1080 ul_path_readf_cpuset(desc->syscpu, &core_siblings, maxcpus,
1081 "cpu%d/topology/core_siblings", num);
1082 ul_path_readf_cpuset(desc->syscpu, &book_siblings, maxcpus,
1083 "cpu%d/topology/book_siblings", num);
1084 ul_path_readf_cpuset(desc->syscpu, &drawer_siblings, maxcpus,
1085 "cpu%d/topology/drawer_siblings", num);
1086
1087 if (ul_path_readf_s32(desc->syscpu, &coreid, "cpu%d/topology/core_id", num) != 0)
1088 coreid = -1;
1089
1090 if (ul_path_readf_s32(desc->syscpu, &socketid, "cpu%d/topology/physical_package_id", num) != 0)
1091 socketid = -1;
1092
1093 if (ul_path_readf_s32(desc->syscpu, &bookid, "cpu%d/topology/book_id", num) != 0)
1094 bookid = -1;
1095
1096 if (ul_path_readf_s32(desc->syscpu, &drawerid, "cpu%d/topology/drawer_id", num) != 0)
1097 drawerid = -1;
1098
1099 if (!desc->coremaps) {
1100 int ndrawers, nbooks, nsockets, ncores, nthreads;
1101 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1102
1103 /* threads within one core */
1104 nthreads = CPU_COUNT_S(setsize, thread_siblings);
1105 if (!nthreads)
1106 nthreads = 1;
1107
1108 /* cores within one socket */
1109 ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
1110 if (!ncores)
1111 ncores = 1;
1112
1113 /* number of sockets within one book. Because of odd /
1114 * non-present cpu maps and to keep calculation easy we make
1115 * sure that nsockets and nbooks is at least 1.
1116 */
1117 nsockets = desc->ncpus / nthreads / ncores;
1118 if (!nsockets)
1119 nsockets = 1;
1120
1121 /* number of books */
1122 nbooks = desc->ncpus / nthreads / ncores / nsockets;
1123 if (!nbooks)
1124 nbooks = 1;
1125
1126 /* number of drawers */
1127 ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
1128 if (!ndrawers)
1129 ndrawers = 1;
1130
1131 /* all threads, see also read_basicinfo()
1132 * -- fallback for kernels without
1133 * /sys/devices/system/cpu/online.
1134 */
1135 if (!desc->nthreads)
1136 desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
1137
1138 /* For each map we make sure that it can have up to ncpuspos
1139 * entries. This is because we cannot reliably calculate the
1140 * number of cores, sockets and books on all architectures.
1141 * E.g. completely virtualized architectures like s390 may
1142 * have multiple sockets of different sizes.
1143 */
1144 desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1145 desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1146 desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1147 desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1148 for (i = 0; i < desc->ncpuspos; i++)
1149 desc->coreids[i] = desc->socketids[i] = -1;
1150 if (book_siblings) {
1151 desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1152 desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1153 for (i = 0; i < desc->ncpuspos; i++)
1154 desc->bookids[i] = -1;
1155 }
1156 if (drawer_siblings) {
1157 desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
1158 desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
1159 for (i = 0; i < desc->ncpuspos; i++)
1160 desc->drawerids[i] = -1;
1161 }
1162 }
1163
1164 add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
1165 desc->coreids[idx] = coreid;
1166 add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
1167 desc->socketids[idx] = socketid;
1168 if (book_siblings) {
1169 add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
1170 desc->bookids[idx] = bookid;
1171 }
1172 if (drawer_siblings) {
1173 add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
1174 desc->drawerids[idx] = drawerid;
1175 }
1176 }
1177
1178 static void
1179 read_polarization(struct lscpu_desc *desc, int idx)
1180 {
1181 char mode[64];
1182 int num = real_cpu_num(desc, idx);
1183
1184 if (desc->dispatching < 0)
1185 return;
1186 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/polarization", num) != 0)
1187 return;
1188 if (!desc->polarization)
1189 desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
1190
1191 ul_path_readf_buffer(desc->syscpu, mode, sizeof(mode), "cpu%d/polarization", num);
1192
1193 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
1194 desc->polarization[idx] = POLAR_VLOW;
1195 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
1196 desc->polarization[idx] = POLAR_VMEDIUM;
1197 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
1198 desc->polarization[idx] = POLAR_VHIGH;
1199 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
1200 desc->polarization[idx] = POLAR_HORIZONTAL;
1201 else
1202 desc->polarization[idx] = POLAR_UNKNOWN;
1203 }
1204
1205 static void
1206 read_address(struct lscpu_desc *desc, int idx)
1207 {
1208 int num = real_cpu_num(desc, idx);
1209
1210 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/address", num) != 0)
1211 return;
1212 if (!desc->addresses)
1213 desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
1214 ul_path_readf_s32(desc->syscpu, &desc->addresses[idx], "cpu%d/address", num);
1215 }
1216
1217 static void
1218 read_configured(struct lscpu_desc *desc, int idx)
1219 {
1220 int num = real_cpu_num(desc, idx);
1221
1222 if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/configure", num) != 0)
1223 return;
1224 if (!desc->configured)
1225 desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
1226 ul_path_readf_s32(desc->syscpu, &desc->configured[idx], "cpu%d/configure", num);
1227 }
1228
1229 /* Read overall maximum frequency of cpu */
1230 static char *
1231 cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1232 {
1233 int i;
1234 float cpu_freq = 0.0;
1235 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1236
1237 if (desc->present) {
1238 for (i = 0; i < desc->ncpuspos; i++) {
1239 if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
1240 && desc->maxmhz[i]) {
1241 float freq = atof(desc->maxmhz[i]);
1242
1243 if (freq > cpu_freq)
1244 cpu_freq = freq;
1245 }
1246 }
1247 }
1248 snprintf(buf, bufsz, "%.4f", cpu_freq);
1249 return buf;
1250 }
1251
1252 /* Read overall minimum frequency of cpu */
1253 static char *
1254 cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
1255 {
1256 int i;
1257 float cpu_freq = -1.0;
1258 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1259
1260 if (desc->present) {
1261 for (i = 0; i < desc->ncpuspos; i++) {
1262 if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
1263 && desc->minmhz[i]) {
1264 float freq = atof(desc->minmhz[i]);
1265
1266 if (cpu_freq < 0.0 || freq < cpu_freq)
1267 cpu_freq = freq;
1268 }
1269 }
1270 }
1271 snprintf(buf, bufsz, "%.4f", cpu_freq);
1272 return buf;
1273 }
1274
1275
1276 static void
1277 read_max_mhz(struct lscpu_desc *desc, int idx)
1278 {
1279 int num = real_cpu_num(desc, idx);
1280 int mhz;
1281
1282 if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) != 0)
1283 return;
1284 if (!desc->maxmhz)
1285 desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1286 xasprintf(&desc->maxmhz[idx], "%.4f", (float) mhz / 1000);
1287 }
1288
1289 static void
1290 read_min_mhz(struct lscpu_desc *desc, int idx)
1291 {
1292 int num = real_cpu_num(desc, idx);
1293 int mhz;
1294
1295 if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) != 0)
1296 return;
1297 if (!desc->minmhz)
1298 desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
1299 xasprintf(&desc->minmhz[idx], "%.4f", (float) mhz / 1000);
1300 }
1301
1302 static int
1303 cachecmp(const void *a, const void *b)
1304 {
1305 struct cpu_cache *c1 = (struct cpu_cache *) a;
1306 struct cpu_cache *c2 = (struct cpu_cache *) b;
1307
1308 return strcmp(c2->name, c1->name);
1309 }
1310
/*
 * Reads cache descriptions (type, level, ways, size) and the
 * shared_cpu_map for the CPU with logical index @idx from
 * /sys/.../cpu<n>/cache/index<i>.  On the first call it counts the
 * index directories to size desc->caches; per-cache static data is
 * filled only once (when ca->name is still unset).
 */
static void
read_cache(struct lscpu_desc *desc, int idx)
{
	char buf[256];
	int i;
	int num = real_cpu_num(desc, idx);

	if (!desc->ncaches) {
		/* count cache levels by probing index0, index1, ... */
		while (ul_path_accessf(desc->syscpu, F_OK,
					"cpu%d/cache/index%d",
					num, desc->ncaches) == 0)
			desc->ncaches++;

		if (!desc->ncaches)
			return;
		desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
	}
	for (i = 0; i < desc->ncaches; i++) {
		struct cpu_cache *ca = &desc->caches[i];
		cpu_set_t *map;

		if (ul_path_accessf(desc->syscpu, F_OK,
					"cpu%d/cache/index%d", num, i) != 0)
			continue;
		if (!ca->name) {
			int type = 0;

			/* cache type */
			if (ul_path_readf_string(desc->syscpu, &ca->type,
					"cpu%d/cache/index%d/type", num, i) > 0) {
				if (!strcmp(ca->type, "Data"))
					type = 'd';
				else if (!strcmp(ca->type, "Instruction"))
					type = 'i';
			}

			/* cache level; name becomes e.g. "L1d" or "L2" */
			ul_path_readf_s32(desc->syscpu, &ca->level,
					"cpu%d/cache/index%d/level", num, i);
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			/* cache ways */
			ul_path_readf_s32(desc->syscpu, &ca->ways,
					"cpu%d/cache/index%d/ways_of_associativity", num, i);

			/* cache size */
			if (ul_path_readf_buffer(desc->syscpu, buf, sizeof(buf),
					"cpu%d/cache/index%d/size", num, i) > 0)
				parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		/* information about how CPUs share different caches;
		 * add_cpuset_to_array() takes ownership of @map */
		ul_path_readf_cpuset(desc->syscpu, &map, maxcpus,
				  "cpu%d/cache/index%d/shared_cpu_map", num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
	}
}
1378
1379 static inline int is_node_dirent(struct dirent *d)
1380 {
1381 return
1382 d &&
1383 #ifdef _DIRENT_HAVE_D_TYPE
1384 (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
1385 #endif
1386 strncmp(d->d_name, "node", 4) == 0 &&
1387 isdigit_string(d->d_name + 4);
1388 }
1389
/* qsort() comparator: orders node numbers ascending. */
static int
nodecmp(const void *ap, const void *bp)
{
	const int *lhs = ap, *rhs = bp;

	return *lhs - *rhs;
}
1396
/*
 * Discovers NUMA nodes under _PATH_SYS_NODE: counts node<N> entries,
 * collects and sorts the node numbers into desc->idx2nodenum, and reads
 * each node's cpumap into desc->nodemaps.
 */
static void
read_nodes(struct lscpu_desc *desc)
{
	int i = 0;
	DIR *dir;
	struct dirent *d;
	struct path_cxt *sysnode;

	desc->nnodes = 0;

	sysnode = ul_new_path(_PATH_SYS_NODE);
	if (!sysnode)
		err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
	ul_path_set_prefix(sysnode, desc->prefix);

	dir = ul_path_opendir(sysnode, NULL);
	if (!dir)
		goto done;

	/* first pass: count the node directories */
	while ((d = readdir(dir))) {
		if (is_node_dirent(d))
			desc->nnodes++;
	}

	if (!desc->nnodes) {
		closedir(dir);
		goto done;
	}

	desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
	desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));

	/* second pass: extract the node numbers from "node<N>" names */
	rewinddir(dir);
	while ((d = readdir(dir)) && i < desc->nnodes) {
		if (is_node_dirent(d))
			desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
						_("Failed to extract the node number"));
	}
	closedir(dir);
	qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);

	/* information about how nodes share different CPUs */
	for (i = 0; i < desc->nnodes; i++)
		ul_path_readf_cpuset(sysnode, &desc->nodemaps[i], maxcpus,
					"node%d/cpumap", desc->idx2nodenum[i]);
done:
	ul_unref_path(sysnode);
}
1445
/*
 * Formats one cell of the CPU table: the value of column @col for the
 * CPU with logical index @idx.  Writes into @buf and returns it; an
 * unknown/unavailable value leaves @buf empty.  Returns NULL only when
 * the composed CACHE value would not fit into @buf.
 */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
	      struct lscpu_modifier *mod,
	      char *buf, size_t bufsz)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	size_t i;
	int cpu = real_cpu_num(desc, idx);

	*buf = '\0';

	switch (col) {
	case COL_CPU_CPU:
		snprintf(buf, bufsz, "%d", cpu);
		break;
	case COL_CPU_CORE:
		/* --physical prints the sysfs ID; otherwise the logical ID
		 * is the index of the matching sibling map */
		if (mod->physical) {
			if (desc->coreids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->coreids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->coremaps,
					     desc->ncores, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_SOCKET:
		if (mod->physical) {
			if (desc->socketids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->socketids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->socketmaps,
					     desc->nsockets, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_NODE:
		if (cpuset_ary_isset(cpu, desc->nodemaps,
				     desc->nnodes, setsize, &i) == 0)
			snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
		break;
	case COL_CPU_DRAWER:
		if (mod->physical) {
			if (desc->drawerids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->drawermaps,
					     desc->ndrawers, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_BOOK:
		if (mod->physical) {
			if (desc->bookids[idx] == -1)
				snprintf(buf, bufsz, "-");
			else
				snprintf(buf, bufsz, "%d", desc->bookids[idx]);
		} else {
			if (cpuset_ary_isset(cpu, desc->bookmaps,
					     desc->nbooks, setsize, &i) == 0)
				snprintf(buf, bufsz, "%zu", i);
		}
		break;
	case COL_CPU_CACHE:
	{
		/* one ID per cache level, highest level first, joined by
		 * ',' (compat mode) or ':' */
		char *p = buf;
		size_t sz = bufsz;
		int j;

		for (j = desc->ncaches - 1; j >= 0; j--) {
			struct cpu_cache *ca = &desc->caches[j];

			if (cpuset_ary_isset(cpu, ca->sharedmaps,
					     ca->nsharedmaps, setsize, &i) == 0) {
				int x = snprintf(p, sz, "%zu", i);
				if (x < 0 || (size_t) x >= sz)
					return NULL;
				p += x;
				sz -= x;
			}
			if (j != 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		break;
	}
	case COL_CPU_POLARIZATION:
		if (desc->polarization) {
			int x = desc->polarization[idx];

			snprintf(buf, bufsz, "%s",
				 mod->mode == OUTPUT_PARSABLE ?
						polar_modes[x].parsable :
						polar_modes[x].readable);
		}
		break;
	case COL_CPU_ADDRESS:
		if (desc->addresses)
			snprintf(buf, bufsz, "%d", desc->addresses[idx]);
		break;
	case COL_CPU_CONFIGURED:
		if (!desc->configured)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 desc->configured[idx] ? _("yes") : _("no"));
		break;
	case COL_CPU_ONLINE:
		if (!desc->online)
			break;
		if (mod->mode == OUTPUT_PARSABLE)
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("Y") : _("N"));
		else
			snprintf(buf, bufsz, "%s",
				 is_cpu_online(desc, cpu) ? _("yes") : _("no"));
		break;
	case COL_CPU_MAXMHZ:
		if (desc->maxmhz && desc->maxmhz[idx])
			xstrncpy(buf, desc->maxmhz[idx], bufsz);
		break;
	case COL_CPU_MINMHZ:
		if (desc->minmhz && desc->minmhz[idx])
			xstrncpy(buf, desc->minmhz[idx], bufsz);
		break;
	}
	return buf;
}
1586
/*
 * Formats the header for column @col into @buf and returns it.  For the
 * CACHE column the header is composed from the real cache names (highest
 * level first, joined by ',' or ':'); NULL is returned when the composed
 * header would not fit into @buf.
 */
static char *
get_cell_header(struct lscpu_desc *desc, int col,
		struct lscpu_modifier *mod,
	        char *buf, size_t bufsz)
{
	*buf = '\0';

	if (col == COL_CPU_CACHE) {
		char *p = buf;
		size_t sz = bufsz;
		int i;

		for (i = desc->ncaches - 1; i >= 0; i--) {
			int x = snprintf(p, sz, "%s", desc->caches[i].name);
			if (x < 0 || (size_t) x >= sz)
				return NULL;
			sz -= x;
			p += x;
			if (i > 0) {
				if (sz < 2)
					return NULL;
				*p++ = mod->compat ? ',' : ':';
				*p = '\0';
				sz--;
			}
		}
		if (desc->ncaches)
			return buf;
		/* no caches: fall through to the generic column name */
	}
	snprintf(buf, bufsz, "%s", coldescs_cpu[col].name);
	return buf;
}
1619
1620 /*
1621 * [-C] backend
1622 */
/*
 * Implements the -C/--caches output: one libsmartcols row per cache
 * (highest level first) with the columns selected in @cols.
 */
static void
print_caches_readable(struct lscpu_desc *desc, int cols[], int ncols,
	       struct lscpu_modifier *mod)
{
	int i;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		 err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "caches");
	}

	for (i = 0; i < ncols; i++) {
		struct lscpu_coldesc *cd = &coldescs_cache[cols[i]];
		if (!scols_table_new_column(table, cd->name, 0, cd->flags))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	/* iterate in reverse so the highest cache level is printed first */
	for (i = desc->ncaches - 1; i >= 0; i--) {
		struct cpu_cache *ca = &desc->caches[i];
		struct libscols_line *line;
		int c;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			char *data = NULL;
			int col = cols[c];

			switch (col) {
			case COL_CACHE_NAME:
				if (ca->name)
					data = xstrdup(ca->name);
				break;
			case COL_CACHE_ONESIZE:
				if (!ca->size)
					break;
				if (mod->bytes)
					xasprintf(&data, "%" PRIu64, ca->size);
				else
					data = size_to_human_string(SIZE_SUFFIX_1LETTER, ca->size);
				break;
			case COL_CACHE_ALLSIZE:
			{
				/* total size over all instances of this cache */
				uint64_t sz = 0;

				if (get_cache_full_size(desc, ca, &sz) != 0)
					break;
				if (mod->bytes)
					xasprintf(&data, "%" PRIu64, sz);
				else
					data = size_to_human_string(SIZE_SUFFIX_1LETTER, sz);
				break;
			}
			case COL_CACHE_WAYS:
				if (ca->ways)
					xasprintf(&data, "%d", ca->ways);
				break;
			case COL_CACHE_TYPE:
				if (ca->type)
					data = xstrdup(ca->type);
				break;
			case COL_CACHE_LEVEL:
				if (ca->level)
					xasprintf(&data, "%d", ca->level);
				break;
			}

			/* the line takes ownership of @data */
			if (data && scols_line_refer_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1706
1707 /*
1708 * [-p] backend, we support two parsable formats:
1709 *
1710 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1711 * output and it contains fixed set of the columns. The CACHE columns are at
1712 * the end of the line and the CACHE is not printed if the number of the caches
1713 * is zero. The CACHE columns are separated by two commas, for example:
1714 *
1715 * $ lscpu --parse
1716 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1717 * 0,0,0,0,,0,0,0
1718 * 1,1,0,0,,1,1,0
1719 *
1720 * 2) "user defined output" -- this format prints always all columns without
1721 * special prefix for CACHE column. If there are not CACHEs then the column is
1722 * empty and the header "Cache" is printed rather than a real name of the cache.
1723 * The CACHE columns are separated by ':'.
1724 *
1725 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1726 * # CPU,Core,Socket,Node,L1d:L1i:L2
1727 * 0,0,0,0,0:0:0
1728 * 1,1,0,0,1:1:0
1729 */
1730 static void
1731 print_cpus_parsable(struct lscpu_desc *desc, int cols[], int ncols,
1732 struct lscpu_modifier *mod)
1733 {
1734 char buf[BUFSIZ], *data;
1735 int i;
1736
1737 /*
1738 * Header
1739 */
1740 printf(_(
1741 "# The following is the parsable format, which can be fed to other\n"
1742 "# programs. Each different item in every column has an unique ID\n"
1743 "# starting from zero.\n"));
1744
1745 fputs("# ", stdout);
1746 for (i = 0; i < ncols; i++) {
1747 int col = cols[i];
1748
1749 if (col == COL_CPU_CACHE) {
1750 if (mod->compat && !desc->ncaches)
1751 continue;
1752 if (mod->compat && i != 0)
1753 putchar(',');
1754 }
1755 if (i > 0)
1756 putchar(',');
1757
1758 data = get_cell_header(desc, col, mod, buf, sizeof(buf));
1759
1760 if (data && * data && col != COL_CPU_CACHE &&
1761 !coldescs_cpu[col].is_abbr) {
1762 /*
1763 * For normal column names use mixed case (e.g. "Socket")
1764 */
1765 char *p = data + 1;
1766
1767 while (p && *p != '\0') {
1768 *p = tolower((unsigned int) *p);
1769 p++;
1770 }
1771 }
1772 fputs(data && *data ? data : "", stdout);
1773 }
1774 putchar('\n');
1775
1776 /*
1777 * Data
1778 */
1779 for (i = 0; i < desc->ncpuspos; i++) {
1780 int c;
1781 int cpu = real_cpu_num(desc, i);
1782
1783 if (desc->online) {
1784 if (!mod->offline && !is_cpu_online(desc, cpu))
1785 continue;
1786 if (!mod->online && is_cpu_online(desc, cpu))
1787 continue;
1788 }
1789 if (desc->present && !is_cpu_present(desc, cpu))
1790 continue;
1791 for (c = 0; c < ncols; c++) {
1792 if (mod->compat && cols[c] == COL_CPU_CACHE) {
1793 if (!desc->ncaches)
1794 continue;
1795 if (c > 0)
1796 putchar(',');
1797 }
1798 if (c > 0)
1799 putchar(',');
1800
1801 data = get_cell_data(desc, i, cols[c], mod,
1802 buf, sizeof(buf));
1803 fputs(data && *data ? data : "", stdout);
1804 }
1805 putchar('\n');
1806 }
1807 }
1808
1809 /*
1810 * [-e] backend
1811 */
/*
 * Implements the -e/--extended output: one libsmartcols row per CPU
 * (filtered by the online/offline/present modifiers) with the columns
 * selected in @cols; empty cells are printed as "-".
 */
static void
print_cpus_readable(struct lscpu_desc *desc, int cols[], int ncols,
	     struct lscpu_modifier *mod)
{
	int i;
	char buf[BUFSIZ];
	const char *data;
	struct libscols_table *table;

	scols_init_debug(0);

	table = scols_new_table();
	if (!table)
		 err(EXIT_FAILURE, _("failed to allocate output table"));
	if (mod->json) {
		scols_table_enable_json(table, 1);
		scols_table_set_name(table, "cpus");
	}

	for (i = 0; i < ncols; i++) {
		data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
		if (!scols_table_new_column(table, data, 0, coldescs_cpu[cols[i]].flags))
			err(EXIT_FAILURE, _("failed to allocate output column"));
	}

	for (i = 0; i < desc->ncpuspos; i++) {
		int c;
		struct libscols_line *line;
		int cpu = real_cpu_num(desc, i);

		/* honour --all/--online/--offline selection */
		if (desc->online) {
			if (!mod->offline && !is_cpu_online(desc, cpu))
				continue;
			if (!mod->online && is_cpu_online(desc, cpu))
				continue;
		}
		if (desc->present && !is_cpu_present(desc, cpu))
			continue;

		line = scols_table_new_line(table, NULL);
		if (!line)
			err(EXIT_FAILURE, _("failed to allocate output line"));

		for (c = 0; c < ncols; c++) {
			data = get_cell_data(desc, i, cols[c], mod,
					     buf, sizeof(buf));
			if (!data || !*data)
				data = "-";
			/* scols_line_set_data() copies @data */
			if (scols_line_set_data(line, c, data))
				err(EXIT_FAILURE, _("failed to add output data"));
		}
	}

	scols_print_table(table);
	scols_unref_table(table);
}
1868
1869
/*
 * Adds one "description | value" row to the summary table @tb; the
 * value is formatted printf-style from @fmt and the variadic arguments.
 */
static void __attribute__ ((__format__(printf, 3, 4)))
add_summary_sprint(struct libscols_table *tb,
		   const char *txt,
		   const char *fmt,
		   ...)
{
	struct libscols_line *ln = scols_table_new_line(tb, NULL);
	char *data;
	va_list args;

	if (!ln)
		err(EXIT_FAILURE, _("failed to allocate output line"));

	/* description column */
	scols_line_set_data(ln, 0, txt);

	/* data column; the line takes ownership of the formatted string */
	va_start(args, fmt);
	xvasprintf(&data, fmt, args);
	va_end(args);

	if (data && scols_line_refer_data(ln, 1, data))
		err(EXIT_FAILURE, _("failed to add output data"));
}

/* convenience wrappers for the common integer and string values */
#define add_summary_n(tb, txt, num) add_summary_sprint(tb, txt, "%d", num)
#define add_summary_s(tb, txt, str) add_summary_sprint(tb, txt, "%s", str)
1897
1898 static void
1899 print_cpuset(struct libscols_table *tb,
1900 const char *key, cpu_set_t *set, int hex)
1901 {
1902 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
1903 size_t setbuflen = 7 * maxcpus;
1904 char setbuf[setbuflen], *p;
1905
1906 if (hex) {
1907 p = cpumask_create(setbuf, setbuflen, set, setsize);
1908 add_summary_s(tb, key, p);
1909 } else {
1910 p = cpulist_create(setbuf, setbuflen, set, setsize);
1911 add_summary_s(tb, key, p);
1912 }
1913 }
1914
/*
 * Computes the total size of cache @ca over all its instances in the
 * system and stores it in @res.  The instance count is derived from the
 * number of present CPUs sharing one instance, corrected for SMT
 * threads.  Always returns 0.
 */
static int get_cache_full_size(struct lscpu_desc *desc,
		struct cpu_cache *ca, uint64_t *res)
{
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	int i, nshares = 0;

	/* Count number of CPUs which shares the cache */
	/* NOTE(review): only sharedmaps[0] is inspected -- presumably all
	 * instances of one cache level share the same geometry; confirm */
	for (i = 0; i < desc->ncpuspos; i++) {
		int cpu = real_cpu_num(desc, i);

		if (desc->present && !is_cpu_present(desc, cpu))
			continue;
		if (CPU_ISSET_S(cpu, setsize, ca->sharedmaps[0]))
			nshares++;
	}

	/* Correction for CPU threads */
	if (desc->nthreads > desc->ncores)
		nshares /= (desc->nthreads / desc->ncores);
	if (nshares < 1)
		nshares = 1;

	*res = (desc->ncores / nshares) * ca->size;
	return 0;
}
1940
/*
 * default output
 *
 * Builds the two-column (field/value) summary table and prints it.
 * Rows are added only when the corresponding data was detected, so the
 * output adapts to the architecture (s390 books/drawers, x86 virt
 * flags, NUMA nodes, vulnerabilities, ...).
 */
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
	char buf[BUFSIZ];
	int i = 0;
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	struct libscols_table *tb;

	scols_init_debug(0);

	tb = scols_new_table();
	if (!tb)
		err(EXIT_FAILURE, _("failed to allocate output table"));

	scols_table_enable_noheadings(tb, 1);
	if (mod->json) {
		scols_table_enable_json(tb, 1);
		scols_table_set_name(tb, "lscpu");
	}

	if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
	    scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES | SCOLS_FL_WRAP) == NULL)
		err(EXIT_FAILURE, _("failed to initialize output column"));

	add_summary_s(tb, _("Architecture:"), desc->arch);
	if (desc->mode) {
		char *p = buf;

		/* compose "32-bit, 64-bit"; the trailing ", " is cut off
		 * by the *(p - 2) write below */
		if (desc->mode & MODE_32BIT) {
			strcpy(p, "32-bit, ");
			p += 8;
		}
		if (desc->mode & MODE_64BIT) {
			strcpy(p, "64-bit, ");
			p += 8;
		}
		*(p - 2) = '\0';
		add_summary_s(tb, _("CPU op-mode(s):"), buf);
	}
#if !defined(WORDS_BIGENDIAN)
	add_summary_s(tb, _("Byte Order:"), "Little Endian");
#else
	add_summary_s(tb, _("Byte Order:"), "Big Endian");
#endif

	if (desc->addrsz)
		add_summary_s(tb, _("Address sizes:"), desc->addrsz);

	add_summary_n(tb, _("CPU(s):"), desc->ncpus);

	if (desc->online)
		print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
					    _("On-line CPU(s) list:"),
				desc->online, mod->hex);

	if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
		cpu_set_t *set;

		/* Linux kernel provides cpuset of off-line CPUs that contains
		 * all configured CPUs (see /sys/devices/system/cpu/offline),
		 * but want to print real (present in system) off-line CPUs only.
		 */
		set = cpuset_alloc(maxcpus, NULL, NULL);
		if (!set)
			err(EXIT_FAILURE, _("failed to callocate cpu set"));
		CPU_ZERO_S(setsize, set);
		for (i = 0; i < desc->ncpuspos; i++) {
			int cpu = real_cpu_num(desc, i);
			if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
				CPU_SET_S(cpu, setsize, set);
		}
		print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
					    _("Off-line CPU(s) list:"),
			     set, mod->hex);
		cpuset_free(set);
	}

	if (desc->nsockets) {
		int threads_per_core, cores_per_socket, sockets_per_book;
		int books_per_drawer, drawers;
		FILE *fd;

		threads_per_core = cores_per_socket = sockets_per_book = 0;
		books_per_drawer = drawers = 0;
		/* s390 detects its cpu topology via /proc/sysinfo, if present.
		 * Using simply the cpu topology masks in sysfs will not give
		 * usable results since everything is virtualized. E.g.
		 * virtual core 0 may have only 1 cpu, but virtual core 2 may
		 * five cpus.
		 * If the cpu topology is not exported (e.g. 2nd level guest)
		 * fall back to old calculation scheme.
		 */
		if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
			int t0, t1;

			while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
				if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
					   &t0, &t1, &drawers, &books_per_drawer,
					   &sockets_per_book,
					   &cores_per_socket) == 6)
					break;
			}
			if (fd)
				fclose(fd);
		}
		if (desc->mtid)
			threads_per_core = atoi(desc->mtid) + 1;
		/* the "x ?: y" GNU extension falls back to the sysfs-derived
		 * counts when /proc/sysinfo provided nothing */
		add_summary_n(tb, _("Thread(s) per core:"),
			threads_per_core ?: desc->nthreads / desc->ncores);
		add_summary_n(tb, _("Core(s) per socket:"),
			cores_per_socket ?: desc->ncores / desc->nsockets);
		if (desc->nbooks) {
			add_summary_n(tb, _("Socket(s) per book:"),
				sockets_per_book ?: desc->nsockets / desc->nbooks);
			if (desc->ndrawers) {
				add_summary_n(tb, _("Book(s) per drawer:"),
					books_per_drawer ?: desc->nbooks / desc->ndrawers);
				add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
			} else {
				add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
			}
		} else {
			add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
		}
	}
	if (desc->nnodes)
		add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
	if (desc->vendor)
		add_summary_s(tb, _("Vendor ID:"), desc->vendor);
	if (desc->machinetype)
		add_summary_s(tb, _("Machine type:"), desc->machinetype);
	if (desc->family)
		add_summary_s(tb, _("CPU family:"), desc->family);
	if (desc->model || desc->revision)
		add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
	if (desc->modelname || desc->cpu)
		add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
	if (desc->stepping)
		add_summary_s(tb, _("Stepping:"), desc->stepping);
	if (desc->freqboost >= 0)
		add_summary_s(tb, _("Frequency boost:"), desc->freqboost ?
				_("enabled") : _("disabled"));
	if (desc->mhz)
		add_summary_s(tb, _("CPU MHz:"), desc->mhz);
	if (desc->dynamic_mhz)
		add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
	if (desc->static_mhz)
		add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
	if (desc->maxmhz)
		add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
	if (desc->minmhz)
		add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
	if (desc->bogomips)
		add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
	if (desc->virtflag) {
		if (!strcmp(desc->virtflag, "svm"))
			add_summary_s(tb, _("Virtualization:"), "AMD-V");
		else if (!strcmp(desc->virtflag, "vmx"))
			add_summary_s(tb, _("Virtualization:"), "VT-x");
	}
	if (desc->hypervisor)
		add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
	if (desc->hyper) {
		add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
		add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
	}
	if (desc->dispatching >= 0)
		add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
	if (desc->ncaches) {
		/* per-level total cache sizes, highest level first */
		for (i = desc->ncaches - 1; i >= 0; i--) {
			uint64_t sz = 0;
			char *tmp;
			struct cpu_cache *ca = &desc->caches[i];

			if (ca->size == 0)
				continue;
			if (get_cache_full_size(desc, ca, &sz) != 0 || sz == 0)
				continue;
			if (mod->bytes)
				xasprintf(&tmp, "%" PRIu64, sz);
			else
				tmp = size_to_human_string(
					SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
					sz);
			snprintf(buf, sizeof(buf), _("%s cache:"), ca->name);
			add_summary_s(tb, buf, tmp);
			free(tmp);
		}
	}
	if (desc->necaches) {
		/* extra caches (e.g. from /proc/cpuinfo) */
		for (i = desc->necaches - 1; i >= 0; i--) {
			char *tmp;
			struct cpu_cache *ca = &desc->ecaches[i];

			if (ca->size == 0)
				continue;
			if (mod->bytes)
				xasprintf(&tmp, "%" PRIu64, ca->size);
			else
				tmp = size_to_human_string(
					SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
					ca->size);
			snprintf(buf, sizeof(buf), _("%s cache:"), ca->name);
			add_summary_s(tb, buf, tmp);
			free(tmp);
		}
	}

	for (i = 0; i < desc->nnodes; i++) {
		snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
		print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
	}

	if (desc->physsockets) {
		add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
		add_summary_n(tb, _("Physical chips:"), desc->physchips);
		add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
	}

	if (desc->vuls) {
		for (i = 0; i < desc->nvuls; i++) {
			snprintf(buf, sizeof(buf), ("Vulnerability %s:"), desc->vuls[i].name);
			add_summary_s(tb, buf, desc->vuls[i].text);
		}
	}

	if (desc->flags)
		add_summary_s(tb, _("Flags:"), desc->flags);

	scols_print_table(tb);
	scols_unref_table(tb);
}
2176
/* Prints the --help text (options plus the available columns for
 * -e/-p and -C) and exits successfully; never returns. */
static void __attribute__((__noreturn__)) usage(void)
{
	FILE *out = stdout;
	size_t i;

	fputs(USAGE_HEADER, out);
	fprintf(out, _(" %s [options]\n"), program_invocation_short_name);

	fputs(USAGE_SEPARATOR, out);
	fputs(_("Display information about the CPU architecture.\n"), out);

	fputs(USAGE_OPTIONS, out);
	fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
	fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
	fputs(_(" -B, --bytes print sizes in bytes rather than in human readable format\n"), out);
	fputs(_(" -C, --caches[=<list>] info about caches in extended readable format\n"), out);
	fputs(_(" -c, --offline print offline CPUs only\n"), out);
	fputs(_(" -J, --json use JSON for default or extended format\n"), out);
	fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
	fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
	fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
	fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
	fputs(_(" -y, --physical print physical instead of logical IDs\n"), out);
	fputs(_(" --output-all print all available columns for -e, -p or -C\n"), out);
	fputs(USAGE_SEPARATOR, out);
	printf(USAGE_HELP_OPTIONS(25));

	/* list the column names together with their translated help texts */
	fputs(_("\nAvailable output columns for -e or -p:\n"), out);
	for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++)
		fprintf(out, " %13s %s\n", coldescs_cpu[i].name, _(coldescs_cpu[i].help));

	fputs(_("\nAvailable output columns for -C:\n"), out);
	for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++)
		fprintf(out, " %13s %s\n", coldescs_cache[i].name, _(coldescs_cache[i].help));

	printf(USAGE_MAN_TAIL("lscpu(1)"));

	exit(EXIT_SUCCESS);
}
2216
/*
 * Entry point: parse command-line options, gather CPU topology/cache/NUMA
 * data from sysfs and procfs, then print in the selected output mode
 * (summary, caches table, parsable, or extended readable).
 */
int main(int argc, char *argv[])
{
	/* Output modifiers (mode plus hex/bytes/json/online/offline flags)
	 * and the CPU description that the read_*() helpers fill in. */
	struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
	struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
	int c, i, all = 0;
	/* Column selection for -e/-p/-C; sized by the CPU column table,
	 * which is assumed to be at least as large as the cache table. */
	int columns[ARRAY_SIZE(coldescs_cpu)], ncolumns = 0;
	int cpu_modifier_specified = 0;		/* any of -a/-b/-c was given */
	size_t setsize;

	enum {
		OPT_OUTPUT_ALL = CHAR_MAX + 1,	/* --output-all has no short form */
	};
	static const struct option longopts[] = {
		{ "all",        no_argument,       NULL, 'a' },
		{ "online",     no_argument,       NULL, 'b' },
		{ "bytes",      no_argument,       NULL, 'B' },
		{ "caches",     optional_argument, NULL, 'C' },
		{ "offline",    no_argument,       NULL, 'c' },
		{ "help",       no_argument,       NULL, 'h' },
		{ "extended",   optional_argument, NULL, 'e' },
		{ "json",       no_argument,       NULL, 'J' },
		{ "parse",      optional_argument, NULL, 'p' },
		{ "sysroot",    required_argument, NULL, 's' },
		{ "physical",   no_argument,       NULL, 'y' },
		{ "hex",        no_argument,       NULL, 'x' },
		{ "version",    no_argument,       NULL, 'V' },
		{ "output-all", no_argument,       NULL, OPT_OUTPUT_ALL },
		{ NULL, 0, NULL, 0 }
	};

	/* Mutually exclusive groups: -C/-e/-p select the output mode,
	 * -a/-b/-c select which CPUs are shown. */
	static const ul_excl_t excl[] = {	/* rows and cols in ASCII order */
		{ 'C','e','p' },
		{ 'a','b','c' },
		{ 0 }
	};
	int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);
	atexit(close_stdout);

	while ((c = getopt_long(argc, argv, "aBbC::ce::hJp::s:xyV", longopts, NULL)) != -1) {

		err_exclusive_options(c, longopts, excl, excl_st);

		switch (c) {
		case 'a':
			mod->online = mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'B':
			mod->bytes = 1;
			break;
		case 'b':
			mod->online = 1;
			cpu_modifier_specified = 1;
			break;
		case 'c':
			mod->offline = 1;
			cpu_modifier_specified = 1;
			break;
		case 'C':
			if (optarg) {
				/* accept "-C=LIST" as well as "-C LIST" */
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						cache_column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = OUTPUT_CACHES;
			break;
		case 'J':
			mod->json = 1;
			break;
		case 'p':
		case 'e':
			if (optarg) {
				/* accept "-p=LIST" as well as "-p LIST" */
				if (*optarg == '=')
					optarg++;
				ncolumns = string_to_idarray(optarg,
						columns, ARRAY_SIZE(columns),
						cpu_column_name_to_id);
				if (ncolumns < 0)
					return EXIT_FAILURE;
			}
			mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
			break;
		case 's':
			/* read from a saved snapshot tree instead of live /sys, /proc */
			desc->prefix = optarg;
			mod->system = SYSTEM_SNAPSHOT;
			break;
		case 'x':
			mod->hex = 1;
			break;
		case 'y':
			mod->physical = 1;
			break;
		case OPT_OUTPUT_ALL:
			all = 1;
			break;

		case 'h':
			usage();		/* noreturn */
		case 'V':
			print_version(EXIT_SUCCESS);
		default:
			errtryhelp(EXIT_FAILURE);
		}
	}

	/* --output-all without an explicit list: select every column of the
	 * table that matches the chosen output mode. */
	if (all && ncolumns == 0) {
		size_t sz, maxsz = mod->mode == OUTPUT_CACHES ?
				ARRAY_SIZE(coldescs_cache) :
				ARRAY_SIZE(coldescs_cpu);

		for (sz = 0; sz < maxsz; sz++)
			columns[ncolumns++] = sz;
	}

	/* -a/-b/-c only make sense for the per-CPU output modes */
	if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
		fprintf(stderr,
			_("%s: options --all, --online and --offline may only "
			  "be used with options --extended or --parse.\n"),
			program_invocation_short_name);
		return EXIT_FAILURE;
	}

	if (argc != optind) {
		warnx(_("bad usage"));
		errtryhelp(EXIT_FAILURE);
	}

	/* set default cpu display mode if none was specified */
	if (!mod->online && !mod->offline) {
		mod->online = 1;
		mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
	}

	ul_path_init_debug();

	/* /sys/devices/system/cpu */
	desc->syscpu = ul_new_path(_PATH_SYS_CPU);
	if (!desc->syscpu)
		err(EXIT_FAILURE, _("failed to initialize CPUs sysfs handler"));
	if (desc->prefix)
		ul_path_set_prefix(desc->syscpu, desc->prefix);

	/* /proc */
	desc->procfs = ul_new_path("/proc");
	if (!desc->procfs)
		err(EXIT_FAILURE, _("failed to initialize procfs handler"));
	if (desc->prefix)
		ul_path_set_prefix(desc->procfs, desc->prefix);

	read_basicinfo(desc, mod);

	/* NOTE(review): maxcpus appears to be file-scope state filled in by
	 * read_basicinfo() — confirm against the rest of the file. */
	setsize = CPU_ALLOC_SIZE(maxcpus);

	for (i = 0; i < desc->ncpuspos; i++) {
		/* only consider present CPUs */
		if (desc->present &&
		    !CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present))
			continue;
		read_topology(desc, i);
		read_cache(desc, i);
		read_polarization(desc, i);
		read_address(desc, i);
		read_configured(desc, i);
		read_max_mhz(desc, i);
		read_min_mhz(desc, i);
	}

	/* order cache lists so the summary prints them consistently */
	if (desc->caches)
		qsort(desc->caches, desc->ncaches,
				sizeof(struct cpu_cache), cachecmp);

	if (desc->ecaches)
		qsort(desc->ecaches, desc->necaches,
				sizeof(struct cpu_cache), cachecmp);

	read_nodes(desc);
	read_hypervisor(desc, mod);
	arm_cpu_decode(desc);

	/* dispatch on output mode; for table modes with no explicit column
	 * list, fall back to a sensible default set */
	switch(mod->mode) {
	case OUTPUT_SUMMARY:
		print_summary(desc, mod);
		break;
	case OUTPUT_CACHES:
		if (!ncolumns) {
			columns[ncolumns++] = COL_CACHE_NAME;
			columns[ncolumns++] = COL_CACHE_ONESIZE;
			columns[ncolumns++] = COL_CACHE_ALLSIZE;
			columns[ncolumns++] = COL_CACHE_WAYS;
			columns[ncolumns++] = COL_CACHE_TYPE;
			columns[ncolumns++] = COL_CACHE_LEVEL;
		}
		print_caches_readable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_PARSABLE:
		if (!ncolumns) {
			columns[ncolumns++] = COL_CPU_CPU;
			columns[ncolumns++] = COL_CPU_CORE;
			columns[ncolumns++] = COL_CPU_SOCKET;
			columns[ncolumns++] = COL_CPU_NODE;
			columns[ncolumns++] = COL_CPU_CACHE;
			/* legacy lscpu -p compatible output */
			mod->compat = 1;
		}
		print_cpus_parsable(desc, columns, ncolumns, mod);
		break;
	case OUTPUT_READABLE:
		if (!ncolumns) {
			/* No list was given. Just print whatever is there. */
			columns[ncolumns++] = COL_CPU_CPU;
			if (desc->nodemaps)
				columns[ncolumns++] = COL_CPU_NODE;
			if (desc->drawermaps)
				columns[ncolumns++] = COL_CPU_DRAWER;
			if (desc->bookmaps)
				columns[ncolumns++] = COL_CPU_BOOK;
			if (desc->socketmaps)
				columns[ncolumns++] = COL_CPU_SOCKET;
			if (desc->coremaps)
				columns[ncolumns++] = COL_CPU_CORE;
			if (desc->caches)
				columns[ncolumns++] = COL_CPU_CACHE;
			if (desc->online)
				columns[ncolumns++] = COL_CPU_ONLINE;
			if (desc->configured)
				columns[ncolumns++] = COL_CPU_CONFIGURED;
			if (desc->polarization)
				columns[ncolumns++] = COL_CPU_POLARIZATION;
			if (desc->addresses)
				columns[ncolumns++] = COL_CPU_ADDRESS;
			if (desc->maxmhz)
				columns[ncolumns++] = COL_CPU_MAXMHZ;
			if (desc->minmhz)
				columns[ncolumns++] = COL_CPU_MINMHZ;
		}
		print_cpus_readable(desc, columns, ncolumns, mod);
		break;
	}

	ul_unref_path(desc->syscpu);
	ul_unref_path(desc->procfs);
	return EXIT_SUCCESS;
}