2 * lscpu - CPU architecture information helper
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
31 #include <sys/utsname.h>
34 #include <sys/types.h>
36 #include <sys/personality.h>
38 #if (defined(__x86_64__) || defined(__i386__))
39 # if !defined( __SANITIZE_ADDRESS__)
40 # define INCLUDE_VMWARE_BDOOR
42 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
46 #ifdef INCLUDE_VMWARE_BDOOR
56 #if defined(HAVE_LIBRTAS)
60 #include <libsmartcols.h>
62 #include "closestream.h"
64 #include "fileutils.h"
71 #define _PATH_SYS_SYSTEM "/sys/devices/system"
72 #define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
73 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
74 #define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
76 /* Xen Domain feature flag used for /sys/hypervisor/properties/features */
77 #define XENFEAT_supervisor_mode_kernel 3
78 #define XENFEAT_mmu_pt_update_preserve_ad 5
79 #define XENFEAT_hvm_callback_vector 8
81 #define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
82 #define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
83 | (1U << XENFEAT_hvm_callback_vector) )
85 static const char *virt_types
[] = {
86 [VIRT_NONE
] = N_("none"),
87 [VIRT_PARA
] = N_("para"),
88 [VIRT_FULL
] = N_("full"),
89 [VIRT_CONT
] = N_("container"),
92 static const char *hv_vendors
[] = {
96 [HYPER_MSHV
] = "Microsoft",
97 [HYPER_VMWARE
] = "VMware",
99 [HYPER_VSERVER
] = "Linux-VServer",
100 [HYPER_UML
] = "User-mode Linux",
101 [HYPER_INNOTEK
] = "Innotek GmbH",
102 [HYPER_HITACHI
] = "Hitachi",
103 [HYPER_PARALLELS
] = "Parallels",
104 [HYPER_VBOX
] = "Oracle",
105 [HYPER_OS400
] = "OS/400",
106 [HYPER_PHYP
] = "pHyp",
107 [HYPER_SPAR
] = "Unisys s-Par",
108 [HYPER_WSL
] = "Windows Subsystem for Linux"
111 static const int hv_vendor_pci
[] = {
112 [HYPER_NONE
] = 0x0000,
113 [HYPER_XEN
] = 0x5853,
114 [HYPER_KVM
] = 0x0000,
115 [HYPER_MSHV
] = 0x1414,
116 [HYPER_VMWARE
] = 0x15ad,
117 [HYPER_VBOX
] = 0x80ee,
120 static const int hv_graphics_pci
[] = {
121 [HYPER_NONE
] = 0x0000,
122 [HYPER_XEN
] = 0x0001,
123 [HYPER_KVM
] = 0x0000,
124 [HYPER_MSHV
] = 0x5353,
125 [HYPER_VMWARE
] = 0x0710,
126 [HYPER_VBOX
] = 0xbeef,
130 /* dispatching modes */
131 static const char *disp_modes
[] = {
132 [DISP_HORIZONTAL
] = N_("horizontal"),
133 [DISP_VERTICAL
] = N_("vertical")
136 static struct polarization_modes polar_modes
[] = {
137 [POLAR_UNKNOWN
] = {"U", "-"},
138 [POLAR_VLOW
] = {"VL", "vert-low"},
139 [POLAR_VMEDIUM
] = {"VM", "vert-medium"},
140 [POLAR_VHIGH
] = {"VH", "vert-high"},
141 [POLAR_HORIZONTAL
] = {"H", "horizontal"},
144 static int maxcpus
; /* size in bits of kernel cpu mask */
146 #define is_cpu_online(_d, _cpu) \
147 ((_d) && (_d)->online ? \
148 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
149 #define is_cpu_present(_d, _cpu) \
150 ((_d) && (_d)->present ? \
151 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)
153 #define real_cpu_num(_d, _i) ((_d)->idx2cpunum[(_i)])
166 COL_CPU_POLARIZATION
,
185 COL_CACHE_COHERENCYSIZE
189 /* column description
191 struct lscpu_coldesc
{
196 unsigned int is_abbr
:1; /* name is abbreviation */
199 static struct lscpu_coldesc coldescs_cpu
[] =
201 [COL_CPU_CPU
] = { "CPU", N_("logical CPU number"), SCOLS_FL_RIGHT
, 1 },
202 [COL_CPU_CORE
] = { "CORE", N_("logical core number"), SCOLS_FL_RIGHT
},
203 [COL_CPU_SOCKET
] = { "SOCKET", N_("logical socket number"), SCOLS_FL_RIGHT
},
204 [COL_CPU_NODE
] = { "NODE", N_("logical NUMA node number"), SCOLS_FL_RIGHT
},
205 [COL_CPU_BOOK
] = { "BOOK", N_("logical book number"), SCOLS_FL_RIGHT
},
206 [COL_CPU_DRAWER
] = { "DRAWER", N_("logical drawer number"), SCOLS_FL_RIGHT
},
207 [COL_CPU_CACHE
] = { "CACHE", N_("shows how caches are shared between CPUs") },
208 [COL_CPU_POLARIZATION
] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
209 [COL_CPU_ADDRESS
] = { "ADDRESS", N_("physical address of a CPU") },
210 [COL_CPU_CONFIGURED
] = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
211 [COL_CPU_ONLINE
] = { "ONLINE", N_("shows if Linux currently makes use of the CPU"), SCOLS_FL_RIGHT
},
212 [COL_CPU_MAXMHZ
] = { "MAXMHZ", N_("shows the maximum MHz of the CPU"), SCOLS_FL_RIGHT
},
213 [COL_CPU_MINMHZ
] = { "MINMHZ", N_("shows the minimum MHz of the CPU"), SCOLS_FL_RIGHT
}
216 static struct lscpu_coldesc coldescs_cache
[] =
218 [COL_CACHE_ALLSIZE
] = { "ALL-SIZE", N_("size of all system caches"), SCOLS_FL_RIGHT
},
219 [COL_CACHE_LEVEL
] = { "LEVEL", N_("cache level"), SCOLS_FL_RIGHT
},
220 [COL_CACHE_NAME
] = { "NAME", N_("cache name") },
221 [COL_CACHE_ONESIZE
] = { "ONE-SIZE", N_("size of one cache"), SCOLS_FL_RIGHT
},
222 [COL_CACHE_TYPE
] = { "TYPE", N_("cache type") },
223 [COL_CACHE_WAYS
] = { "WAYS", N_("ways of associativity"), SCOLS_FL_RIGHT
},
224 [COL_CACHE_ALLOCPOL
] = { "ALLOC-POLICY", N_("allocation policy") },
225 [COL_CACHE_WRITEPOL
] = { "WRITE-POLICY", N_("write policy") },
226 [COL_CACHE_PHYLINE
] = { "PHY-LINE", N_("number of physical cache line per cache t"), SCOLS_FL_RIGHT
},
227 [COL_CACHE_SETS
] = { "SETS", N_("number of sets in the cache; set lines has the same cache index"), SCOLS_FL_RIGHT
},
228 [COL_CACHE_COHERENCYSIZE
] = { "COHERENCY-SIZE", N_("minimum amount of data in bytes transferred from memory to cache"), SCOLS_FL_RIGHT
}
232 static int get_cache_full_size(struct lscpu_desc
*desc
, struct cpu_cache
*ca
, uint64_t *res
);
235 cpu_column_name_to_id(const char *name
, size_t namesz
)
239 for (i
= 0; i
< ARRAY_SIZE(coldescs_cpu
); i
++) {
240 const char *cn
= coldescs_cpu
[i
].name
;
242 if (!strncasecmp(name
, cn
, namesz
) && !*(cn
+ namesz
))
245 warnx(_("unknown column: %s"), name
);
250 cache_column_name_to_id(const char *name
, size_t namesz
)
254 for (i
= 0; i
< ARRAY_SIZE(coldescs_cache
); i
++) {
255 const char *cn
= coldescs_cache
[i
].name
;
257 if (!strncasecmp(name
, cn
, namesz
) && !*(cn
+ namesz
))
260 warnx(_("unknown column: %s"), name
);
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <key>"
 *
 * Returns 1 and fills *value (malloc'ed via xstrdup, caller frees) when
 * @line matches @pattern; returns 0 otherwise. First match wins: if
 * *value is already set the line is ignored.
 */
static int
lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces; cast to unsigned char -- passing a negative char
	 * to isspace() is undefined behavior (CERT STR37-C) */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* trim trailing white space; v points at a non-space char, so the
	 * backward scan cannot underrun the value */
	len = strlen(line) - 1;
	for (p = line + len; isspace((unsigned char) *(p-1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
307 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
308 * part of the cache topology information within the sysfs filesystem.
309 * This is true for all shared caches on e.g. s390. When there are layers of
310 * hypervisors in between it is not knows which CPUs share which caches.
311 * Therefore information about shared caches is only available in
314 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
317 lookup_cache(char *line
, struct lscpu_desc
*desc
)
319 struct cpu_cache
*cache
;
324 /* Make sure line starts with "cache<nr> :" */
325 if (strncmp(line
, "cache", 5))
327 for (p
= line
+ 5; isdigit(*p
); p
++);
328 for (; isspace(*p
); p
++);
332 p
= strstr(line
, "scope=") + 6;
333 /* Skip private caches, also present in sysfs */
334 if (!p
|| strncmp(p
, "Private", 7) == 0)
336 p
= strstr(line
, "level=");
337 if (!p
|| sscanf(p
, "level=%d", &level
) != 1)
339 p
= strstr(line
, "type=") + 5;
343 if (strncmp(p
, "Data", 4) == 0)
345 else if (strncmp(p
, "Instruction", 11) == 0)
347 else if (strncmp(p
, "Unified", 7) == 0)
349 p
= strstr(line
, "size=");
350 if (!p
|| sscanf(p
, "size=%lld", &size
) != 1)
354 desc
->ecaches
= xrealloc(desc
->ecaches
,
355 desc
->necaches
* sizeof(struct cpu_cache
));
356 cache
= &desc
->ecaches
[desc
->necaches
- 1];
357 memset(cache
, 0 , sizeof(*cache
));
359 if (type
== 'i' || type
== 'd')
360 xasprintf(&cache
->name
, "L%d%c", level
, type
);
362 xasprintf(&cache
->name
, "L%d", level
);
364 cache
->level
= level
;
365 cache
->size
= size
* 1024;
367 cache
->type
= type
== 'i' ? xstrdup("Instruction") :
368 type
== 'd' ? xstrdup("Data") :
369 type
== 'u' ? xstrdup("Unified") : NULL
;
373 /* Don't init the mode for platforms where we are not able to
374 * detect that CPU supports 64-bit mode.
377 init_mode(struct lscpu_modifier
*mod
)
381 if (mod
->system
== SYSTEM_SNAPSHOT
)
382 /* reading info from any /{sys,proc} dump, don't mix it with
383 * information about our real CPU */
386 #if defined(__alpha__) || defined(__ia64__)
387 m
|= MODE_64BIT
; /* 64bit platforms only */
389 /* platforms with 64bit flag in /proc/cpuinfo, define
390 * 32bit default here */
391 #if defined(__i386__) || defined(__x86_64__) || \
392 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
396 #if defined(__aarch64__)
398 /* personality() is the most reliable way (since 4.7)
399 * to determine aarch32 support */
400 int pers
= personality(PER_LINUX32
);
411 #if defined(HAVE_LIBRTAS)
412 #define PROCESSOR_MODULE_INFO 43
/* Decode a big-endian 16-bit value stored at @offset in @buf.
 *
 * The bytes must be read as unsigned: on platforms where plain char is
 * signed, a byte >= 0x80 would sign-extend and corrupt the result
 * (e.g. {0xff, 0x01} would not yield 0xff01).
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
418 static void read_physical_info_powerpc(struct lscpu_desc
*desc
)
423 desc
->physsockets
= desc
->physchips
= desc
->physcoresperchip
= 0;
425 rc
= rtas_get_sysparm(PROCESSOR_MODULE_INFO
, sizeof(buf
), buf
);
429 len
= strbe16toh(buf
, 0);
433 ntypes
= strbe16toh(buf
, 2);
439 desc
->physsockets
= strbe16toh(buf
, 4);
440 desc
->physchips
= strbe16toh(buf
, 6);
441 desc
->physcoresperchip
= strbe16toh(buf
, 8);
444 static void read_physical_info_powerpc(
445 struct lscpu_desc
*desc
__attribute__((__unused__
)))
450 static int cmp_vulnerability_name(const void *a0
, const void *b0
)
452 const struct cpu_vulnerability
*a
= (const struct cpu_vulnerability
*) a0
,
453 *b
= (const struct cpu_vulnerability
*) b0
;
454 return strcmp(a
->name
, b
->name
);
457 static void read_vulnerabilities(struct lscpu_desc
*desc
)
460 DIR *dir
= ul_path_opendir(desc
->syscpu
, "vulnerabilities");
468 while (xreaddir(dir
))
474 desc
->vuls
= xcalloc(n
, sizeof(struct cpu_vulnerability
));
476 while (desc
->nvuls
< n
&& (d
= xreaddir(dir
))) {
478 struct cpu_vulnerability
*vu
;
480 #ifdef _DIRENT_HAVE_D_TYPE
481 if (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
)
484 if (ul_path_readf_string(desc
->syscpu
, &str
,
485 "vulnerabilities/%s", d
->d_name
) <= 0)
488 vu
= &desc
->vuls
[desc
->nvuls
++];
491 vu
->name
= xstrdup(d
->d_name
);
492 *vu
->name
= toupper(*vu
->name
);
493 strrep(vu
->name
, '_', ' ');
497 p
= (char *) startswith(vu
->text
, "Mitigation");
500 strrem(vu
->text
, ':');
505 qsort(desc
->vuls
, desc
->nvuls
,
506 sizeof(struct cpu_vulnerability
), cmp_vulnerability_name
);
513 read_basicinfo(struct lscpu_desc
*desc
, struct lscpu_modifier
*mod
)
517 struct utsname utsbuf
;
519 cpu_set_t
*cpuset
= NULL
;
522 if (uname(&utsbuf
) == -1)
523 err(EXIT_FAILURE
, _("error: uname failed"));
525 fp
= ul_path_fopen(desc
->procfs
, "r", "cpuinfo");
527 err(EXIT_FAILURE
, _("cannot open %s"), "/proc/cpuinfo");
528 desc
->arch
= xstrdup(utsbuf
.machine
);
531 while (fgets(buf
, sizeof(buf
), fp
) != NULL
) {
532 if (lookup(buf
, "vendor", &desc
->vendor
)) ;
533 else if (lookup(buf
, "vendor_id", &desc
->vendor
)) ;
534 else if (lookup(buf
, "CPU implementer", &desc
->vendor
)) ; /* ARM and aarch64 */
535 else if (lookup(buf
, "family", &desc
->family
)) ;
536 else if (lookup(buf
, "cpu family", &desc
->family
)) ;
537 else if (lookup(buf
, "model", &desc
->model
)) ;
538 else if (lookup(buf
, "CPU part", &desc
->model
)) ; /* ARM and aarch64 */
539 else if (lookup(buf
, "model name", &desc
->modelname
)) ;
540 else if (lookup(buf
, "stepping", &desc
->stepping
)) ;
541 else if (lookup(buf
, "CPU variant", &desc
->stepping
)) ; /* aarch64 */
542 else if (lookup(buf
, "cpu MHz", &desc
->mhz
)) ;
543 else if (lookup(buf
, "cpu MHz dynamic", &desc
->dynamic_mhz
)) ; /* s390 */
544 else if (lookup(buf
, "cpu MHz static", &desc
->static_mhz
)) ; /* s390 */
545 else if (lookup(buf
, "flags", &desc
->flags
)) ; /* x86 */
546 else if (lookup(buf
, "features", &desc
->flags
)) ; /* s390 */
547 else if (lookup(buf
, "Features", &desc
->flags
)) ; /* aarch64 */
548 else if (lookup(buf
, "type", &desc
->flags
)) ; /* sparc64 */
549 else if (lookup(buf
, "bogomips", &desc
->bogomips
)) ;
550 else if (lookup(buf
, "BogoMIPS", &desc
->bogomips
)) ; /* aarch64 */
551 else if (lookup(buf
, "bogomips per cpu", &desc
->bogomips
)) ; /* s390 */
552 else if (lookup(buf
, "cpu", &desc
->cpu
)) ;
553 else if (lookup(buf
, "revision", &desc
->revision
)) ;
554 else if (lookup(buf
, "CPU revision", &desc
->revision
)) ; /* aarch64 */
555 else if (lookup(buf
, "max thread id", &desc
->mtid
)) ; /* s390 */
556 else if (lookup(buf
, "address sizes", &desc
->addrsz
)) ; /* x86 */
557 else if (lookup_cache(buf
, desc
)) ;
562 desc
->mode
= init_mode(mod
);
565 snprintf(buf
, sizeof(buf
), " %s ", desc
->flags
);
566 if (strstr(buf
, " svm "))
567 desc
->virtflag
= xstrdup("svm");
568 else if (strstr(buf
, " vmx "))
569 desc
->virtflag
= xstrdup("vmx");
570 if (strstr(buf
, " lm "))
571 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* x86_64 */
572 if (strstr(buf
, " zarch "))
573 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* s390x */
574 if (strstr(buf
, " sun4v ") || strstr(buf
, " sun4u "))
575 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* sparc64 */
578 if (desc
->arch
&& mod
->system
!= SYSTEM_SNAPSHOT
) {
579 if (strcmp(desc
->arch
, "ppc64") == 0)
580 desc
->mode
|= MODE_32BIT
| MODE_64BIT
;
581 else if (strcmp(desc
->arch
, "ppc") == 0)
582 desc
->mode
|= MODE_32BIT
;
587 if (ul_path_read_s32(desc
->syscpu
, &maxcpus
, "kernel_max") == 0)
588 /* note that kernel_max is maximum index [NR_CPUS-1] */
591 else if (mod
->system
== SYSTEM_LIVE
)
592 /* the root is '/' so we are working with data from the current kernel */
593 maxcpus
= get_max_number_of_cpus();
596 /* error or we are reading some /sys snapshot instead of the
597 * real /sys, let's use any crazy number... */
600 setsize
= CPU_ALLOC_SIZE(maxcpus
);
602 if (ul_path_readf_cpulist(desc
->syscpu
, &cpuset
, maxcpus
, "possible") == 0) {
605 desc
->ncpuspos
= CPU_COUNT_S(setsize
, cpuset
);
606 desc
->idx2cpunum
= xcalloc(desc
->ncpuspos
, sizeof(int));
608 for (num
= 0, idx
= 0; num
< maxcpus
; num
++) {
609 if (CPU_ISSET_S(num
, setsize
, cpuset
))
610 desc
->idx2cpunum
[idx
++] = num
;
615 err(EXIT_FAILURE
, _("failed to determine number of CPUs: %s"),
616 _PATH_SYS_CPU
"/possible");
619 /* get mask for present CPUs */
620 if (ul_path_readf_cpulist(desc
->syscpu
, &desc
->present
, maxcpus
, "present") == 0)
621 desc
->ncpus
= CPU_COUNT_S(setsize
, desc
->present
);
623 /* get mask for online CPUs */
624 if (ul_path_readf_cpulist(desc
->syscpu
, &desc
->online
, maxcpus
, "online") == 0)
625 desc
->nthreads
= CPU_COUNT_S(setsize
, desc
->online
);
627 /* get dispatching mode */
628 if (ul_path_read_s32(desc
->syscpu
, &desc
->dispatching
, "dispatching") != 0)
629 desc
->dispatching
= -1;
631 /* get cpufreq boost mode */
632 if (ul_path_read_s32(desc
->syscpu
, &desc
->freqboost
, "cpufreq/boost") != 0)
633 desc
->freqboost
= -1;
635 if (mod
->system
== SYSTEM_LIVE
)
636 read_physical_info_powerpc(desc
);
638 if ((fp
= ul_path_fopen(desc
->procfs
, "r", "sysinfo"))) {
639 while (fgets(buf
, sizeof(buf
), fp
) != NULL
) {
640 if (lookup(buf
, "Type", &desc
->machinetype
))
646 /* vulnerabilities */
647 if (ul_path_access(desc
->syscpu
, F_OK
, "vulnerabilities") == 0)
648 read_vulnerabilities(desc
);
652 has_pci_device(struct lscpu_desc
*desc
, unsigned int vendor
, unsigned int device
)
655 unsigned int num
, fn
, ven
, dev
;
658 f
= ul_path_fopen(desc
->procfs
, "r", "bus/pci/devices");
662 /* for more details about bus/pci/devices format see
663 * drivers/pci/proc.c in linux kernel
665 while(fscanf(f
, "%02x%02x\t%04x%04x\t%*[^\n]",
666 &num
, &fn
, &ven
, &dev
) == 4) {
668 if (ven
== vendor
&& dev
== device
)
678 #if defined(__x86_64__) || defined(__i386__)
681 * This CPUID leaf returns the information about the hypervisor.
682 * EAX : maximum input value for CPUID supported by the hypervisor.
683 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
685 #define HYPERVISOR_INFO_LEAF 0x40000000
688 cpuid(unsigned int op
, unsigned int *eax
, unsigned int *ebx
,
689 unsigned int *ecx
, unsigned int *edx
)
692 #if defined(__PIC__) && defined(__i386__)
693 /* x86 PIC cannot clobber ebx -- gcc bitches */
709 read_hypervisor_cpuid(struct lscpu_desc
*desc
)
711 unsigned int eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
712 char hyper_vendor_id
[13];
714 memset(hyper_vendor_id
, 0, sizeof(hyper_vendor_id
));
716 cpuid(HYPERVISOR_INFO_LEAF
, &eax
, &ebx
, &ecx
, &edx
);
717 memcpy(hyper_vendor_id
+ 0, &ebx
, 4);
718 memcpy(hyper_vendor_id
+ 4, &ecx
, 4);
719 memcpy(hyper_vendor_id
+ 8, &edx
, 4);
720 hyper_vendor_id
[12] = '\0';
722 if (!hyper_vendor_id
[0])
725 if (!strncmp("XenVMMXenVMM", hyper_vendor_id
, 12))
726 desc
->hyper
= HYPER_XEN
;
727 else if (!strncmp("KVMKVMKVM", hyper_vendor_id
, 9))
728 desc
->hyper
= HYPER_KVM
;
729 else if (!strncmp("Microsoft Hv", hyper_vendor_id
, 12))
730 desc
->hyper
= HYPER_MSHV
;
731 else if (!strncmp("VMwareVMware", hyper_vendor_id
, 12))
732 desc
->hyper
= HYPER_VMWARE
;
733 else if (!strncmp("UnisysSpar64", hyper_vendor_id
, 12))
734 desc
->hyper
= HYPER_SPAR
;
737 #else /* ! (__x86_64__ || __i386__) */
739 read_hypervisor_cpuid(struct lscpu_desc
*desc
__attribute__((__unused__
)))
744 static int is_devtree_compatible(struct lscpu_desc
*desc
, const char *str
)
746 FILE *fd
= ul_path_fopen(desc
->procfs
, "r", "device-tree/compatible");
752 memset(buf
, 0, sizeof(buf
));
753 len
= fread(buf
, 1, sizeof(buf
) - 1, fd
);
756 for (i
= 0; i
< len
;) {
757 if (!strcmp(&buf
[i
], str
))
759 i
+= strlen(&buf
[i
]);
768 read_hypervisor_powerpc(struct lscpu_desc
*desc
)
770 assert(!desc
->hyper
);
772 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
773 if (ul_path_access(desc
->procfs
, F_OK
, "iSeries") == 0) {
774 desc
->hyper
= HYPER_OS400
;
775 desc
->virtype
= VIRT_PARA
;
777 /* PowerNV (POWER Non-Virtualized, bare-metal) */
778 } else if (is_devtree_compatible(desc
, "ibm,powernv")) {
779 desc
->hyper
= HYPER_NONE
;
780 desc
->virtype
= VIRT_NONE
;
782 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
783 } else if (ul_path_access(desc
->procfs
, F_OK
, "device-tree/ibm,partition-name") == 0
784 && ul_path_access(desc
->procfs
, F_OK
, "device-tree/hmc-managed?") == 0
785 && ul_path_access(desc
->procfs
, F_OK
, "device-tree/chosen/qemu,graphic-width") != 0) {
788 desc
->hyper
= HYPER_PHYP
;
789 desc
->virtype
= VIRT_PARA
;
791 fd
= ul_path_fopen(desc
->procfs
, "r", "device-tree/ibm,partition-name");
794 if (fscanf(fd
, "%255s", buf
) == 1 && !strcmp(buf
, "full"))
795 desc
->virtype
= VIRT_NONE
;
800 } else if (is_devtree_compatible(desc
, "qemu,pseries")) {
801 desc
->hyper
= HYPER_KVM
;
802 desc
->virtype
= VIRT_PARA
;
807 #ifdef INCLUDE_VMWARE_BDOOR
809 #define VMWARE_BDOOR_MAGIC 0x564D5868
810 #define VMWARE_BDOOR_PORT 0x5658
811 #define VMWARE_BDOOR_CMD_GETVERSION 10
813 static UL_ASAN_BLACKLIST
814 void vmware_bdoor(uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
817 #if defined(__PIC__) && defined(__i386__)
818 /* x86 PIC cannot clobber ebx -- gcc bitches */
830 : "0" (VMWARE_BDOOR_MAGIC
),
831 "1" (VMWARE_BDOOR_CMD_GETVERSION
),
832 "2" (VMWARE_BDOOR_PORT
),
837 static jmp_buf segv_handler_env
;
840 segv_handler(__attribute__((__unused__
)) int sig
,
841 __attribute__((__unused__
)) siginfo_t
*info
,
842 __attribute__((__unused__
)) void *ignored
)
844 siglongjmp(segv_handler_env
, 1);
848 is_vmware_platform(void)
850 uint32_t eax
, ebx
, ecx
, edx
;
851 struct sigaction act
, oact
;
854 * FIXME: Not reliable for non-root users. Note it works as expected if
855 * vmware_bdoor() is not optimized for PIE, but then it fails to build
856 * on 32bit x86 systems. See lscpu git log for more details (commit
857 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
863 * The assembly routine for vmware detection works
864 * fine under vmware, even if ran as regular user. But
865 * on real HW or under other hypervisors, it segfaults (which is
866 * expected). So we temporarily install SIGSEGV handler to catch
867 * the signal. All this magic is needed because lscpu
868 * isn't supposed to require root privileges.
870 if (sigsetjmp(segv_handler_env
, 1))
873 memset(&act
, 0, sizeof(act
));
874 act
.sa_sigaction
= segv_handler
;
875 act
.sa_flags
= SA_SIGINFO
;
877 if (sigaction(SIGSEGV
, &act
, &oact
))
878 err(EXIT_FAILURE
, _("cannot set signal handler"));
880 vmware_bdoor(&eax
, &ebx
, &ecx
, &edx
);
882 if (sigaction(SIGSEGV
, &oact
, NULL
))
883 err(EXIT_FAILURE
, _("cannot restore signal handler"));
885 return eax
!= (uint32_t)-1 && ebx
== VMWARE_BDOOR_MAGIC
;
888 #else /* ! INCLUDE_VMWARE_BDOOR */
891 is_vmware_platform(void)
896 #endif /* INCLUDE_VMWARE_BDOOR */
899 read_hypervisor(struct lscpu_desc
*desc
, struct lscpu_modifier
*mod
)
903 /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
905 if ((fd
= ul_path_fopen(desc
->procfs
, "r", "sys/kernel/osrelease"))) {
908 if (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
909 if (strstr(buf
, "Microsoft")) {
910 desc
->hyper
= HYPER_WSL
;
911 desc
->virtype
= VIRT_CONT
;
919 if (mod
->system
!= SYSTEM_SNAPSHOT
) {
920 read_hypervisor_cpuid(desc
);
922 desc
->hyper
= read_hypervisor_dmi();
923 if (!desc
->hyper
&& is_vmware_platform())
924 desc
->hyper
= HYPER_VMWARE
;
928 desc
->virtype
= VIRT_FULL
;
930 if (desc
->hyper
== HYPER_XEN
) {
933 fd
= ul_prefix_fopen(desc
->prefix
, "r", _PATH_SYS_HYP_FEATURES
);
935 if (fd
&& fscanf(fd
, "%x", &features
) == 1) {
937 if (features
& XEN_FEATURES_PV_MASK
)
938 desc
->virtype
= VIRT_PARA
;
940 else if ((features
& XEN_FEATURES_PVH_MASK
)
941 == XEN_FEATURES_PVH_MASK
)
942 desc
->virtype
= VIRT_PARA
;
947 } else if (read_hypervisor_powerpc(desc
) > 0) {}
949 /* Xen para-virt or dom0 */
950 else if (ul_path_access(desc
->procfs
, F_OK
, "xen") == 0) {
953 fd
= ul_path_fopen(desc
->procfs
, "r", "xen/capabilities");
957 if (fscanf(fd
, "%255s", buf
) == 1 &&
958 !strcmp(buf
, "control_d"))
962 desc
->virtype
= dom0
? VIRT_NONE
: VIRT_PARA
;
963 desc
->hyper
= HYPER_XEN
;
965 /* Xen full-virt on non-x86_64 */
966 } else if (has_pci_device(desc
, hv_vendor_pci
[HYPER_XEN
], hv_graphics_pci
[HYPER_XEN
])) {
967 desc
->hyper
= HYPER_XEN
;
968 desc
->virtype
= VIRT_FULL
;
969 } else if (has_pci_device(desc
, hv_vendor_pci
[HYPER_VMWARE
], hv_graphics_pci
[HYPER_VMWARE
])) {
970 desc
->hyper
= HYPER_VMWARE
;
971 desc
->virtype
= VIRT_FULL
;
972 } else if (has_pci_device(desc
, hv_vendor_pci
[HYPER_VBOX
], hv_graphics_pci
[HYPER_VBOX
])) {
973 desc
->hyper
= HYPER_VBOX
;
974 desc
->virtype
= VIRT_FULL
;
977 } else if ((fd
= ul_path_fopen(desc
->procfs
, "r", "sysinfo"))) {
980 desc
->hyper
= HYPER_IBM
;
981 desc
->hypervisor
= "PR/SM";
982 desc
->virtype
= VIRT_FULL
;
983 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
986 if (!strstr(buf
, "Control Program:"))
988 if (!strstr(buf
, "KVM"))
989 desc
->hyper
= HYPER_IBM
;
991 desc
->hyper
= HYPER_KVM
;
992 p
= strchr(buf
, ':');
995 xasprintf(&str
, "%s", p
+ 1);
997 /* remove leading, trailing and repeating whitespace */
1000 desc
->hypervisor
= str
;
1001 str
+= strlen(str
) - 1;
1002 while ((*str
== '\n') || (*str
== ' '))
1004 while ((str
= strstr(desc
->hypervisor
, " ")))
1005 memmove(str
, str
+ 1, strlen(str
));
1011 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
1012 * /proc/bc should not */
1013 else if (ul_path_access(desc
->procfs
, F_OK
, "vz") == 0 &&
1014 ul_path_access(desc
->procfs
, F_OK
, "bc") != 0) {
1015 desc
->hyper
= HYPER_PARALLELS
;
1016 desc
->virtype
= VIRT_CONT
;
1019 } else if (desc
->vendor
&&
1020 (strcmp(desc
->vendor
, "PowerVM Lx86") == 0 ||
1021 strcmp(desc
->vendor
, "IBM/S390") == 0)) {
1022 desc
->hyper
= HYPER_IBM
;
1023 desc
->virtype
= VIRT_FULL
;
1025 /* User-mode-linux */
1026 } else if (desc
->modelname
&& strstr(desc
->modelname
, "UML")) {
1027 desc
->hyper
= HYPER_UML
;
1028 desc
->virtype
= VIRT_PARA
;
1031 } else if ((fd
= ul_path_fopen(desc
->procfs
, "r", "self/status"))) {
1035 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
1036 if (lookup(buf
, "VxID", &val
))
1044 while (isdigit(*val
))
1047 desc
->hyper
= HYPER_VSERVER
;
1048 desc
->virtype
= VIRT_CONT
;
1055 /* add @set to the @ary, unnecessary set is deallocated. */
1056 static int add_cpuset_to_array(cpu_set_t
**ary
, int *items
, cpu_set_t
*set
)
1059 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1064 for (i
= 0; i
< *items
; i
++) {
1065 if (CPU_EQUAL_S(setsize
, set
, ary
[i
]))
1078 read_topology(struct lscpu_desc
*desc
, int idx
)
1080 cpu_set_t
*thread_siblings
, *core_siblings
;
1081 cpu_set_t
*book_siblings
, *drawer_siblings
;
1082 int coreid
, socketid
, bookid
, drawerid
;
1083 int i
, num
= real_cpu_num(desc
, idx
);
1085 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/topology/thread_siblings", num
) != 0)
1088 ul_path_readf_cpuset(desc
->syscpu
, &thread_siblings
, maxcpus
,
1089 "cpu%d/topology/thread_siblings", num
);
1090 ul_path_readf_cpuset(desc
->syscpu
, &core_siblings
, maxcpus
,
1091 "cpu%d/topology/core_siblings", num
);
1092 ul_path_readf_cpuset(desc
->syscpu
, &book_siblings
, maxcpus
,
1093 "cpu%d/topology/book_siblings", num
);
1094 ul_path_readf_cpuset(desc
->syscpu
, &drawer_siblings
, maxcpus
,
1095 "cpu%d/topology/drawer_siblings", num
);
1097 if (ul_path_readf_s32(desc
->syscpu
, &coreid
, "cpu%d/topology/core_id", num
) != 0)
1100 if (ul_path_readf_s32(desc
->syscpu
, &socketid
, "cpu%d/topology/physical_package_id", num
) != 0)
1103 if (ul_path_readf_s32(desc
->syscpu
, &bookid
, "cpu%d/topology/book_id", num
) != 0)
1106 if (ul_path_readf_s32(desc
->syscpu
, &drawerid
, "cpu%d/topology/drawer_id", num
) != 0)
1109 if (!desc
->coremaps
) {
1110 int ndrawers
, nbooks
, nsockets
, ncores
, nthreads
;
1111 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1113 /* threads within one core */
1114 nthreads
= CPU_COUNT_S(setsize
, thread_siblings
);
1118 /* cores within one socket */
1119 ncores
= CPU_COUNT_S(setsize
, core_siblings
) / nthreads
;
1123 /* number of sockets within one book. Because of odd /
1124 * non-present cpu maps and to keep calculation easy we make
1125 * sure that nsockets and nbooks is at least 1.
1127 nsockets
= desc
->ncpus
/ nthreads
/ ncores
;
1131 /* number of books */
1132 nbooks
= desc
->ncpus
/ nthreads
/ ncores
/ nsockets
;
1136 /* number of drawers */
1137 ndrawers
= desc
->ncpus
/ nbooks
/ nthreads
/ ncores
/ nsockets
;
1141 /* all threads, see also read_basicinfo()
1142 * -- fallback for kernels without
1143 * /sys/devices/system/cpu/online.
1145 if (!desc
->nthreads
)
1146 desc
->nthreads
= ndrawers
* nbooks
* nsockets
* ncores
* nthreads
;
1148 /* For each map we make sure that it can have up to ncpuspos
1149 * entries. This is because we cannot reliably calculate the
1150 * number of cores, sockets and books on all architectures.
1151 * E.g. completely virtualized architectures like s390 may
1152 * have multiple sockets of different sizes.
1154 desc
->coremaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1155 desc
->socketmaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1156 desc
->coreids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1157 desc
->socketids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1158 for (i
= 0; i
< desc
->ncpuspos
; i
++)
1159 desc
->coreids
[i
] = desc
->socketids
[i
] = -1;
1160 if (book_siblings
) {
1161 desc
->bookmaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1162 desc
->bookids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1163 for (i
= 0; i
< desc
->ncpuspos
; i
++)
1164 desc
->bookids
[i
] = -1;
1166 if (drawer_siblings
) {
1167 desc
->drawermaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1168 desc
->drawerids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1169 for (i
= 0; i
< desc
->ncpuspos
; i
++)
1170 desc
->drawerids
[i
] = -1;
1174 add_cpuset_to_array(desc
->socketmaps
, &desc
->nsockets
, core_siblings
);
1175 desc
->coreids
[idx
] = coreid
;
1176 add_cpuset_to_array(desc
->coremaps
, &desc
->ncores
, thread_siblings
);
1177 desc
->socketids
[idx
] = socketid
;
1178 if (book_siblings
) {
1179 add_cpuset_to_array(desc
->bookmaps
, &desc
->nbooks
, book_siblings
);
1180 desc
->bookids
[idx
] = bookid
;
1182 if (drawer_siblings
) {
1183 add_cpuset_to_array(desc
->drawermaps
, &desc
->ndrawers
, drawer_siblings
);
1184 desc
->drawerids
[idx
] = drawerid
;
1189 read_polarization(struct lscpu_desc
*desc
, int idx
)
1192 int num
= real_cpu_num(desc
, idx
);
1194 if (desc
->dispatching
< 0)
1196 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/polarization", num
) != 0)
1198 if (!desc
->polarization
)
1199 desc
->polarization
= xcalloc(desc
->ncpuspos
, sizeof(int));
1201 ul_path_readf_buffer(desc
->syscpu
, mode
, sizeof(mode
), "cpu%d/polarization", num
);
1203 if (strncmp(mode
, "vertical:low", sizeof(mode
)) == 0)
1204 desc
->polarization
[idx
] = POLAR_VLOW
;
1205 else if (strncmp(mode
, "vertical:medium", sizeof(mode
)) == 0)
1206 desc
->polarization
[idx
] = POLAR_VMEDIUM
;
1207 else if (strncmp(mode
, "vertical:high", sizeof(mode
)) == 0)
1208 desc
->polarization
[idx
] = POLAR_VHIGH
;
1209 else if (strncmp(mode
, "horizontal", sizeof(mode
)) == 0)
1210 desc
->polarization
[idx
] = POLAR_HORIZONTAL
;
1212 desc
->polarization
[idx
] = POLAR_UNKNOWN
;
1216 read_address(struct lscpu_desc
*desc
, int idx
)
1218 int num
= real_cpu_num(desc
, idx
);
1220 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/address", num
) != 0)
1222 if (!desc
->addresses
)
1223 desc
->addresses
= xcalloc(desc
->ncpuspos
, sizeof(int));
1224 ul_path_readf_s32(desc
->syscpu
, &desc
->addresses
[idx
], "cpu%d/address", num
);
1228 read_configured(struct lscpu_desc
*desc
, int idx
)
1230 int num
= real_cpu_num(desc
, idx
);
1232 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/configure", num
) != 0)
1234 if (!desc
->configured
)
1235 desc
->configured
= xcalloc(desc
->ncpuspos
, sizeof(int));
1236 ul_path_readf_s32(desc
->syscpu
, &desc
->configured
[idx
], "cpu%d/configure", num
);
1239 /* Read overall maximum frequency of cpu */
1241 cpu_max_mhz(struct lscpu_desc
*desc
, char *buf
, size_t bufsz
)
1244 float cpu_freq
= 0.0;
1245 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1247 if (desc
->present
) {
1248 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1249 if (CPU_ISSET_S(real_cpu_num(desc
, i
), setsize
, desc
->present
)
1250 && desc
->maxmhz
[i
]) {
1251 float freq
= atof(desc
->maxmhz
[i
]);
1253 if (freq
> cpu_freq
)
1258 snprintf(buf
, bufsz
, "%.4f", cpu_freq
);
1262 /* Read overall minimum frequency of cpu */
1264 cpu_min_mhz(struct lscpu_desc
*desc
, char *buf
, size_t bufsz
)
1267 float cpu_freq
= -1.0;
1268 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1270 if (desc
->present
) {
1271 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1272 if (CPU_ISSET_S(real_cpu_num(desc
, i
), setsize
, desc
->present
)
1273 && desc
->minmhz
[i
]) {
1274 float freq
= atof(desc
->minmhz
[i
]);
1276 if (cpu_freq
< 0.0 || freq
< cpu_freq
)
1281 snprintf(buf
, bufsz
, "%.4f", cpu_freq
);
1287 read_max_mhz(struct lscpu_desc
*desc
, int idx
)
1289 int num
= real_cpu_num(desc
, idx
);
1292 if (ul_path_readf_s32(desc
->syscpu
, &mhz
, "cpu%d/cpufreq/cpuinfo_max_freq", num
) != 0)
1295 desc
->maxmhz
= xcalloc(desc
->ncpuspos
, sizeof(char *));
1296 xasprintf(&desc
->maxmhz
[idx
], "%.4f", (float) mhz
/ 1000);
1300 read_min_mhz(struct lscpu_desc
*desc
, int idx
)
1302 int num
= real_cpu_num(desc
, idx
);
1305 if (ul_path_readf_s32(desc
->syscpu
, &mhz
, "cpu%d/cpufreq/cpuinfo_min_freq", num
) != 0)
1308 desc
->minmhz
= xcalloc(desc
->ncpuspos
, sizeof(char *));
1309 xasprintf(&desc
->minmhz
[idx
], "%.4f", (float) mhz
/ 1000);
1313 cachecmp(const void *a
, const void *b
)
1315 struct cpu_cache
*c1
= (struct cpu_cache
*) a
;
1316 struct cpu_cache
*c2
= (struct cpu_cache
*) b
;
1318 return strcmp(c2
->name
, c1
->name
);
1322 read_cache(struct lscpu_desc
*desc
, int idx
)
1326 int num
= real_cpu_num(desc
, idx
);
1328 if (!desc
->ncaches
) {
1329 while (ul_path_accessf(desc
->syscpu
, F_OK
,
1330 "cpu%d/cache/index%d",
1331 num
, desc
->ncaches
) == 0)
1336 desc
->caches
= xcalloc(desc
->ncaches
, sizeof(*desc
->caches
));
1338 for (i
= 0; i
< desc
->ncaches
; i
++) {
1339 struct cpu_cache
*ca
= &desc
->caches
[i
];
1342 if (ul_path_accessf(desc
->syscpu
, F_OK
,
1343 "cpu%d/cache/index%d", num
, i
) != 0)
1349 if (ul_path_readf_string(desc
->syscpu
, &ca
->type
,
1350 "cpu%d/cache/index%d/type", num
, i
) > 0) {
1351 if (!strcmp(ca
->type
, "Data"))
1353 else if (!strcmp(ca
->type
, "Instruction"))
1358 ul_path_readf_s32(desc
->syscpu
, &ca
->level
,
1359 "cpu%d/cache/index%d/level", num
, i
);
1361 snprintf(buf
, sizeof(buf
), "L%d%c", ca
->level
, type
);
1363 snprintf(buf
, sizeof(buf
), "L%d", ca
->level
);
1365 ca
->name
= xstrdup(buf
);
1367 ul_path_readf_u32(desc
->syscpu
, &ca
->ways_of_associativity
,
1368 "cpu%d/cache/index%d/ways_of_associativity", num
, i
);
1369 ul_path_readf_u32(desc
->syscpu
, &ca
->physical_line_partition
,
1370 "cpu%d/cache/index%d/physical_line_partition", num
, i
);
1371 ul_path_readf_u32(desc
->syscpu
, &ca
->number_of_sets
,
1372 "cpu%d/cache/index%d/number_of_sets", num
, i
);
1373 ul_path_readf_u32(desc
->syscpu
, &ca
->coherency_line_size
,
1374 "cpu%d/cache/index%d/coherency_line_size", num
, i
);
1376 ul_path_readf_string(desc
->syscpu
, &ca
->allocation_policy
,
1377 "cpu%d/cache/index%d/allocation_policy", num
, i
);
1378 ul_path_readf_string(desc
->syscpu
, &ca
->write_policy
,
1379 "cpu%d/cache/index%d/write_policy", num
, i
);
1382 if (ul_path_readf_buffer(desc
->syscpu
, buf
, sizeof(buf
),
1383 "cpu%d/cache/index%d/size", num
, i
) > 0)
1384 parse_size(buf
, &ca
->size
, NULL
);
1389 /* information about how CPUs share different caches */
1390 ul_path_readf_cpuset(desc
->syscpu
, &map
, maxcpus
,
1391 "cpu%d/cache/index%d/shared_cpu_map", num
, i
);
1393 if (!ca
->sharedmaps
)
1394 ca
->sharedmaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1395 add_cpuset_to_array(ca
->sharedmaps
, &ca
->nsharedmaps
, map
);
1399 static inline int is_node_dirent(struct dirent
*d
)
1403 #ifdef _DIRENT_HAVE_D_TYPE
1404 (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
) &&
1406 strncmp(d
->d_name
, "node", 4) == 0 &&
1407 isdigit_string(d
->d_name
+ 4);
/*
 * qsort() comparator for node numbers (ascending).
 *
 * Uses explicit comparisons rather than the classic "*a - *b": the
 * subtraction form invokes signed integer overflow (undefined
 * behavior) for operands of opposite sign near INT_MIN/INT_MAX.
 */
static int
nodecmp(const void *ap, const void *bp)
{
	const int *a = ap, *b = bp;

	return (*a > *b) - (*a < *b);
}
1418 read_nodes(struct lscpu_desc
*desc
)
1423 struct path_cxt
*sysnode
;
1427 sysnode
= ul_new_path(_PATH_SYS_NODE
);
1429 err(EXIT_FAILURE
, _("failed to initialize %s handler"), _PATH_SYS_NODE
);
1430 ul_path_set_prefix(sysnode
, desc
->prefix
);
1432 dir
= ul_path_opendir(sysnode
, NULL
);
1436 while ((d
= readdir(dir
))) {
1437 if (is_node_dirent(d
))
1441 if (!desc
->nnodes
) {
1446 desc
->nodemaps
= xcalloc(desc
->nnodes
, sizeof(cpu_set_t
*));
1447 desc
->idx2nodenum
= xmalloc(desc
->nnodes
* sizeof(int));
1450 while ((d
= readdir(dir
)) && i
< desc
->nnodes
) {
1451 if (is_node_dirent(d
))
1452 desc
->idx2nodenum
[i
++] = strtol_or_err(((d
->d_name
) + 4),
1453 _("Failed to extract the node number"));
1456 qsort(desc
->idx2nodenum
, desc
->nnodes
, sizeof(int), nodecmp
);
1458 /* information about how nodes share different CPUs */
1459 for (i
= 0; i
< desc
->nnodes
; i
++)
1460 ul_path_readf_cpuset(sysnode
, &desc
->nodemaps
[i
], maxcpus
,
1461 "node%d/cpumap", desc
->idx2nodenum
[i
]);
1463 ul_unref_path(sysnode
);
1467 get_cell_data(struct lscpu_desc
*desc
, int idx
, int col
,
1468 struct lscpu_modifier
*mod
,
1469 char *buf
, size_t bufsz
)
1471 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1473 int cpu
= real_cpu_num(desc
, idx
);
1479 snprintf(buf
, bufsz
, "%d", cpu
);
1482 if (mod
->physical
) {
1483 if (desc
->coreids
[idx
] == -1)
1484 snprintf(buf
, bufsz
, "-");
1486 snprintf(buf
, bufsz
, "%d", desc
->coreids
[idx
]);
1488 if (cpuset_ary_isset(cpu
, desc
->coremaps
,
1489 desc
->ncores
, setsize
, &i
) == 0)
1490 snprintf(buf
, bufsz
, "%zu", i
);
1493 case COL_CPU_SOCKET
:
1494 if (mod
->physical
) {
1495 if (desc
->socketids
[idx
] == -1)
1496 snprintf(buf
, bufsz
, "-");
1498 snprintf(buf
, bufsz
, "%d", desc
->socketids
[idx
]);
1500 if (cpuset_ary_isset(cpu
, desc
->socketmaps
,
1501 desc
->nsockets
, setsize
, &i
) == 0)
1502 snprintf(buf
, bufsz
, "%zu", i
);
1506 if (cpuset_ary_isset(cpu
, desc
->nodemaps
,
1507 desc
->nnodes
, setsize
, &i
) == 0)
1508 snprintf(buf
, bufsz
, "%d", desc
->idx2nodenum
[i
]);
1510 case COL_CPU_DRAWER
:
1511 if (mod
->physical
) {
1512 if (desc
->drawerids
[idx
] == -1)
1513 snprintf(buf
, bufsz
, "-");
1515 snprintf(buf
, bufsz
, "%d", desc
->drawerids
[idx
]);
1517 if (cpuset_ary_isset(cpu
, desc
->drawermaps
,
1518 desc
->ndrawers
, setsize
, &i
) == 0)
1519 snprintf(buf
, bufsz
, "%zu", i
);
1523 if (mod
->physical
) {
1524 if (desc
->bookids
[idx
] == -1)
1525 snprintf(buf
, bufsz
, "-");
1527 snprintf(buf
, bufsz
, "%d", desc
->bookids
[idx
]);
1529 if (cpuset_ary_isset(cpu
, desc
->bookmaps
,
1530 desc
->nbooks
, setsize
, &i
) == 0)
1531 snprintf(buf
, bufsz
, "%zu", i
);
1540 for (j
= desc
->ncaches
- 1; j
>= 0; j
--) {
1541 struct cpu_cache
*ca
= &desc
->caches
[j
];
1543 if (cpuset_ary_isset(cpu
, ca
->sharedmaps
,
1544 ca
->nsharedmaps
, setsize
, &i
) == 0) {
1545 int x
= snprintf(p
, sz
, "%zu", i
);
1546 if (x
< 0 || (size_t) x
>= sz
)
1554 *p
++ = mod
->compat
? ',' : ':';
1561 case COL_CPU_POLARIZATION
:
1562 if (desc
->polarization
) {
1563 int x
= desc
->polarization
[idx
];
1565 snprintf(buf
, bufsz
, "%s",
1566 mod
->mode
== OUTPUT_PARSABLE
?
1567 polar_modes
[x
].parsable
:
1568 polar_modes
[x
].readable
);
1571 case COL_CPU_ADDRESS
:
1572 if (desc
->addresses
)
1573 snprintf(buf
, bufsz
, "%d", desc
->addresses
[idx
]);
1575 case COL_CPU_CONFIGURED
:
1576 if (!desc
->configured
)
1578 if (mod
->mode
== OUTPUT_PARSABLE
)
1579 snprintf(buf
, bufsz
, "%s",
1580 desc
->configured
[idx
] ? _("Y") : _("N"));
1582 snprintf(buf
, bufsz
, "%s",
1583 desc
->configured
[idx
] ? _("yes") : _("no"));
1585 case COL_CPU_ONLINE
:
1588 if (mod
->mode
== OUTPUT_PARSABLE
)
1589 snprintf(buf
, bufsz
, "%s",
1590 is_cpu_online(desc
, cpu
) ? _("Y") : _("N"));
1592 snprintf(buf
, bufsz
, "%s",
1593 is_cpu_online(desc
, cpu
) ? _("yes") : _("no"));
1595 case COL_CPU_MAXMHZ
:
1596 if (desc
->maxmhz
&& desc
->maxmhz
[idx
])
1597 xstrncpy(buf
, desc
->maxmhz
[idx
], bufsz
);
1599 case COL_CPU_MINMHZ
:
1600 if (desc
->minmhz
&& desc
->minmhz
[idx
])
1601 xstrncpy(buf
, desc
->minmhz
[idx
], bufsz
);
1608 get_cell_header(struct lscpu_desc
*desc
, int col
,
1609 struct lscpu_modifier
*mod
,
1610 char *buf
, size_t bufsz
)
1614 if (col
== COL_CPU_CACHE
) {
1619 for (i
= desc
->ncaches
- 1; i
>= 0; i
--) {
1620 int x
= snprintf(p
, sz
, "%s", desc
->caches
[i
].name
);
1621 if (x
< 0 || (size_t) x
>= sz
)
1628 *p
++ = mod
->compat
? ',' : ':';
1636 snprintf(buf
, bufsz
, "%s", coldescs_cpu
[col
].name
);
1644 print_caches_readable(struct lscpu_desc
*desc
, int cols
[], int ncols
,
1645 struct lscpu_modifier
*mod
)
1648 struct libscols_table
*table
;
1650 scols_init_debug(0);
1652 table
= scols_new_table();
1654 err(EXIT_FAILURE
, _("failed to allocate output table"));
1656 scols_table_enable_json(table
, 1);
1657 scols_table_set_name(table
, "caches");
1660 for (i
= 0; i
< ncols
; i
++) {
1661 struct lscpu_coldesc
*cd
= &coldescs_cache
[cols
[i
]];
1662 if (!scols_table_new_column(table
, cd
->name
, 0, cd
->flags
))
1663 err(EXIT_FAILURE
, _("failed to allocate output column"));
1666 for (i
= desc
->ncaches
- 1; i
>= 0; i
--) {
1667 struct cpu_cache
*ca
= &desc
->caches
[i
];
1668 struct libscols_line
*line
;
1671 line
= scols_table_new_line(table
, NULL
);
1673 err(EXIT_FAILURE
, _("failed to allocate output line"));
1675 for (c
= 0; c
< ncols
; c
++) {
1680 case COL_CACHE_NAME
:
1682 data
= xstrdup(ca
->name
);
1684 case COL_CACHE_ONESIZE
:
1688 xasprintf(&data
, "%" PRIu64
, ca
->size
);
1690 data
= size_to_human_string(SIZE_SUFFIX_1LETTER
, ca
->size
);
1692 case COL_CACHE_ALLSIZE
:
1696 if (get_cache_full_size(desc
, ca
, &sz
) != 0)
1699 xasprintf(&data
, "%" PRIu64
, sz
);
1701 data
= size_to_human_string(SIZE_SUFFIX_1LETTER
, sz
);
1704 case COL_CACHE_WAYS
:
1705 if (ca
->ways_of_associativity
)
1706 xasprintf(&data
, "%u", ca
->ways_of_associativity
);
1709 case COL_CACHE_TYPE
:
1711 data
= xstrdup(ca
->type
);
1713 case COL_CACHE_LEVEL
:
1715 xasprintf(&data
, "%d", ca
->level
);
1717 case COL_CACHE_ALLOCPOL
:
1718 if (ca
->allocation_policy
)
1719 data
= xstrdup(ca
->allocation_policy
);
1721 case COL_CACHE_WRITEPOL
:
1722 if (ca
->write_policy
)
1723 data
= xstrdup(ca
->write_policy
);
1725 case COL_CACHE_PHYLINE
:
1726 if (ca
->physical_line_partition
)
1727 xasprintf(&data
, "%u", ca
->physical_line_partition
);
1729 case COL_CACHE_SETS
:
1730 if (ca
->number_of_sets
)
1731 xasprintf(&data
, "%u", ca
->number_of_sets
);
1733 case COL_CACHE_COHERENCYSIZE
:
1734 if (ca
->coherency_line_size
)
1735 xasprintf(&data
, "%u", ca
->coherency_line_size
);
1739 if (data
&& scols_line_refer_data(line
, c
, data
))
1740 err(EXIT_FAILURE
, _("failed to add output data"));
1744 scols_print_table(table
);
1745 scols_unref_table(table
);
1749 * [-p] backend, we support two parsable formats:
1751 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1752 * output and it contains fixed set of the columns. The CACHE columns are at
1753 * the end of the line and the CACHE is not printed if the number of the caches
1754 * is zero. The CACHE columns are separated by two commas, for example:
1757 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1761 * 2) "user defined output" -- this format always prints all columns without a
1762 * special prefix for the CACHE column. If there are no caches then the column is
1763 * empty and the header "Cache" is printed rather than a real cache name.
1764 * The CACHE columns are separated by ':'.
1766 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1767 * # CPU,Core,Socket,Node,L1d:L1i:L2
1772 print_cpus_parsable(struct lscpu_desc
*desc
, int cols
[], int ncols
,
1773 struct lscpu_modifier
*mod
)
1775 char buf
[BUFSIZ
], *data
;
1782 "# The following is the parsable format, which can be fed to other\n"
1783 "# programs. Each different item in every column has an unique ID\n"
1784 "# starting from zero.\n"));
1786 fputs("# ", stdout
);
1787 for (i
= 0; i
< ncols
; i
++) {
1790 if (col
== COL_CPU_CACHE
) {
1791 if (mod
->compat
&& !desc
->ncaches
)
1793 if (mod
->compat
&& i
!= 0)
1799 data
= get_cell_header(desc
, col
, mod
, buf
, sizeof(buf
));
1801 if (data
&& * data
&& col
!= COL_CPU_CACHE
&&
1802 !coldescs_cpu
[col
].is_abbr
) {
1804 * For normal column names use mixed case (e.g. "Socket")
1808 while (p
&& *p
!= '\0') {
1809 *p
= tolower((unsigned int) *p
);
1813 fputs(data
&& *data
? data
: "", stdout
);
1820 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1822 int cpu
= real_cpu_num(desc
, i
);
1825 if (!mod
->offline
&& !is_cpu_online(desc
, cpu
))
1827 if (!mod
->online
&& is_cpu_online(desc
, cpu
))
1830 if (desc
->present
&& !is_cpu_present(desc
, cpu
))
1832 for (c
= 0; c
< ncols
; c
++) {
1833 if (mod
->compat
&& cols
[c
] == COL_CPU_CACHE
) {
1842 data
= get_cell_data(desc
, i
, cols
[c
], mod
,
1844 fputs(data
&& *data
? data
: "", stdout
);
1854 print_cpus_readable(struct lscpu_desc
*desc
, int cols
[], int ncols
,
1855 struct lscpu_modifier
*mod
)
1860 struct libscols_table
*table
;
1862 scols_init_debug(0);
1864 table
= scols_new_table();
1866 err(EXIT_FAILURE
, _("failed to allocate output table"));
1868 scols_table_enable_json(table
, 1);
1869 scols_table_set_name(table
, "cpus");
1872 for (i
= 0; i
< ncols
; i
++) {
1873 data
= get_cell_header(desc
, cols
[i
], mod
, buf
, sizeof(buf
));
1874 if (!scols_table_new_column(table
, data
, 0, coldescs_cpu
[cols
[i
]].flags
))
1875 err(EXIT_FAILURE
, _("failed to allocate output column"));
1878 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1880 struct libscols_line
*line
;
1881 int cpu
= real_cpu_num(desc
, i
);
1884 if (!mod
->offline
&& !is_cpu_online(desc
, cpu
))
1886 if (!mod
->online
&& is_cpu_online(desc
, cpu
))
1889 if (desc
->present
&& !is_cpu_present(desc
, cpu
))
1892 line
= scols_table_new_line(table
, NULL
);
1894 err(EXIT_FAILURE
, _("failed to allocate output line"));
1896 for (c
= 0; c
< ncols
; c
++) {
1897 data
= get_cell_data(desc
, i
, cols
[c
], mod
,
1899 if (!data
|| !*data
)
1901 if (scols_line_set_data(line
, c
, data
))
1902 err(EXIT_FAILURE
, _("failed to add output data"));
1906 scols_print_table(table
);
1907 scols_unref_table(table
);
1911 static void __attribute__ ((__format__(printf
, 3, 4)))
1912 add_summary_sprint(struct libscols_table
*tb
,
1917 struct libscols_line
*ln
= scols_table_new_line(tb
, NULL
);
1922 err(EXIT_FAILURE
, _("failed to allocate output line"));
1924 /* description column */
1925 if (txt
&& scols_line_set_data(ln
, 0, txt
))
1926 err(EXIT_FAILURE
, _("failed to add output data"));
1929 va_start(args
, fmt
);
1930 xvasprintf(&data
, fmt
, args
);
1933 if (data
&& scols_line_refer_data(ln
, 1, data
))
1934 err(EXIT_FAILURE
, _("failed to add output data"));
1937 #define add_summary_n(tb, txt, num) add_summary_sprint(tb, txt, "%d", num)
1938 #define add_summary_s(tb, txt, str) add_summary_sprint(tb, txt, "%s", str)
1941 print_cpuset(struct libscols_table
*tb
,
1942 const char *key
, cpu_set_t
*set
, int hex
)
1944 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1945 size_t setbuflen
= 7 * maxcpus
;
1946 char setbuf
[setbuflen
], *p
;
1949 p
= cpumask_create(setbuf
, setbuflen
, set
, setsize
);
1950 add_summary_s(tb
, key
, p
);
1952 p
= cpulist_create(setbuf
, setbuflen
, set
, setsize
);
1953 add_summary_s(tb
, key
, p
);
1957 static int get_cache_full_size(struct lscpu_desc
*desc
,
1958 struct cpu_cache
*ca
, uint64_t *res
)
1960 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1963 /* Count number of CPUs which shares the cache */
1964 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1965 int cpu
= real_cpu_num(desc
, i
);
1967 if (desc
->present
&& !is_cpu_present(desc
, cpu
))
1969 if (CPU_ISSET_S(cpu
, setsize
, ca
->sharedmaps
[0]))
1973 /* Correction for CPU threads */
1974 if (desc
->nthreads
> desc
->ncores
)
1975 nshares
/= (desc
->nthreads
/ desc
->ncores
);
1979 *res
= (desc
->ncores
/ nshares
) * ca
->size
;
1987 print_summary(struct lscpu_desc
*desc
, struct lscpu_modifier
*mod
)
1991 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1992 struct libscols_table
*tb
;
1994 scols_init_debug(0);
1996 tb
= scols_new_table();
1998 err(EXIT_FAILURE
, _("failed to allocate output table"));
2000 scols_table_enable_noheadings(tb
, 1);
2002 scols_table_enable_json(tb
, 1);
2003 scols_table_set_name(tb
, "lscpu");
2006 if (scols_table_new_column(tb
, "field", 0, 0) == NULL
||
2007 scols_table_new_column(tb
, "data", 0, SCOLS_FL_NOEXTREMES
| SCOLS_FL_WRAP
) == NULL
)
2008 err(EXIT_FAILURE
, _("failed to initialize output column"));
2010 add_summary_s(tb
, _("Architecture:"), desc
->arch
);
2014 if (desc
->mode
& MODE_32BIT
) {
2015 strcpy(p
, "32-bit, ");
2018 if (desc
->mode
& MODE_64BIT
) {
2019 strcpy(p
, "64-bit, ");
2023 add_summary_s(tb
, _("CPU op-mode(s):"), buf
);
2025 #if !defined(WORDS_BIGENDIAN)
2026 add_summary_s(tb
, _("Byte Order:"), "Little Endian");
2028 add_summary_s(tb
, _("Byte Order:"), "Big Endian");
2032 add_summary_s(tb
, _("Address sizes:"), desc
->addrsz
);
2034 add_summary_n(tb
, _("CPU(s):"), desc
->ncpus
);
2037 print_cpuset(tb
, mod
->hex
? _("On-line CPU(s) mask:") :
2038 _("On-line CPU(s) list:"),
2039 desc
->online
, mod
->hex
);
2041 if (desc
->online
&& CPU_COUNT_S(setsize
, desc
->online
) != desc
->ncpus
) {
2044 /* Linux kernel provides cpuset of off-line CPUs that contains
2045 * all configured CPUs (see /sys/devices/system/cpu/offline),
2046 * but want to print real (present in system) off-line CPUs only.
2048 set
= cpuset_alloc(maxcpus
, NULL
, NULL
);
2050 err(EXIT_FAILURE
, _("failed to callocate cpu set"));
2051 CPU_ZERO_S(setsize
, set
);
2052 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
2053 int cpu
= real_cpu_num(desc
, i
);
2054 if (!is_cpu_online(desc
, cpu
) && is_cpu_present(desc
, cpu
))
2055 CPU_SET_S(cpu
, setsize
, set
);
2057 print_cpuset(tb
, mod
->hex
? _("Off-line CPU(s) mask:") :
2058 _("Off-line CPU(s) list:"),
2063 if (desc
->nsockets
) {
2064 int threads_per_core
, cores_per_socket
, sockets_per_book
;
2065 int books_per_drawer
, drawers
;
2068 threads_per_core
= cores_per_socket
= sockets_per_book
= 0;
2069 books_per_drawer
= drawers
= 0;
2070 /* s390 detects its cpu topology via /proc/sysinfo, if present.
2071 * Using simply the cpu topology masks in sysfs will not give
2072 * usable results since everything is virtualized. E.g.
2073 * virtual core 0 may have only 1 cpu, but virtual core 2 may
2075 * If the cpu topology is not exported (e.g. 2nd level guest)
2076 * fall back to old calculation scheme.
2078 if ((fd
= ul_path_fopen(desc
->procfs
, "r", "sysinfo"))) {
2081 while (fd
&& fgets(buf
, sizeof(buf
), fd
) != NULL
) {
2082 if (sscanf(buf
, "CPU Topology SW:%d%d%d%d%d%d",
2083 &t0
, &t1
, &drawers
, &books_per_drawer
,
2085 &cores_per_socket
) == 6)
2092 threads_per_core
= atoi(desc
->mtid
) + 1;
2093 add_summary_n(tb
, _("Thread(s) per core:"),
2094 threads_per_core
?: desc
->nthreads
/ desc
->ncores
);
2095 add_summary_n(tb
, _("Core(s) per socket:"),
2096 cores_per_socket
?: desc
->ncores
/ desc
->nsockets
);
2098 add_summary_n(tb
, _("Socket(s) per book:"),
2099 sockets_per_book
?: desc
->nsockets
/ desc
->nbooks
);
2100 if (desc
->ndrawers
) {
2101 add_summary_n(tb
, _("Book(s) per drawer:"),
2102 books_per_drawer
?: desc
->nbooks
/ desc
->ndrawers
);
2103 add_summary_n(tb
, _("Drawer(s):"), drawers
?: desc
->ndrawers
);
2105 add_summary_n(tb
, _("Book(s):"), books_per_drawer
?: desc
->nbooks
);
2108 add_summary_n(tb
, _("Socket(s):"), sockets_per_book
?: desc
->nsockets
);
2112 add_summary_n(tb
, _("NUMA node(s):"), desc
->nnodes
);
2114 add_summary_s(tb
, _("Vendor ID:"), desc
->vendor
);
2115 if (desc
->machinetype
)
2116 add_summary_s(tb
, _("Machine type:"), desc
->machinetype
);
2118 add_summary_s(tb
, _("CPU family:"), desc
->family
);
2119 if (desc
->model
|| desc
->revision
)
2120 add_summary_s(tb
, _("Model:"), desc
->revision
? desc
->revision
: desc
->model
);
2121 if (desc
->modelname
|| desc
->cpu
)
2122 add_summary_s(tb
, _("Model name:"), desc
->cpu
? desc
->cpu
: desc
->modelname
);
2124 add_summary_s(tb
, _("Stepping:"), desc
->stepping
);
2125 if (desc
->freqboost
>= 0)
2126 add_summary_s(tb
, _("Frequency boost:"), desc
->freqboost
?
2127 _("enabled") : _("disabled"));
2129 add_summary_s(tb
, _("CPU MHz:"), desc
->mhz
);
2130 if (desc
->dynamic_mhz
)
2131 add_summary_s(tb
, _("CPU dynamic MHz:"), desc
->dynamic_mhz
);
2132 if (desc
->static_mhz
)
2133 add_summary_s(tb
, _("CPU static MHz:"), desc
->static_mhz
);
2135 add_summary_s(tb
, _("CPU max MHz:"), cpu_max_mhz(desc
, buf
, sizeof(buf
)));
2137 add_summary_s(tb
, _("CPU min MHz:"), cpu_min_mhz(desc
, buf
, sizeof(buf
)));
2139 add_summary_s(tb
, _("BogoMIPS:"), desc
->bogomips
);
2140 if (desc
->virtflag
) {
2141 if (!strcmp(desc
->virtflag
, "svm"))
2142 add_summary_s(tb
, _("Virtualization:"), "AMD-V");
2143 else if (!strcmp(desc
->virtflag
, "vmx"))
2144 add_summary_s(tb
, _("Virtualization:"), "VT-x");
2146 if (desc
->hypervisor
)
2147 add_summary_s(tb
, _("Hypervisor:"), desc
->hypervisor
);
2149 add_summary_s(tb
, _("Hypervisor vendor:"), hv_vendors
[desc
->hyper
]);
2150 add_summary_s(tb
, _("Virtualization type:"), _(virt_types
[desc
->virtype
]));
2152 if (desc
->dispatching
>= 0)
2153 add_summary_s(tb
, _("Dispatching mode:"), _(disp_modes
[desc
->dispatching
]));
2154 if (desc
->ncaches
) {
2155 for (i
= desc
->ncaches
- 1; i
>= 0; i
--) {
2158 struct cpu_cache
*ca
= &desc
->caches
[i
];
2162 if (get_cache_full_size(desc
, ca
, &sz
) != 0 || sz
== 0)
2165 xasprintf(&tmp
, "%" PRIu64
, sz
);
2167 tmp
= size_to_human_string(
2168 SIZE_SUFFIX_3LETTER
| SIZE_SUFFIX_SPACE
,
2170 snprintf(buf
, sizeof(buf
), _("%s cache:"), ca
->name
);
2171 add_summary_s(tb
, buf
, tmp
);
2175 if (desc
->necaches
) {
2176 for (i
= desc
->necaches
- 1; i
>= 0; i
--) {
2178 struct cpu_cache
*ca
= &desc
->ecaches
[i
];
2183 xasprintf(&tmp
, "%" PRIu64
, ca
->size
);
2185 tmp
= size_to_human_string(
2186 SIZE_SUFFIX_3LETTER
| SIZE_SUFFIX_SPACE
,
2188 snprintf(buf
, sizeof(buf
), _("%s cache:"), ca
->name
);
2189 add_summary_s(tb
, buf
, tmp
);
2194 for (i
= 0; i
< desc
->nnodes
; i
++) {
2195 snprintf(buf
, sizeof(buf
), _("NUMA node%d CPU(s):"), desc
->idx2nodenum
[i
]);
2196 print_cpuset(tb
, buf
, desc
->nodemaps
[i
], mod
->hex
);
2199 if (desc
->physsockets
) {
2200 add_summary_n(tb
, _("Physical sockets:"), desc
->physsockets
);
2201 add_summary_n(tb
, _("Physical chips:"), desc
->physchips
);
2202 add_summary_n(tb
, _("Physical cores/chip:"), desc
->physcoresperchip
);
2206 for (i
= 0; i
< desc
->nvuls
; i
++) {
2207 snprintf(buf
, sizeof(buf
), ("Vulnerability %s:"), desc
->vuls
[i
].name
);
2208 add_summary_s(tb
, buf
, desc
->vuls
[i
].text
);
2213 add_summary_s(tb
, _("Flags:"), desc
->flags
);
2215 scols_print_table(tb
);
2216 scols_unref_table(tb
);
2219 static void __attribute__((__noreturn__
)) usage(void)
2224 fputs(USAGE_HEADER
, out
);
2225 fprintf(out
, _(" %s [options]\n"), program_invocation_short_name
);
2227 fputs(USAGE_SEPARATOR
, out
);
2228 fputs(_("Display information about the CPU architecture.\n"), out
);
2230 fputs(USAGE_OPTIONS
, out
);
2231 fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out
);
2232 fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out
);
2233 fputs(_(" -B, --bytes print sizes in bytes rather than in human readable format\n"), out
);
2234 fputs(_(" -C, --caches[=<list>] info about caches in extended readable format\n"), out
);
2235 fputs(_(" -c, --offline print offline CPUs only\n"), out
);
2236 fputs(_(" -J, --json use JSON for default or extended format\n"), out
);
2237 fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out
);
2238 fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out
);
2239 fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out
);
2240 fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out
);
2241 fputs(_(" -y, --physical print physical instead of logical IDs\n"), out
);
2242 fputs(_(" --output-all print all available columns for -e, -p or -C\n"), out
);
2243 fputs(USAGE_SEPARATOR
, out
);
2244 printf(USAGE_HELP_OPTIONS(25));
2246 fputs(_("\nAvailable output columns for -e or -p:\n"), out
);
2247 for (i
= 0; i
< ARRAY_SIZE(coldescs_cpu
); i
++)
2248 fprintf(out
, " %13s %s\n", coldescs_cpu
[i
].name
, _(coldescs_cpu
[i
].help
));
2250 fputs(_("\nAvailable output columns for -C:\n"), out
);
2251 for (i
= 0; i
< ARRAY_SIZE(coldescs_cache
); i
++)
2252 fprintf(out
, " %13s %s\n", coldescs_cache
[i
].name
, _(coldescs_cache
[i
].help
));
2254 printf(USAGE_MAN_TAIL("lscpu(1)"));
2259 int main(int argc
, char *argv
[])
2261 struct lscpu_modifier _mod
= { .mode
= OUTPUT_SUMMARY
}, *mod
= &_mod
;
2262 struct lscpu_desc _desc
= { .flags
= NULL
}, *desc
= &_desc
;
2264 int columns
[ARRAY_SIZE(coldescs_cpu
)], ncolumns
= 0;
2265 int cpu_modifier_specified
= 0;
2269 OPT_OUTPUT_ALL
= CHAR_MAX
+ 1,
2271 static const struct option longopts
[] = {
2272 { "all", no_argument
, NULL
, 'a' },
2273 { "online", no_argument
, NULL
, 'b' },
2274 { "bytes", no_argument
, NULL
, 'B' },
2275 { "caches", optional_argument
, NULL
, 'C' },
2276 { "offline", no_argument
, NULL
, 'c' },
2277 { "help", no_argument
, NULL
, 'h' },
2278 { "extended", optional_argument
, NULL
, 'e' },
2279 { "json", no_argument
, NULL
, 'J' },
2280 { "parse", optional_argument
, NULL
, 'p' },
2281 { "sysroot", required_argument
, NULL
, 's' },
2282 { "physical", no_argument
, NULL
, 'y' },
2283 { "hex", no_argument
, NULL
, 'x' },
2284 { "version", no_argument
, NULL
, 'V' },
2285 { "output-all", no_argument
, NULL
, OPT_OUTPUT_ALL
},
2286 { NULL
, 0, NULL
, 0 }
2289 static const ul_excl_t excl
[] = { /* rows and cols in ASCII order */
2294 int excl_st
[ARRAY_SIZE(excl
)] = UL_EXCL_STATUS_INIT
;
2296 setlocale(LC_ALL
, "");
2297 bindtextdomain(PACKAGE
, LOCALEDIR
);
2298 textdomain(PACKAGE
);
2299 atexit(close_stdout
);
2301 while ((c
= getopt_long(argc
, argv
, "aBbC::ce::hJp::s:xyV", longopts
, NULL
)) != -1) {
2303 err_exclusive_options(c
, longopts
, excl
, excl_st
);
2307 mod
->online
= mod
->offline
= 1;
2308 cpu_modifier_specified
= 1;
2315 cpu_modifier_specified
= 1;
2319 cpu_modifier_specified
= 1;
2325 ncolumns
= string_to_idarray(optarg
,
2326 columns
, ARRAY_SIZE(columns
),
2327 cache_column_name_to_id
);
2329 return EXIT_FAILURE
;
2331 mod
->mode
= OUTPUT_CACHES
;
2341 ncolumns
= string_to_idarray(optarg
,
2342 columns
, ARRAY_SIZE(columns
),
2343 cpu_column_name_to_id
);
2345 return EXIT_FAILURE
;
2347 mod
->mode
= c
== 'p' ? OUTPUT_PARSABLE
: OUTPUT_READABLE
;
2350 desc
->prefix
= optarg
;
2351 mod
->system
= SYSTEM_SNAPSHOT
;
2359 case OPT_OUTPUT_ALL
:
2366 print_version(EXIT_SUCCESS
);
2368 errtryhelp(EXIT_FAILURE
);
2372 if (all
&& ncolumns
== 0) {
2373 size_t sz
, maxsz
= mod
->mode
== OUTPUT_CACHES
?
2374 ARRAY_SIZE(coldescs_cache
) :
2375 ARRAY_SIZE(coldescs_cpu
);
2377 for (sz
= 0; sz
< maxsz
; sz
++)
2378 columns
[ncolumns
++] = sz
;
2381 if (cpu_modifier_specified
&& mod
->mode
== OUTPUT_SUMMARY
) {
2383 _("%s: options --all, --online and --offline may only "
2384 "be used with options --extended or --parse.\n"),
2385 program_invocation_short_name
);
2386 return EXIT_FAILURE
;
2389 if (argc
!= optind
) {
2390 warnx(_("bad usage"));
2391 errtryhelp(EXIT_FAILURE
);
2394 /* set default cpu display mode if none was specified */
2395 if (!mod
->online
&& !mod
->offline
) {
2397 mod
->offline
= mod
->mode
== OUTPUT_READABLE
? 1 : 0;
2400 ul_path_init_debug();
2402 /* /sys/devices/system/cpu */
2403 desc
->syscpu
= ul_new_path(_PATH_SYS_CPU
);
2405 err(EXIT_FAILURE
, _("failed to initialize CPUs sysfs handler"));
2407 ul_path_set_prefix(desc
->syscpu
, desc
->prefix
);
2410 desc
->procfs
= ul_new_path("/proc");
2412 err(EXIT_FAILURE
, _("failed to initialize procfs handler"));
2414 ul_path_set_prefix(desc
->procfs
, desc
->prefix
);
2416 read_basicinfo(desc
, mod
);
2418 setsize
= CPU_ALLOC_SIZE(maxcpus
);
2420 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
2421 /* only consider present CPUs */
2422 if (desc
->present
&&
2423 !CPU_ISSET_S(real_cpu_num(desc
, i
), setsize
, desc
->present
))
2425 read_topology(desc
, i
);
2426 read_cache(desc
, i
);
2427 read_polarization(desc
, i
);
2428 read_address(desc
, i
);
2429 read_configured(desc
, i
);
2430 read_max_mhz(desc
, i
);
2431 read_min_mhz(desc
, i
);
2435 qsort(desc
->caches
, desc
->ncaches
,
2436 sizeof(struct cpu_cache
), cachecmp
);
2439 qsort(desc
->ecaches
, desc
->necaches
,
2440 sizeof(struct cpu_cache
), cachecmp
);
2443 read_hypervisor(desc
, mod
);
2444 arm_cpu_decode(desc
);
2447 case OUTPUT_SUMMARY
:
2448 print_summary(desc
, mod
);
2452 columns
[ncolumns
++] = COL_CACHE_NAME
;
2453 columns
[ncolumns
++] = COL_CACHE_ONESIZE
;
2454 columns
[ncolumns
++] = COL_CACHE_ALLSIZE
;
2455 columns
[ncolumns
++] = COL_CACHE_WAYS
;
2456 columns
[ncolumns
++] = COL_CACHE_TYPE
;
2457 columns
[ncolumns
++] = COL_CACHE_LEVEL
;
2458 columns
[ncolumns
++] = COL_CACHE_SETS
;
2459 columns
[ncolumns
++] = COL_CACHE_PHYLINE
;
2460 columns
[ncolumns
++] = COL_CACHE_COHERENCYSIZE
;
2462 print_caches_readable(desc
, columns
, ncolumns
, mod
);
2464 case OUTPUT_PARSABLE
:
2466 columns
[ncolumns
++] = COL_CPU_CPU
;
2467 columns
[ncolumns
++] = COL_CPU_CORE
;
2468 columns
[ncolumns
++] = COL_CPU_SOCKET
;
2469 columns
[ncolumns
++] = COL_CPU_NODE
;
2470 columns
[ncolumns
++] = COL_CPU_CACHE
;
2473 print_cpus_parsable(desc
, columns
, ncolumns
, mod
);
2475 case OUTPUT_READABLE
:
2477 /* No list was given. Just print whatever is there. */
2478 columns
[ncolumns
++] = COL_CPU_CPU
;
2480 columns
[ncolumns
++] = COL_CPU_NODE
;
2481 if (desc
->drawermaps
)
2482 columns
[ncolumns
++] = COL_CPU_DRAWER
;
2484 columns
[ncolumns
++] = COL_CPU_BOOK
;
2485 if (desc
->socketmaps
)
2486 columns
[ncolumns
++] = COL_CPU_SOCKET
;
2488 columns
[ncolumns
++] = COL_CPU_CORE
;
2490 columns
[ncolumns
++] = COL_CPU_CACHE
;
2492 columns
[ncolumns
++] = COL_CPU_ONLINE
;
2493 if (desc
->configured
)
2494 columns
[ncolumns
++] = COL_CPU_CONFIGURED
;
2495 if (desc
->polarization
)
2496 columns
[ncolumns
++] = COL_CPU_POLARIZATION
;
2497 if (desc
->addresses
)
2498 columns
[ncolumns
++] = COL_CPU_ADDRESS
;
2500 columns
[ncolumns
++] = COL_CPU_MAXMHZ
;
2502 columns
[ncolumns
++] = COL_CPU_MINMHZ
;
2504 print_cpus_readable(desc
, columns
, ncolumns
, mod
);
2508 ul_unref_path(desc
->syscpu
);
2509 ul_unref_path(desc
->procfs
);
2510 return EXIT_SUCCESS
;