2 * lscpu - CPU architecture information helper
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
31 #include <sys/utsname.h>
34 #include <sys/types.h>
36 #include <sys/personality.h>
38 #if (defined(__x86_64__) || defined(__i386__))
39 # if !defined( __SANITIZE_ADDRESS__)
40 # define INCLUDE_VMWARE_BDOOR
42 # warning VMWARE detection disabled by __SANITIZE_ADDRESS__
46 #ifdef INCLUDE_VMWARE_BDOOR
56 #if defined(HAVE_LIBRTAS)
60 #include <libsmartcols.h>
62 #include "closestream.h"
70 #define _PATH_SYS_SYSTEM "/sys/devices/system"
71 #define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
72 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
73 #define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
75 /* Xen Domain feature flag used for /sys/hypervisor/properties/features */
76 #define XENFEAT_supervisor_mode_kernel 3
77 #define XENFEAT_mmu_pt_update_preserve_ad 5
78 #define XENFEAT_hvm_callback_vector 8
80 #define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
81 #define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
82 | (1U << XENFEAT_hvm_callback_vector) )
84 static const char *virt_types
[] = {
85 [VIRT_NONE
] = N_("none"),
86 [VIRT_PARA
] = N_("para"),
87 [VIRT_FULL
] = N_("full"),
88 [VIRT_CONT
] = N_("container"),
91 static const char *hv_vendors
[] = {
95 [HYPER_MSHV
] = "Microsoft",
96 [HYPER_VMWARE
] = "VMware",
98 [HYPER_VSERVER
] = "Linux-VServer",
99 [HYPER_UML
] = "User-mode Linux",
100 [HYPER_INNOTEK
] = "Innotek GmbH",
101 [HYPER_HITACHI
] = "Hitachi",
102 [HYPER_PARALLELS
] = "Parallels",
103 [HYPER_VBOX
] = "Oracle",
104 [HYPER_OS400
] = "OS/400",
105 [HYPER_PHYP
] = "pHyp",
106 [HYPER_SPAR
] = "Unisys s-Par",
107 [HYPER_WSL
] = "Windows Subsystem for Linux"
110 static const int hv_vendor_pci
[] = {
111 [HYPER_NONE
] = 0x0000,
112 [HYPER_XEN
] = 0x5853,
113 [HYPER_KVM
] = 0x0000,
114 [HYPER_MSHV
] = 0x1414,
115 [HYPER_VMWARE
] = 0x15ad,
116 [HYPER_VBOX
] = 0x80ee,
119 static const int hv_graphics_pci
[] = {
120 [HYPER_NONE
] = 0x0000,
121 [HYPER_XEN
] = 0x0001,
122 [HYPER_KVM
] = 0x0000,
123 [HYPER_MSHV
] = 0x5353,
124 [HYPER_VMWARE
] = 0x0710,
125 [HYPER_VBOX
] = 0xbeef,
129 /* dispatching modes */
130 static const char *disp_modes
[] = {
131 [DISP_HORIZONTAL
] = N_("horizontal"),
132 [DISP_VERTICAL
] = N_("vertical")
135 static struct polarization_modes polar_modes
[] = {
136 [POLAR_UNKNOWN
] = {"U", "-"},
137 [POLAR_VLOW
] = {"VL", "vert-low"},
138 [POLAR_VMEDIUM
] = {"VM", "vert-medium"},
139 [POLAR_VHIGH
] = {"VH", "vert-high"},
140 [POLAR_HORIZONTAL
] = {"H", "horizontal"},
143 static int maxcpus
; /* size in bits of kernel cpu mask */
145 #define is_cpu_online(_d, _cpu) \
146 ((_d) && (_d)->online ? \
147 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
148 #define is_cpu_present(_d, _cpu) \
149 ((_d) && (_d)->present ? \
150 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)
152 #define real_cpu_num(_d, _i) ((_d)->idx2cpunum[(_i)])
173 /* column description
175 struct lscpu_coldesc
{
179 unsigned int is_abbr
:1; /* name is abbreviation */
182 static struct lscpu_coldesc coldescs
[] =
184 [COL_CPU
] = { "CPU", N_("logical CPU number"), 1 },
185 [COL_CORE
] = { "CORE", N_("logical core number") },
186 [COL_SOCKET
] = { "SOCKET", N_("logical socket number") },
187 [COL_NODE
] = { "NODE", N_("logical NUMA node number") },
188 [COL_BOOK
] = { "BOOK", N_("logical book number") },
189 [COL_DRAWER
] = { "DRAWER", N_("logical drawer number") },
190 [COL_CACHE
] = { "CACHE", N_("shows how caches are shared between CPUs") },
191 [COL_POLARIZATION
] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
192 [COL_ADDRESS
] = { "ADDRESS", N_("physical address of a CPU") },
193 [COL_CONFIGURED
] = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
194 [COL_ONLINE
] = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
195 [COL_MAXMHZ
] = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
196 [COL_MINMHZ
] = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
200 column_name_to_id(const char *name
, size_t namesz
)
204 for (i
= 0; i
< ARRAY_SIZE(coldescs
); i
++) {
205 const char *cn
= coldescs
[i
].name
;
207 if (!strncasecmp(name
, cn
, namesz
) && !*(cn
+ namesz
))
210 warnx(_("unknown column: %s"), name
);
214 /* Lookup a pattern and get the value from cpuinfo.
217 * "<pattern> : <key>"
220 lookup(char *line
, char *pattern
, char **value
)
223 int len
= strlen(pattern
);
225 /* don't re-fill already found tags, first one wins */
226 if (!*line
|| *value
)
230 if (strncmp(line
, pattern
, len
))
234 for (p
= line
+ len
; isspace(*p
); p
++);
241 for (++p
; isspace(*p
); p
++);
249 len
= strlen(line
) - 1;
250 for (p
= line
+ len
; isspace(*(p
-1)); p
--);
257 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
258 * part of the cache topology information within the sysfs filesystem.
259 * This is true for all shared caches on e.g. s390. When there are layers of
260 * hypervisors in between it is not known which CPUs share which caches.
261 * Therefore information about shared caches is only available in
264 * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
267 lookup_cache(char *line
, struct lscpu_desc
*desc
)
269 struct cpu_cache
*cache
;
274 /* Make sure line starts with "cache<nr> :" */
275 if (strncmp(line
, "cache", 5))
277 for (p
= line
+ 5; isdigit(*p
); p
++);
278 for (; isspace(*p
); p
++);
282 p
= strstr(line
, "scope=") + 6;
283 /* Skip private caches, also present in sysfs */
284 if (!p
|| strncmp(p
, "Private", 7) == 0)
286 p
= strstr(line
, "level=");
287 if (!p
|| sscanf(p
, "level=%d", &level
) != 1)
289 p
= strstr(line
, "type=") + 5;
293 if (strncmp(p
, "Data", 4) == 0)
295 if (strncmp(p
, "Instruction", 11) == 0)
297 p
= strstr(line
, "size=");
298 if (!p
|| sscanf(p
, "size=%lld", &size
) != 1)
302 desc
->ecaches
= xrealloc(desc
->ecaches
,
303 desc
->necaches
* sizeof(struct cpu_cache
));
304 cache
= &desc
->ecaches
[desc
->necaches
- 1];
305 memset(cache
, 0 , sizeof(*cache
));
307 xasprintf(&cache
->name
, "L%d%c", level
, type
);
309 xasprintf(&cache
->name
, "L%d", level
);
310 xasprintf(&cache
->size
, "%lldK", size
);
314 /* Don't init the mode for platforms where we are not able to
315 * detect that CPU supports 64-bit mode.
318 init_mode(struct lscpu_modifier
*mod
)
322 if (mod
->system
== SYSTEM_SNAPSHOT
)
323 /* reading info from any /{sys,proc} dump, don't mix it with
324 * information about our real CPU */
327 #if defined(__alpha__) || defined(__ia64__)
328 m
|= MODE_64BIT
; /* 64bit platforms only */
330 /* platforms with 64bit flag in /proc/cpuinfo, define
331 * 32bit default here */
332 #if defined(__i386__) || defined(__x86_64__) || \
333 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
337 #if defined(__aarch64__)
339 /* personality() is the most reliable way (since 4.7)
340 * to determine aarch32 support */
341 int pers
= personality(PER_LINUX32
);
352 #if defined(HAVE_LIBRTAS)
353 #define PROCESSOR_MODULE_INFO 43
/*
 * Decode a big-endian 16-bit value stored at @offset in @buf.
 *
 * The bytes must be read as unsigned: on platforms where plain 'char'
 * is signed, a byte >= 0x80 would otherwise be sign-extended by the
 * shift/addition and corrupt the result (e.g. bytes 0x80 0x01 would
 * decode as a negative number instead of 0x8001).
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
359 static void read_physical_info_powerpc(struct lscpu_desc
*desc
)
364 desc
->physsockets
= desc
->physchips
= desc
->physcoresperchip
= 0;
366 rc
= rtas_get_sysparm(PROCESSOR_MODULE_INFO
, sizeof(buf
), buf
);
370 len
= strbe16toh(buf
, 0);
374 ntypes
= strbe16toh(buf
, 2);
380 desc
->physsockets
= strbe16toh(buf
, 4);
381 desc
->physchips
= strbe16toh(buf
, 6);
382 desc
->physcoresperchip
= strbe16toh(buf
, 8);
385 static void read_physical_info_powerpc(
386 struct lscpu_desc
*desc
__attribute__((__unused__
)))
393 read_basicinfo(struct lscpu_desc
*desc
, struct lscpu_modifier
*mod
)
397 struct utsname utsbuf
;
399 cpu_set_t
*cpuset
= NULL
;
402 if (uname(&utsbuf
) == -1)
403 err(EXIT_FAILURE
, _("error: uname failed"));
405 fp
= ul_path_fopen(desc
->procfs
, "r", "cpuinfo");
407 err(EXIT_FAILURE
, _("cannot open %s"), "/proc/cpuinfo");
408 desc
->arch
= xstrdup(utsbuf
.machine
);
411 while (fgets(buf
, sizeof(buf
), fp
) != NULL
) {
412 if (lookup(buf
, "vendor", &desc
->vendor
)) ;
413 else if (lookup(buf
, "vendor_id", &desc
->vendor
)) ;
414 else if (lookup(buf
, "CPU implementer", &desc
->vendor
)) ; /* ARM and aarch64 */
415 else if (lookup(buf
, "family", &desc
->family
)) ;
416 else if (lookup(buf
, "cpu family", &desc
->family
)) ;
417 else if (lookup(buf
, "model", &desc
->model
)) ;
418 else if (lookup(buf
, "CPU part", &desc
->model
)) ; /* ARM and aarch64 */
419 else if (lookup(buf
, "model name", &desc
->modelname
)) ;
420 else if (lookup(buf
, "stepping", &desc
->stepping
)) ;
421 else if (lookup(buf
, "CPU variant", &desc
->stepping
)) ; /* aarch64 */
422 else if (lookup(buf
, "cpu MHz", &desc
->mhz
)) ;
423 else if (lookup(buf
, "cpu MHz dynamic", &desc
->dynamic_mhz
)) ; /* s390 */
424 else if (lookup(buf
, "cpu MHz static", &desc
->static_mhz
)) ; /* s390 */
425 else if (lookup(buf
, "flags", &desc
->flags
)) ; /* x86 */
426 else if (lookup(buf
, "features", &desc
->flags
)) ; /* s390 */
427 else if (lookup(buf
, "Features", &desc
->flags
)) ; /* aarch64 */
428 else if (lookup(buf
, "type", &desc
->flags
)) ; /* sparc64 */
429 else if (lookup(buf
, "bogomips", &desc
->bogomips
)) ;
430 else if (lookup(buf
, "BogoMIPS", &desc
->bogomips
)) ; /* aarch64 */
431 else if (lookup(buf
, "bogomips per cpu", &desc
->bogomips
)) ; /* s390 */
432 else if (lookup(buf
, "cpu", &desc
->cpu
)) ;
433 else if (lookup(buf
, "revision", &desc
->revision
)) ;
434 else if (lookup(buf
, "CPU revision", &desc
->revision
)) ; /* aarch64 */
435 else if (lookup(buf
, "max thread id", &desc
->mtid
)) ; /* s390 */
436 else if (lookup(buf
, "address sizes", &desc
->addrsz
)) ; /* x86 */
437 else if (lookup_cache(buf
, desc
)) ;
442 desc
->mode
= init_mode(mod
);
445 snprintf(buf
, sizeof(buf
), " %s ", desc
->flags
);
446 if (strstr(buf
, " svm "))
447 desc
->virtflag
= xstrdup("svm");
448 else if (strstr(buf
, " vmx "))
449 desc
->virtflag
= xstrdup("vmx");
450 if (strstr(buf
, " lm "))
451 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* x86_64 */
452 if (strstr(buf
, " zarch "))
453 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* s390x */
454 if (strstr(buf
, " sun4v ") || strstr(buf
, " sun4u "))
455 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* sparc64 */
458 if (desc
->arch
&& mod
->system
!= SYSTEM_SNAPSHOT
) {
459 if (strcmp(desc
->arch
, "ppc64") == 0)
460 desc
->mode
|= MODE_32BIT
| MODE_64BIT
;
461 else if (strcmp(desc
->arch
, "ppc") == 0)
462 desc
->mode
|= MODE_32BIT
;
467 if (ul_path_read_s32(desc
->syscpu
, &maxcpus
, "kernel_max") == 0)
468 /* note that kernel_max is maximum index [NR_CPUS-1] */
471 else if (mod
->system
== SYSTEM_LIVE
)
472 /* the root is '/' so we are working with data from the current kernel */
473 maxcpus
= get_max_number_of_cpus();
476 /* error or we are reading some /sys snapshot instead of the
477 * real /sys, let's use any crazy number... */
480 setsize
= CPU_ALLOC_SIZE(maxcpus
);
482 if (ul_path_readf_cpulist(desc
->syscpu
, &cpuset
, maxcpus
, "possible") == 0) {
485 desc
->ncpuspos
= CPU_COUNT_S(setsize
, cpuset
);
486 desc
->idx2cpunum
= xcalloc(desc
->ncpuspos
, sizeof(int));
488 for (num
= 0, idx
= 0; num
< maxcpus
; num
++) {
489 if (CPU_ISSET_S(num
, setsize
, cpuset
))
490 desc
->idx2cpunum
[idx
++] = num
;
495 err(EXIT_FAILURE
, _("failed to determine number of CPUs: %s"),
496 _PATH_SYS_CPU
"/possible");
499 /* get mask for present CPUs */
500 if (ul_path_readf_cpulist(desc
->syscpu
, &desc
->present
, maxcpus
, "present") == 0)
501 desc
->ncpus
= CPU_COUNT_S(setsize
, desc
->present
);
503 /* get mask for online CPUs */
504 if (ul_path_readf_cpulist(desc
->syscpu
, &desc
->online
, maxcpus
, "online") == 0)
505 desc
->nthreads
= CPU_COUNT_S(setsize
, desc
->online
);
507 /* get dispatching mode */
508 if (ul_path_read_s32(desc
->syscpu
, &desc
->dispatching
, "dispatching") != 0)
509 desc
->dispatching
= -1;
511 /* get cpufreq boost mode */
512 if (ul_path_read_s32(desc
->syscpu
, &desc
->freqboost
, "cpufreq/boost") != 0)
513 desc
->freqboost
= -1;
515 if (mod
->system
== SYSTEM_LIVE
)
516 read_physical_info_powerpc(desc
);
518 if ((fp
= ul_path_fopen(desc
->procfs
, "r", "sysinfo"))) {
519 while (fgets(buf
, sizeof(buf
), fp
) != NULL
&& !desc
->machinetype
)
520 lookup(buf
, "Type", &desc
->machinetype
);
526 has_pci_device(struct lscpu_desc
*desc
, unsigned int vendor
, unsigned int device
)
529 unsigned int num
, fn
, ven
, dev
;
532 f
= ul_path_fopen(desc
->procfs
, "r", "bus/pci/devices");
536 /* for more details about bus/pci/devices format see
537 * drivers/pci/proc.c in linux kernel
539 while(fscanf(f
, "%02x%02x\t%04x%04x\t%*[^\n]",
540 &num
, &fn
, &ven
, &dev
) == 4) {
542 if (ven
== vendor
&& dev
== device
)
552 #if defined(__x86_64__) || defined(__i386__)
555 * This CPUID leaf returns the information about the hypervisor.
556 * EAX : maximum input value for CPUID supported by the hypervisor.
557 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
559 #define HYPERVISOR_INFO_LEAF 0x40000000
562 cpuid(unsigned int op
, unsigned int *eax
, unsigned int *ebx
,
563 unsigned int *ecx
, unsigned int *edx
)
566 #if defined(__PIC__) && defined(__i386__)
567 /* x86 PIC cannot clobber ebx -- gcc bitches */
583 read_hypervisor_cpuid(struct lscpu_desc
*desc
)
585 unsigned int eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
586 char hyper_vendor_id
[13];
588 memset(hyper_vendor_id
, 0, sizeof(hyper_vendor_id
));
590 cpuid(HYPERVISOR_INFO_LEAF
, &eax
, &ebx
, &ecx
, &edx
);
591 memcpy(hyper_vendor_id
+ 0, &ebx
, 4);
592 memcpy(hyper_vendor_id
+ 4, &ecx
, 4);
593 memcpy(hyper_vendor_id
+ 8, &edx
, 4);
594 hyper_vendor_id
[12] = '\0';
596 if (!hyper_vendor_id
[0])
599 if (!strncmp("XenVMMXenVMM", hyper_vendor_id
, 12))
600 desc
->hyper
= HYPER_XEN
;
601 else if (!strncmp("KVMKVMKVM", hyper_vendor_id
, 9))
602 desc
->hyper
= HYPER_KVM
;
603 else if (!strncmp("Microsoft Hv", hyper_vendor_id
, 12))
604 desc
->hyper
= HYPER_MSHV
;
605 else if (!strncmp("VMwareVMware", hyper_vendor_id
, 12))
606 desc
->hyper
= HYPER_VMWARE
;
607 else if (!strncmp("UnisysSpar64", hyper_vendor_id
, 12))
608 desc
->hyper
= HYPER_SPAR
;
611 #else /* ! (__x86_64__ || __i386__) */
613 read_hypervisor_cpuid(struct lscpu_desc
*desc
__attribute__((__unused__
)))
618 static int is_devtree_compatible(struct lscpu_desc
*desc
, const char *str
)
620 FILE *fd
= ul_path_fopen(desc
->procfs
, "r", "device-tree/compatible");
626 memset(buf
, 0, sizeof(buf
));
627 len
= fread(buf
, 1, sizeof(buf
) - 1, fd
);
630 for (i
= 0; i
< len
;) {
631 if (!strcmp(&buf
[i
], str
))
633 i
+= strlen(&buf
[i
]);
642 read_hypervisor_powerpc(struct lscpu_desc
*desc
)
644 assert(!desc
->hyper
);
646 /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
647 if (ul_path_access(desc
->procfs
, F_OK
, "iSeries") == 0) {
648 desc
->hyper
= HYPER_OS400
;
649 desc
->virtype
= VIRT_PARA
;
651 /* PowerNV (POWER Non-Virtualized, bare-metal) */
652 } else if (is_devtree_compatible(desc
, "ibm,powernv")) {
653 desc
->hyper
= HYPER_NONE
;
654 desc
->virtype
= VIRT_NONE
;
656 /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
657 } else if (ul_path_access(desc
->procfs
, F_OK
, "device-tree/ibm,partition-name") == 0
658 && ul_path_access(desc
->procfs
, F_OK
, "device-tree/hmc-managed?") == 0
659 && ul_path_access(desc
->procfs
, F_OK
, "device-tree/chosen/qemu,graphic-width") != 0) {
662 desc
->hyper
= HYPER_PHYP
;
663 desc
->virtype
= VIRT_PARA
;
665 fd
= ul_path_fopen(desc
->procfs
, "r", "device-tree/ibm,partition-name");
668 if (fscanf(fd
, "%255s", buf
) == 1 && !strcmp(buf
, "full"))
669 desc
->virtype
= VIRT_NONE
;
674 } else if (is_devtree_compatible(desc
, "qemu,pseries")) {
675 desc
->hyper
= HYPER_KVM
;
676 desc
->virtype
= VIRT_PARA
;
681 #ifdef INCLUDE_VMWARE_BDOOR
683 #define VMWARE_BDOOR_MAGIC 0x564D5868
684 #define VMWARE_BDOOR_PORT 0x5658
685 #define VMWARE_BDOOR_CMD_GETVERSION 10
687 static UL_ASAN_BLACKLIST
688 void vmware_bdoor(uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
691 #if defined(__PIC__) && defined(__i386__)
692 /* x86 PIC cannot clobber ebx -- gcc bitches */
704 : "0" (VMWARE_BDOOR_MAGIC
),
705 "1" (VMWARE_BDOOR_CMD_GETVERSION
),
706 "2" (VMWARE_BDOOR_PORT
),
711 static jmp_buf segv_handler_env
;
714 segv_handler(__attribute__((__unused__
)) int sig
,
715 __attribute__((__unused__
)) siginfo_t
*info
,
716 __attribute__((__unused__
)) void *ignored
)
718 siglongjmp(segv_handler_env
, 1);
722 is_vmware_platform(void)
724 uint32_t eax
, ebx
, ecx
, edx
;
725 struct sigaction act
, oact
;
728 * FIXME: Not reliable for non-root users. Note it works as expected if
729 * vmware_bdoor() is not optimized for PIE, but then it fails to build
730 * on 32bit x86 systems. See lscpu git log for more details (commit
731 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
737 * The assembly routine for vmware detection works
738 * fine under vmware, even if ran as regular user. But
739 * on real HW or under other hypervisors, it segfaults (which is
740 * expected). So we temporarily install SIGSEGV handler to catch
741 * the signal. All this magic is needed because lscpu
742 * isn't supposed to require root privileges.
744 if (sigsetjmp(segv_handler_env
, 1))
747 memset(&act
, 0, sizeof(act
));
748 act
.sa_sigaction
= segv_handler
;
749 act
.sa_flags
= SA_SIGINFO
;
751 if (sigaction(SIGSEGV
, &act
, &oact
))
752 err(EXIT_FAILURE
, _("cannot set signal handler"));
754 vmware_bdoor(&eax
, &ebx
, &ecx
, &edx
);
756 if (sigaction(SIGSEGV
, &oact
, NULL
))
757 err(EXIT_FAILURE
, _("cannot restore signal handler"));
759 return eax
!= (uint32_t)-1 && ebx
== VMWARE_BDOOR_MAGIC
;
762 #else /* ! INCLUDE_VMWARE_BDOOR */
765 is_vmware_platform(void)
770 #endif /* INCLUDE_VMWARE_BDOOR */
773 read_hypervisor(struct lscpu_desc
*desc
, struct lscpu_modifier
*mod
)
777 /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
779 if ((fd
= ul_path_fopen(desc
->procfs
, "r", "sys/kernel/osrelease"))) {
782 if (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
783 if (strstr(buf
, "Microsoft")) {
784 desc
->hyper
= HYPER_WSL
;
785 desc
->virtype
= VIRT_CONT
;
793 if (mod
->system
!= SYSTEM_SNAPSHOT
) {
794 read_hypervisor_cpuid(desc
);
796 desc
->hyper
= read_hypervisor_dmi();
797 if (!desc
->hyper
&& is_vmware_platform())
798 desc
->hyper
= HYPER_VMWARE
;
802 desc
->virtype
= VIRT_FULL
;
804 if (desc
->hyper
== HYPER_XEN
) {
807 fd
= ul_prefix_fopen(desc
->prefix
, "r", _PATH_SYS_HYP_FEATURES
);
809 if (fd
&& fscanf(fd
, "%x", &features
) == 1) {
811 if (features
& XEN_FEATURES_PV_MASK
)
812 desc
->virtype
= VIRT_PARA
;
814 else if ((features
& XEN_FEATURES_PVH_MASK
)
815 == XEN_FEATURES_PVH_MASK
)
816 desc
->virtype
= VIRT_PARA
;
821 } else if (read_hypervisor_powerpc(desc
) > 0) {}
823 /* Xen para-virt or dom0 */
824 else if (ul_path_access(desc
->procfs
, F_OK
, "xen") == 0) {
827 fd
= ul_path_fopen(desc
->procfs
, "r", "xen/capabilities");
831 if (fscanf(fd
, "%255s", buf
) == 1 &&
832 !strcmp(buf
, "control_d"))
836 desc
->virtype
= dom0
? VIRT_NONE
: VIRT_PARA
;
837 desc
->hyper
= HYPER_XEN
;
839 /* Xen full-virt on non-x86_64 */
840 } else if (has_pci_device(desc
, hv_vendor_pci
[HYPER_XEN
], hv_graphics_pci
[HYPER_XEN
])) {
841 desc
->hyper
= HYPER_XEN
;
842 desc
->virtype
= VIRT_FULL
;
843 } else if (has_pci_device(desc
, hv_vendor_pci
[HYPER_VMWARE
], hv_graphics_pci
[HYPER_VMWARE
])) {
844 desc
->hyper
= HYPER_VMWARE
;
845 desc
->virtype
= VIRT_FULL
;
846 } else if (has_pci_device(desc
, hv_vendor_pci
[HYPER_VBOX
], hv_graphics_pci
[HYPER_VBOX
])) {
847 desc
->hyper
= HYPER_VBOX
;
848 desc
->virtype
= VIRT_FULL
;
851 } else if ((fd
= ul_path_fopen(desc
->procfs
, "r", "sysinfo"))) {
854 desc
->hyper
= HYPER_IBM
;
855 desc
->hypervisor
= "PR/SM";
856 desc
->virtype
= VIRT_FULL
;
857 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
860 if (!strstr(buf
, "Control Program:"))
862 if (!strstr(buf
, "KVM"))
863 desc
->hyper
= HYPER_IBM
;
865 desc
->hyper
= HYPER_KVM
;
866 p
= strchr(buf
, ':');
869 xasprintf(&str
, "%s", p
+ 1);
871 /* remove leading, trailing and repeating whitespace */
874 desc
->hypervisor
= str
;
875 str
+= strlen(str
) - 1;
876 while ((*str
== '\n') || (*str
== ' '))
878 while ((str
= strstr(desc
->hypervisor
, " ")))
879 memmove(str
, str
+ 1, strlen(str
));
885 /* OpenVZ/Virtuozzo - /proc/vz dir should exist
886 * /proc/bc should not */
887 else if (ul_path_access(desc
->procfs
, F_OK
, "vz") == 0 &&
888 ul_path_access(desc
->procfs
, F_OK
, "bc") != 0) {
889 desc
->hyper
= HYPER_PARALLELS
;
890 desc
->virtype
= VIRT_CONT
;
893 } else if (desc
->vendor
&&
894 (strcmp(desc
->vendor
, "PowerVM Lx86") == 0 ||
895 strcmp(desc
->vendor
, "IBM/S390") == 0)) {
896 desc
->hyper
= HYPER_IBM
;
897 desc
->virtype
= VIRT_FULL
;
899 /* User-mode-linux */
900 } else if (desc
->modelname
&& strstr(desc
->modelname
, "UML")) {
901 desc
->hyper
= HYPER_UML
;
902 desc
->virtype
= VIRT_PARA
;
905 } else if ((fd
= ul_path_fopen(desc
->procfs
, "r", "self/status"))) {
909 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
910 if (lookup(buf
, "VxID", &val
))
918 while (isdigit(*val
))
921 desc
->hyper
= HYPER_VSERVER
;
922 desc
->virtype
= VIRT_CONT
;
929 /* add @set to the @ary, unnecessary set is deallocated. */
930 static int add_cpuset_to_array(cpu_set_t
**ary
, int *items
, cpu_set_t
*set
)
933 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
938 for (i
= 0; i
< *items
; i
++) {
939 if (CPU_EQUAL_S(setsize
, set
, ary
[i
]))
952 read_topology(struct lscpu_desc
*desc
, int idx
)
954 cpu_set_t
*thread_siblings
, *core_siblings
;
955 cpu_set_t
*book_siblings
, *drawer_siblings
;
956 int coreid
, socketid
, bookid
, drawerid
;
957 int i
, num
= real_cpu_num(desc
, idx
);
959 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/topology/thread_siblings", num
) != 0)
962 ul_path_readf_cpuset(desc
->syscpu
, &thread_siblings
, maxcpus
,
963 "cpu%d/topology/thread_siblings", num
);
964 ul_path_readf_cpuset(desc
->syscpu
, &core_siblings
, maxcpus
,
965 "cpu%d/topology/core_siblings", num
);
966 ul_path_readf_cpuset(desc
->syscpu
, &book_siblings
, maxcpus
,
967 "cpu%d/topology/book_siblings", num
);
968 ul_path_readf_cpuset(desc
->syscpu
, &drawer_siblings
, maxcpus
,
969 "cpu%d/topology/drawer_siblings", num
);
971 if (ul_path_readf_s32(desc
->syscpu
, &coreid
, "cpu%d/topology/core_id", num
) != 0)
974 if (ul_path_readf_s32(desc
->syscpu
, &socketid
, "cpu%d/topology/physical_package_id", num
) != 0)
977 if (ul_path_readf_s32(desc
->syscpu
, &bookid
, "cpu%d/topology/book_id", num
) != 0)
980 if (ul_path_readf_s32(desc
->syscpu
, &drawerid
, "cpu%d/topology/drawer_id", num
) != 0)
983 if (!desc
->coremaps
) {
984 int ndrawers
, nbooks
, nsockets
, ncores
, nthreads
;
985 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
987 /* threads within one core */
988 nthreads
= CPU_COUNT_S(setsize
, thread_siblings
);
992 /* cores within one socket */
993 ncores
= CPU_COUNT_S(setsize
, core_siblings
) / nthreads
;
997 /* number of sockets within one book. Because of odd /
998 * non-present cpu maps and to keep calculation easy we make
999 * sure that nsockets and nbooks is at least 1.
1001 nsockets
= desc
->ncpus
/ nthreads
/ ncores
;
1005 /* number of books */
1006 nbooks
= desc
->ncpus
/ nthreads
/ ncores
/ nsockets
;
1010 /* number of drawers */
1011 ndrawers
= desc
->ncpus
/ nbooks
/ nthreads
/ ncores
/ nsockets
;
1015 /* all threads, see also read_basicinfo()
1016 * -- fallback for kernels without
1017 * /sys/devices/system/cpu/online.
1019 if (!desc
->nthreads
)
1020 desc
->nthreads
= ndrawers
* nbooks
* nsockets
* ncores
* nthreads
;
1022 /* For each map we make sure that it can have up to ncpuspos
1023 * entries. This is because we cannot reliably calculate the
1024 * number of cores, sockets and books on all architectures.
1025 * E.g. completely virtualized architectures like s390 may
1026 * have multiple sockets of different sizes.
1028 desc
->coremaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1029 desc
->socketmaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1030 desc
->coreids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1031 desc
->socketids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1032 for (i
= 0; i
< desc
->ncpuspos
; i
++)
1033 desc
->coreids
[i
] = desc
->socketids
[i
] = -1;
1034 if (book_siblings
) {
1035 desc
->bookmaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1036 desc
->bookids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1037 for (i
= 0; i
< desc
->ncpuspos
; i
++)
1038 desc
->bookids
[i
] = -1;
1040 if (drawer_siblings
) {
1041 desc
->drawermaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1042 desc
->drawerids
= xcalloc(desc
->ncpuspos
, sizeof(*desc
->drawerids
));
1043 for (i
= 0; i
< desc
->ncpuspos
; i
++)
1044 desc
->drawerids
[i
] = -1;
1048 add_cpuset_to_array(desc
->socketmaps
, &desc
->nsockets
, core_siblings
);
1049 desc
->coreids
[idx
] = coreid
;
1050 add_cpuset_to_array(desc
->coremaps
, &desc
->ncores
, thread_siblings
);
1051 desc
->socketids
[idx
] = socketid
;
1052 if (book_siblings
) {
1053 add_cpuset_to_array(desc
->bookmaps
, &desc
->nbooks
, book_siblings
);
1054 desc
->bookids
[idx
] = bookid
;
1056 if (drawer_siblings
) {
1057 add_cpuset_to_array(desc
->drawermaps
, &desc
->ndrawers
, drawer_siblings
);
1058 desc
->drawerids
[idx
] = drawerid
;
1063 read_polarization(struct lscpu_desc
*desc
, int idx
)
1066 int num
= real_cpu_num(desc
, idx
);
1068 if (desc
->dispatching
< 0)
1070 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/polarization", num
) != 0)
1072 if (!desc
->polarization
)
1073 desc
->polarization
= xcalloc(desc
->ncpuspos
, sizeof(int));
1075 ul_path_readf_buffer(desc
->syscpu
, mode
, sizeof(mode
), "cpu%d/polarization", num
);
1077 if (strncmp(mode
, "vertical:low", sizeof(mode
)) == 0)
1078 desc
->polarization
[idx
] = POLAR_VLOW
;
1079 else if (strncmp(mode
, "vertical:medium", sizeof(mode
)) == 0)
1080 desc
->polarization
[idx
] = POLAR_VMEDIUM
;
1081 else if (strncmp(mode
, "vertical:high", sizeof(mode
)) == 0)
1082 desc
->polarization
[idx
] = POLAR_VHIGH
;
1083 else if (strncmp(mode
, "horizontal", sizeof(mode
)) == 0)
1084 desc
->polarization
[idx
] = POLAR_HORIZONTAL
;
1086 desc
->polarization
[idx
] = POLAR_UNKNOWN
;
1090 read_address(struct lscpu_desc
*desc
, int idx
)
1092 int num
= real_cpu_num(desc
, idx
);
1094 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/address", num
) != 0)
1096 if (!desc
->addresses
)
1097 desc
->addresses
= xcalloc(desc
->ncpuspos
, sizeof(int));
1098 ul_path_readf_s32(desc
->syscpu
, &desc
->addresses
[idx
], "cpu%d/address", num
);
1102 read_configured(struct lscpu_desc
*desc
, int idx
)
1104 int num
= real_cpu_num(desc
, idx
);
1106 if (ul_path_accessf(desc
->syscpu
, F_OK
, "cpu%d/configure", num
) != 0)
1108 if (!desc
->configured
)
1109 desc
->configured
= xcalloc(desc
->ncpuspos
, sizeof(int));
1110 ul_path_readf_s32(desc
->syscpu
, &desc
->configured
[idx
], "cpu%d/configure", num
);
1113 /* Read overall maximum frequency of cpu */
1115 cpu_max_mhz(struct lscpu_desc
*desc
, char *buf
, size_t bufsz
)
1118 float cpu_freq
= 0.0;
1119 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1121 if (desc
->present
) {
1122 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1123 if (CPU_ISSET_S(real_cpu_num(desc
, i
), setsize
, desc
->present
)
1124 && desc
->maxmhz
[i
]) {
1125 float freq
= atof(desc
->maxmhz
[i
]);
1127 if (freq
> cpu_freq
)
1132 snprintf(buf
, bufsz
, "%.4f", cpu_freq
);
1136 /* Read overall minimum frequency of cpu */
1138 cpu_min_mhz(struct lscpu_desc
*desc
, char *buf
, size_t bufsz
)
1141 float cpu_freq
= -1.0;
1142 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1144 if (desc
->present
) {
1145 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1146 if (CPU_ISSET_S(real_cpu_num(desc
, i
), setsize
, desc
->present
)
1147 && desc
->minmhz
[i
]) {
1148 float freq
= atof(desc
->minmhz
[i
]);
1150 if (cpu_freq
< 0.0 || freq
< cpu_freq
)
1155 snprintf(buf
, bufsz
, "%.4f", cpu_freq
);
1161 read_max_mhz(struct lscpu_desc
*desc
, int idx
)
1163 int num
= real_cpu_num(desc
, idx
);
1166 if (ul_path_readf_s32(desc
->syscpu
, &mhz
, "cpu%d/cpufreq/cpuinfo_max_freq", num
) != 0)
1169 desc
->maxmhz
= xcalloc(desc
->ncpuspos
, sizeof(char *));
1170 xasprintf(&desc
->maxmhz
[idx
], "%.4f", (float) mhz
/ 1000);
1174 read_min_mhz(struct lscpu_desc
*desc
, int idx
)
1176 int num
= real_cpu_num(desc
, idx
);
1179 if (ul_path_readf_s32(desc
->syscpu
, &mhz
, "cpu%d/cpufreq/cpuinfo_min_freq", num
) != 0)
1182 desc
->minmhz
= xcalloc(desc
->ncpuspos
, sizeof(char *));
1183 xasprintf(&desc
->minmhz
[idx
], "%.4f", (float) mhz
/ 1000);
1187 cachecmp(const void *a
, const void *b
)
1189 struct cpu_cache
*c1
= (struct cpu_cache
*) a
;
1190 struct cpu_cache
*c2
= (struct cpu_cache
*) b
;
1192 return strcmp(c2
->name
, c1
->name
);
1196 read_cache(struct lscpu_desc
*desc
, int idx
)
1200 int num
= real_cpu_num(desc
, idx
);
1202 if (!desc
->ncaches
) {
1203 while (ul_path_accessf(desc
->syscpu
, F_OK
,
1204 "cpu%d/cache/index%d",
1205 num
, desc
->ncaches
) == 0)
1210 desc
->caches
= xcalloc(desc
->ncaches
, sizeof(*desc
->caches
));
1212 for (i
= 0; i
< desc
->ncaches
; i
++) {
1213 struct cpu_cache
*ca
= &desc
->caches
[i
];
1216 if (ul_path_accessf(desc
->syscpu
, F_OK
,
1217 "cpu%d/cache/index%d", num
, i
) != 0)
1220 int type
= 0, level
;
1223 if (ul_path_readf_buffer(desc
->syscpu
, buf
, sizeof(buf
),
1224 "cpu%d/cache/index%d/type", num
, i
) > 0) {
1225 if (!strcmp(buf
, "Data"))
1227 else if (!strcmp(buf
, "Instruction"))
1232 ul_path_readf_s32(desc
->syscpu
, &level
,
1233 "cpu%d/cache/index%d/level", num
, i
);
1235 snprintf(buf
, sizeof(buf
), "L%d%c", level
, type
);
1237 snprintf(buf
, sizeof(buf
), "L%d", level
);
1239 ca
->name
= xstrdup(buf
);
1242 if (ul_path_readf_string(desc
->syscpu
, &ca
->size
,
1243 "cpu%d/cache/index%d/size", num
, i
) < 0)
1244 ca
->size
= xstrdup("unknown size");
1247 /* information about how CPUs share different caches */
1248 ul_path_readf_cpuset(desc
->syscpu
, &map
, maxcpus
,
1249 "cpu%d/cache/index%d/shared_cpu_map", num
, i
);
1251 if (!ca
->sharedmaps
)
1252 ca
->sharedmaps
= xcalloc(desc
->ncpuspos
, sizeof(cpu_set_t
*));
1253 add_cpuset_to_array(ca
->sharedmaps
, &ca
->nsharedmaps
, map
);
1257 static inline int is_node_dirent(struct dirent
*d
)
1261 #ifdef _DIRENT_HAVE_D_TYPE
1262 (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
) &&
1264 strncmp(d
->d_name
, "node", 4) == 0 &&
1265 isdigit_string(d
->d_name
+ 4);
1269 nodecmp(const void *ap
, const void *bp
)
1271 int *a
= (int *) ap
, *b
= (int *) bp
;
/*
 * read_nodes - enumerate NUMA nodes from sysfs.
 *
 * NOTE(review): the text below was mangled by extraction -- every original
 * source line is split across several lines, prefixed with its original
 * line number, and some lines are missing entirely.  The surviving text is
 * preserved byte-for-byte; only comments are added.
 *
 * What the visible code shows: opens _PATH_SYS_NODE (with desc->prefix
 * applied), counts "node<N>" directory entries into desc->nnodes, allocates
 * desc->nodemaps and desc->idx2nodenum, parses the node number out of each
 * entry name, sorts the numbers with nodecmp(), and reads each node's
 * "node%d/cpumap" cpuset into desc->nodemaps[i].
 */
1276 read_nodes(struct lscpu_desc
*desc
)
1281 struct path_cxt
*sysnode
;
/* Handler for /sys/devices/system/node; fatal error if it cannot be created. */
1285 sysnode
= ul_new_path(_PATH_SYS_NODE
);
1287 err(EXIT_FAILURE
, _("failed to initialize %s handler"), _PATH_SYS_NODE
);
1288 ul_path_set_prefix(sysnode
, desc
->prefix
);
1290 dir
= ul_path_opendir(sysnode
, NULL
);
/* First pass: count node directories (counting statement lost to garbling). */
1294 while ((d
= readdir(dir
))) {
1295 if (is_node_dirent(d
))
1299 if (!desc
->nnodes
) {
1304 desc
->nodemaps
= xcalloc(desc
->nnodes
, sizeof(cpu_set_t
*));
1305 desc
->idx2nodenum
= xmalloc(desc
->nnodes
* sizeof(int));
/* Second pass (presumably after a rewinddir -- line missing): extract node numbers. */
1308 while ((d
= readdir(dir
)) && i
< desc
->nnodes
) {
1309 if (is_node_dirent(d
))
1310 desc
->idx2nodenum
[i
++] = strtol_or_err(((d
->d_name
) + 4),
1311 _("Failed to extract the node number"));
1314 qsort(desc
->idx2nodenum
, desc
->nnodes
, sizeof(int), nodecmp
);
1316 /* information about how nodes share different CPUs */
1317 for (i
= 0; i
< desc
->nnodes
; i
++)
1318 ul_path_readf_cpuset(sysnode
, &desc
->nodemaps
[i
], maxcpus
,
1319 "node%d/cpumap", desc
->idx2nodenum
[i
]);
1321 ul_unref_path(sysnode
);
/*
 * get_cell_data - format the value of one output cell into buf.
 *
 * NOTE(review): mangled by extraction (see file-wide note); text preserved
 * byte-for-byte, comments only added.  The visible code is the body of a
 * switch over column ids (most `case COL_*:' labels are missing from the
 * garbled text; COL_POLARIZATION and COL_CONFIGURED survive).  `idx' is a
 * logical CPU index, `col' the column id, the result is written to
 * buf/bufsz.  With mod->physical set, the raw sysfs ids are printed
 * (or "-" when unknown, id == -1); otherwise a zero-based logical id is
 * derived from the per-topology cpuset arrays via cpuset_ary_isset().
 */
1325 get_cell_data(struct lscpu_desc
*desc
, int idx
, int col
,
1326 struct lscpu_modifier
*mod
,
1327 char *buf
, size_t bufsz
)
1329 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1331 int cpu
= real_cpu_num(desc
, idx
);
/* CPU column: print the real CPU number. */
1337 snprintf(buf
, bufsz
, "%d", cpu
);
/* CORE column: physical core id, or logical index in desc->coremaps. */
1340 if (mod
->physical
) {
1341 if (desc
->coreids
[idx
] == -1)
1342 snprintf(buf
, bufsz
, "-");
1344 snprintf(buf
, bufsz
, "%d", desc
->coreids
[idx
]);
1346 if (cpuset_ary_isset(cpu
, desc
->coremaps
,
1347 desc
->ncores
, setsize
, &i
) == 0)
1348 snprintf(buf
, bufsz
, "%zu", i
);
/* SOCKET column: same pattern with socketids/socketmaps. */
1352 if (mod
->physical
) {
1353 if (desc
->socketids
[idx
] == -1)
1354 snprintf(buf
, bufsz
, "-");
1356 snprintf(buf
, bufsz
, "%d", desc
->socketids
[idx
]);
1358 if (cpuset_ary_isset(cpu
, desc
->socketmaps
,
1359 desc
->nsockets
, setsize
, &i
) == 0)
1360 snprintf(buf
, bufsz
, "%zu", i
);
/* NODE column: NUMA node number looked up through nodemaps/idx2nodenum. */
1364 if (cpuset_ary_isset(cpu
, desc
->nodemaps
,
1365 desc
->nnodes
, setsize
, &i
) == 0)
1366 snprintf(buf
, bufsz
, "%d", desc
->idx2nodenum
[i
]);
/* DRAWER column (s390): drawerids/drawermaps. */
1369 if (mod
->physical
) {
1370 if (desc
->drawerids
[idx
] == -1)
1371 snprintf(buf
, bufsz
, "-");
1373 snprintf(buf
, bufsz
, "%d", desc
->drawerids
[idx
]);
1375 if (cpuset_ary_isset(cpu
, desc
->drawermaps
,
1376 desc
->ndrawers
, setsize
, &i
) == 0)
1377 snprintf(buf
, bufsz
, "%zu", i
);
/* BOOK column (s390): bookids/bookmaps. */
1381 if (mod
->physical
) {
1382 if (desc
->bookids
[idx
] == -1)
1383 snprintf(buf
, bufsz
, "-");
1385 snprintf(buf
, bufsz
, "%d", desc
->bookids
[idx
]);
1387 if (cpuset_ary_isset(cpu
, desc
->bookmaps
,
1388 desc
->nbooks
, setsize
, &i
) == 0)
1389 snprintf(buf
, bufsz
, "%zu", i
);
/* CACHE column: one id per cache, highest level first, joined with
 * ',' (compat mode) or ':'; `p'/`sz' track the write position in buf. */
1398 for (j
= desc
->ncaches
- 1; j
>= 0; j
--) {
1399 struct cpu_cache
*ca
= &desc
->caches
[j
];
1401 if (cpuset_ary_isset(cpu
, ca
->sharedmaps
,
1402 ca
->nsharedmaps
, setsize
, &i
) == 0) {
1403 int x
= snprintf(p
, sz
, "%zu", i
);
1404 if (x
< 0 || (size_t) x
>= sz
)
1412 *p
++ = mod
->compat
? ',' : ':';
1419 case COL_POLARIZATION
:
1420 if (desc
->polarization
) {
1421 int x
= desc
->polarization
[idx
];
/* Parsable output wants the short token, readable output the long form. */
1423 snprintf(buf
, bufsz
, "%s",
1424 mod
->mode
== OUTPUT_PARSABLE
?
1425 polar_modes
[x
].parsable
:
1426 polar_modes
[x
].readable
);
/* ADDRESS column (s390 CPU address). */
1430 if (desc
->addresses
)
1431 snprintf(buf
, bufsz
, "%d", desc
->addresses
[idx
]);
1433 case COL_CONFIGURED
:
1434 if (!desc
->configured
)
1436 if (mod
->mode
== OUTPUT_PARSABLE
)
1437 snprintf(buf
, bufsz
, "%s",
1438 desc
->configured
[idx
] ? _("Y") : _("N"));
1440 snprintf(buf
, bufsz
, "%s",
1441 desc
->configured
[idx
] ? _("yes") : _("no"));
/* ONLINE column: Y/N for parsable, yes/no for readable output. */
1446 if (mod
->mode
== OUTPUT_PARSABLE
)
1447 snprintf(buf
, bufsz
, "%s",
1448 is_cpu_online(desc
, cpu
) ? _("Y") : _("N"));
1450 snprintf(buf
, bufsz
, "%s",
1451 is_cpu_online(desc
, cpu
) ? _("yes") : _("no"));
/* MAXMHZ / MINMHZ columns: copy the pre-read frequency strings. */
1454 if (desc
->maxmhz
&& desc
->maxmhz
[idx
])
1455 xstrncpy(buf
, desc
->maxmhz
[idx
], bufsz
);
1458 if (desc
->minmhz
&& desc
->minmhz
[idx
])
1459 xstrncpy(buf
, desc
->minmhz
[idx
], bufsz
);
/*
 * get_cell_header - format the header text for one output column into buf.
 *
 * NOTE(review): mangled by extraction; text preserved byte-for-byte,
 * comments only added.  For COL_CACHE the header is a concatenation of all
 * cache names, highest level first, separated by ',' (compat mode) or ':';
 * for every other column the static name from the coldescs table is used.
 */
1466 get_cell_header(struct lscpu_desc
*desc
, int col
,
1467 struct lscpu_modifier
*mod
,
1468 char *buf
, size_t bufsz
)
1472 if (col
== COL_CACHE
) {
1477 for (i
= desc
->ncaches
- 1; i
>= 0; i
--) {
1478 int x
= snprintf(p
, sz
, "%s", desc
->caches
[i
].name
);
/* Bail out of the loop on snprintf error or truncation. */
1479 if (x
< 0 || (size_t) x
>= sz
)
1486 *p
++ = mod
->compat
? ',' : ':';
/* Default: static column name from the descriptor table. */
1494 snprintf(buf
, bufsz
, "%s", coldescs
[col
].name
);
1499 * [-p] backend, we support two parsable formats:
1501 * 1) "compatible" -- this format is compatible with the original lscpu(1)
1502 * output and it contains fixed set of the columns. The CACHE columns are at
1503 * the end of the line and the CACHE is not printed if the number of the caches
1504 * is zero. The CACHE columns are separated by two commas, for example:
1507 * # CPU,Core,Socket,Node,,L1d,L1i,L2
1511 * 2) "user defined output" -- this format prints always all columns without
1512 * special prefix for CACHE column. If there are not CACHEs then the column is
1513 * empty and the header "Cache" is printed rather than a real name of the cache.
1514 * The CACHE columns are separated by ':'.
1516 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
1517 * # CPU,Core,Socket,Node,L1d:L1i:L2
/*
 * print_parsable - emit the -p/--parse output format.
 *
 * NOTE(review): mangled by extraction; text preserved byte-for-byte,
 * comments only added.  Prints an explanatory "#" banner, then a "#"-
 * prefixed header line, then one comma-separated line per CPU.  In compat
 * mode the CACHE column is suppressed when there are no caches and gets an
 * extra separator otherwise (see the format description comment above this
 * function in the file).
 */
1522 print_parsable(struct lscpu_desc
*desc
, int cols
[], int ncols
,
1523 struct lscpu_modifier
*mod
)
1525 char buf
[BUFSIZ
], *data
;
1532 "# The following is the parsable format, which can be fed to other\n"
1533 "# programs. Each different item in every column has an unique ID\n"
1534 "# starting from zero.\n"));
/* Header line. */
1536 fputs("# ", stdout
);
1537 for (i
= 0; i
< ncols
; i
++) {
1540 if (col
== COL_CACHE
) {
1541 if (mod
->compat
&& !desc
->ncaches
)
1543 if (mod
->compat
&& i
!= 0)
1549 data
= get_cell_header(desc
, col
, mod
, buf
, sizeof(buf
));
1551 if (data
&& * data
&& col
!= COL_CACHE
&&
1552 !coldescs
[col
].is_abbr
) {
1554 * For normal column names use mixed case (e.g. "Socket")
/* Lower-case everything after the first character of the header. */
1558 while (p
&& *p
!= '\0') {
1559 *p
= tolower((unsigned int) *p
);
1563 fputs(data
&& *data
? data
: "", stdout
);
/* Data lines: one per logical CPU position, honoring online/offline
 * and present filters from the modifiers. */
1570 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1572 int cpu
= real_cpu_num(desc
, i
);
1574 if (!mod
->offline
&& desc
->online
&& !is_cpu_online(desc
, cpu
))
1576 if (!mod
->online
&& desc
->online
&& is_cpu_online(desc
, cpu
))
1578 if (desc
->present
&& !is_cpu_present(desc
, cpu
))
1580 for (c
= 0; c
< ncols
; c
++) {
1581 if (mod
->compat
&& cols
[c
] == COL_CACHE
) {
1590 data
= get_cell_data(desc
, i
, cols
[c
], mod
,
1592 fputs(data
&& *data
? data
: "", stdout
);
/*
 * print_readable - emit the -e/--extended output as a libsmartcols table.
 *
 * NOTE(review): mangled by extraction; text preserved byte-for-byte,
 * comments only added.  Builds a "cpus" table (JSON-capable), one column
 * per requested col id and one line per CPU that passes the same
 * online/offline/present filters as print_parsable().
 */
1602 print_readable(struct lscpu_desc
*desc
, int cols
[], int ncols
,
1603 struct lscpu_modifier
*mod
)
1608 struct libscols_table
*table
;
1610 scols_init_debug(0);
1612 table
= scols_new_table();
1614 err(EXIT_FAILURE
, _("failed to allocate output table"));
/* JSON enabling presumably guarded by mod->json -- condition lost to garbling. */
1616 scols_table_enable_json(table
, 1);
1617 scols_table_set_name(table
, "cpus");
/* One output column per requested column id. */
1620 for (i
= 0; i
< ncols
; i
++) {
1621 data
= get_cell_header(desc
, cols
[i
], mod
, buf
, sizeof(buf
));
1622 if (!scols_table_new_column(table
, data
, 0, 0))
1623 err(EXIT_FAILURE
, _("failed to allocate output column"));
/* One table line per CPU that survives the filters. */
1626 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1628 struct libscols_line
*line
;
1629 int cpu
= real_cpu_num(desc
, i
);
1631 if (!mod
->offline
&& desc
->online
&& !is_cpu_online(desc
, cpu
))
1633 if (!mod
->online
&& desc
->online
&& is_cpu_online(desc
, cpu
))
1635 if (desc
->present
&& !is_cpu_present(desc
, cpu
))
1638 line
= scols_table_new_line(table
, NULL
);
1640 err(EXIT_FAILURE
, _("failed to allocate output line"));
1642 for (c
= 0; c
< ncols
; c
++) {
1643 data
= get_cell_data(desc
, i
, cols
[c
], mod
,
1645 if (!data
|| !*data
)
1647 if (scols_line_set_data(line
, c
, data
))
1648 err(EXIT_FAILURE
, _("failed to add output data"));
1652 scols_print_table(table
);
1653 scols_unref_table(table
);
/*
 * add_summary_sprint - append one "field: data" row to the summary table.
 *
 * NOTE(review): mangled by extraction; text preserved byte-for-byte,
 * comments only added.  printf-checked varargs (format is argument 3):
 * column 0 gets the literal `txt' label, column 1 gets the formatted
 * value; scols_line_refer_data() transfers ownership of the xvasprintf()
 * allocation to the table.
 */
1657 static void __attribute__ ((__format__(printf
, 3, 4)))
1658 add_summary_sprint(struct libscols_table
*tb
,
1663 struct libscols_line
*ln
= scols_table_new_line(tb
, NULL
);
1668 err(EXIT_FAILURE
, _("failed to allocate output line"));
1670 /* description column */
1671 scols_line_set_data(ln
, 0, txt
);
/* data column: format the varargs into a heap string. */
1674 va_start(args
, fmt
);
1675 xvasprintf(&data
, fmt
, args
);
1678 if (data
&& scols_line_refer_data(ln
, 1, data
))
1679 err(EXIT_FAILURE
, _("failed to add output data"));
/* Convenience wrappers around add_summary_sprint() for a single int ("%d")
 * or string ("%s") value. */
1682 #define add_summary_n(tb, txt, num) add_summary_sprint(tb, txt, "%d", num)
1683 #define add_summary_s(tb, txt, str) add_summary_sprint(tb, txt, "%s", str)
/*
 * print_cpuset - add a summary row showing a CPU set as mask or list.
 *
 * NOTE(review): mangled by extraction; text preserved byte-for-byte,
 * comments only added.  The two cpumask_create()/cpulist_create() calls
 * are presumably the branches of an `if (hex) ... else ...' whose lines
 * were lost.  setbuf is a VLA of 7 * maxcpus bytes (worst-case list form).
 */
1686 print_cpuset(struct libscols_table
*tb
,
1687 const char *key
, cpu_set_t
*set
, int hex
)
1689 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1690 size_t setbuflen
= 7 * maxcpus
;
1691 char setbuf
[setbuflen
], *p
;
/* hex branch: render as hexadecimal mask. */
1694 p
= cpumask_create(setbuf
, setbuflen
, set
, setsize
);
1695 add_summary_s(tb
, key
, p
);
/* list branch: render as a range list (e.g. "0-3,8"). */
1697 p
= cpulist_create(setbuf
, setbuflen
, set
, setsize
);
1698 add_summary_s(tb
, key
, p
);
1702 static int get_cache_full_size(struct lscpu_desc
*desc
, int idx
, uint64_t *res
)
1704 struct cpu_cache
*ca
= &desc
->caches
[idx
];
1705 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1706 int i
, nshares
= 0, rc
;
1709 /* Convert size to number */
1710 rc
= parse_size(ca
->size
, &sz
, NULL
);
1714 /* Count number of CPUs which shares the cache */
1715 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1716 int cpu
= real_cpu_num(desc
, i
);
1718 if (desc
->present
&& !is_cpu_present(desc
, cpu
))
1720 if (CPU_ISSET_S(cpu
, setsize
, ca
->sharedmaps
[0]))
1724 /* Correction for CPU threads */
1725 if (desc
->nthreads
> desc
->ncores
)
1726 nshares
/= (desc
->nthreads
/ desc
->ncores
);
1728 *res
= (desc
->ncores
/ nshares
) * sz
;
/*
 * print_summary - emit the default lscpu summary (no options) as a
 * two-column "field / data" libsmartcols table.
 *
 * NOTE(review): mangled by extraction -- every original source line is
 * split across several lines, prefixed with its original line number, and
 * many lines (conditions, closing braces, some arguments) are missing.
 * The surviving text is preserved byte-for-byte; only comments are added.
 */
1736 print_summary(struct lscpu_desc
*desc
, struct lscpu_modifier
*mod
)
1740 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
1741 struct libscols_table
*tb
;
1743 scols_init_debug(0);
1745 tb
= scols_new_table();
1747 err(EXIT_FAILURE
, _("failed to allocate output table"));
1749 scols_table_enable_noheadings(tb
, 1);
/* JSON output presumably guarded by mod->json -- condition lost to garbling. */
1751 scols_table_enable_json(tb
, 1);
1752 scols_table_set_name(tb
, "lscpu");
1755 if (scols_table_new_column(tb
, "field", 0, 0) == NULL
||
1756 scols_table_new_column(tb
, "data", 0, SCOLS_FL_NOEXTREMES
) == NULL
)
1757 err(EXIT_FAILURE
, _("failed to initialize output column"));
1759 add_summary_s(tb
, _("Architecture:"), desc
->arch
);
/* Build the "CPU op-mode(s):" string from the MODE_* bitmask. */
1763 if (desc
->mode
& MODE_32BIT
) {
1764 strcpy(p
, "32-bit, ");
1767 if (desc
->mode
& MODE_64BIT
) {
1768 strcpy(p
, "64-bit, ");
1772 add_summary_s(tb
, _("CPU op-mode(s):"), buf
);
1774 #if !defined(WORDS_BIGENDIAN)
1775 add_summary_s(tb
, _("Byte Order:"), "Little Endian");
1777 add_summary_s(tb
, _("Byte Order:"), "Big Endian");
1781 add_summary_s(tb
, _("Address sizes:"), desc
->addrsz
);
1783 add_summary_n(tb
, _("CPU(s):"), desc
->ncpus
);
1786 print_cpuset(tb
, mod
->hex
? _("On-line CPU(s) mask:") :
1787 _("On-line CPU(s) list:"),
1788 desc
->online
, mod
->hex
);
1790 if (desc
->online
&& CPU_COUNT_S(setsize
, desc
->online
) != desc
->ncpus
) {
1793 /* Linux kernel provides cpuset of off-line CPUs that contains
1794 * all configured CPUs (see /sys/devices/system/cpu/offline),
1795 * but want to print real (present in system) off-line CPUs only.
1797 set
= cpuset_alloc(maxcpus
, NULL
, NULL
);
1799 err(EXIT_FAILURE
, _("failed to callocate cpu set"));
1800 CPU_ZERO_S(setsize
, set
);
1801 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
1802 int cpu
= real_cpu_num(desc
, i
);
1803 if (!is_cpu_online(desc
, cpu
) && is_cpu_present(desc
, cpu
))
1804 CPU_SET_S(cpu
, setsize
, set
);
1806 print_cpuset(tb
, mod
->hex
? _("Off-line CPU(s) mask:") :
1807 _("Off-line CPU(s) list:"),
/* Topology counts; on s390 they come from /proc/sysinfo when available. */
1812 if (desc
->nsockets
) {
1813 int threads_per_core
, cores_per_socket
, sockets_per_book
;
1814 int books_per_drawer
, drawers
;
1817 threads_per_core
= cores_per_socket
= sockets_per_book
= 0;
1818 books_per_drawer
= drawers
= 0;
1819 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1820 * Using simply the cpu topology masks in sysfs will not give
1821 * usable results since everything is virtualized. E.g.
1822 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1824 * If the cpu topology is not exported (e.g. 2nd level guest)
1825 * fall back to old calculation scheme.
1827 if ((fd
= ul_path_fopen(desc
->procfs
, "r", "sysinfo"))) {
1830 while (fd
&& fgets(buf
, sizeof(buf
), fd
) != NULL
) {
1831 if (sscanf(buf
, "CPU Topology SW:%d%d%d%d%d%d",
1832 &t0
, &t1
, &drawers
, &books_per_drawer
,
1834 &cores_per_socket
) == 6)
/* "?:"" below is the GNU elvis operator: sysinfo value if non-zero,
 * otherwise the ratio derived from the sysfs cpuset counts. */
1841 threads_per_core
= atoi(desc
->mtid
) + 1;
1842 add_summary_n(tb
, _("Thread(s) per core:"),
1843 threads_per_core
?: desc
->nthreads
/ desc
->ncores
);
1844 add_summary_n(tb
, _("Core(s) per socket:"),
1845 cores_per_socket
?: desc
->ncores
/ desc
->nsockets
);
1847 add_summary_n(tb
, _("Socket(s) per book:"),
1848 sockets_per_book
?: desc
->nsockets
/ desc
->nbooks
);
1849 if (desc
->ndrawers
) {
1850 add_summary_n(tb
, _("Book(s) per drawer:"),
1851 books_per_drawer
?: desc
->nbooks
/ desc
->ndrawers
);
1852 add_summary_n(tb
, _("Drawer(s):"), drawers
?: desc
->ndrawers
);
1854 add_summary_n(tb
, _("Book(s):"), books_per_drawer
?: desc
->nbooks
);
1857 add_summary_n(tb
, _("Socket(s):"), sockets_per_book
?: desc
->nsockets
);
1861 add_summary_n(tb
, _("NUMA node(s):"), desc
->nnodes
);
/* Identification strings parsed from /proc/cpuinfo and friends;
 * each presumably printed only when present (guards partly lost). */
1863 add_summary_s(tb
, _("Vendor ID:"), desc
->vendor
);
1864 if (desc
->machinetype
)
1865 add_summary_s(tb
, _("Machine type:"), desc
->machinetype
);
1867 add_summary_s(tb
, _("CPU family:"), desc
->family
);
1868 if (desc
->model
|| desc
->revision
)
1869 add_summary_s(tb
, _("Model:"), desc
->revision
? desc
->revision
: desc
->model
);
1870 if (desc
->modelname
|| desc
->cpu
)
1871 add_summary_s(tb
, _("Model name:"), desc
->cpu
? desc
->cpu
: desc
->modelname
);
1873 add_summary_s(tb
, _("Stepping:"), desc
->stepping
);
1874 if (desc
->freqboost
>= 0)
1875 add_summary_s(tb
, _("Frequency boost:"), desc
->freqboost
?
1876 _("enabled") : _("disabled"));
1878 add_summary_s(tb
, _("CPU MHz:"), desc
->mhz
);
1879 if (desc
->dynamic_mhz
)
1880 add_summary_s(tb
, _("CPU dynamic MHz:"), desc
->dynamic_mhz
);
1881 if (desc
->static_mhz
)
1882 add_summary_s(tb
, _("CPU static MHz:"), desc
->static_mhz
);
1884 add_summary_s(tb
, _("CPU max MHz:"), cpu_max_mhz(desc
, buf
, sizeof(buf
)));
1886 add_summary_s(tb
, _("CPU min MHz:"), cpu_min_mhz(desc
, buf
, sizeof(buf
)));
1888 add_summary_s(tb
, _("BogoMIPS:"), desc
->bogomips
);
/* Virtualization capability: x86 cpuid flag svm (AMD) or vmx (Intel). */
1889 if (desc
->virtflag
) {
1890 if (!strcmp(desc
->virtflag
, "svm"))
1891 add_summary_s(tb
, _("Virtualization:"), "AMD-V");
1892 else if (!strcmp(desc
->virtflag
, "vmx"))
1893 add_summary_s(tb
, _("Virtualization:"), "VT-x");
1895 if (desc
->hypervisor
)
1896 add_summary_s(tb
, _("Hypervisor:"), desc
->hypervisor
);
1898 add_summary_s(tb
, _("Hypervisor vendor:"), hv_vendors
[desc
->hyper
]);
1899 add_summary_s(tb
, _("Virtualization type:"), _(virt_types
[desc
->virtype
]));
1901 if (desc
->dispatching
>= 0)
1902 add_summary_s(tb
, _("Dispatching mode:"), _(disp_modes
[desc
->dispatching
]));
/* Per-level cache totals, highest level first; full machine-wide size
 * computed by get_cache_full_size() and humanized. */
1903 if (desc
->ncaches
) {
1904 for (i
= desc
->ncaches
- 1; i
>= 0; i
--) {
1908 if (get_cache_full_size(desc
, i
, &sz
) != 0)
1910 tmp
= size_to_human_string(
1911 SIZE_SUFFIX_3LETTER
| SIZE_SUFFIX_SPACE
,
1913 snprintf(buf
, sizeof(buf
),
1914 _("%s cache: "), desc
->caches
[i
].name
);
1915 add_summary_s(tb
, buf
, tmp
);
/* "Extra" caches (e.g. s390 ecaches) print their raw size string. */
1919 if (desc
->necaches
) {
1920 for (i
= desc
->necaches
- 1; i
>= 0; i
--) {
1921 snprintf(buf
, sizeof(buf
),
1922 _("%s cache:"), desc
->ecaches
[i
].name
);
1923 add_summary_s(tb
, buf
, desc
->ecaches
[i
].size
);
/* Per-NUMA-node CPU lists/masks. */
1927 for (i
= 0; i
< desc
->nnodes
; i
++) {
1928 snprintf(buf
, sizeof(buf
), _("NUMA node%d CPU(s):"), desc
->idx2nodenum
[i
]);
1929 print_cpuset(tb
, buf
, desc
->nodemaps
[i
], mod
->hex
);
/* PowerPC physical topology (from librtas), when detected. */
1932 if (desc
->physsockets
) {
1933 add_summary_n(tb
, _("Physical sockets:"), desc
->physsockets
);
1934 add_summary_n(tb
, _("Physical chips:"), desc
->physchips
);
1935 add_summary_n(tb
, _("Physical cores/chip:"), desc
->physcoresperchip
);
1939 add_summary_s(tb
, _("Flags:"), desc
->flags
);
1941 scols_print_table(tb
);
1942 scols_unref_table(tb
);
/*
 * usage - print the --help text and exit (noreturn).
 *
 * NOTE(review): mangled by extraction; text preserved byte-for-byte,
 * comments only added.  `out' is presumably stdout (declaration lost).
 * The option strings are runtime output and must not be edited here.
 */
1945 static void __attribute__((__noreturn__
)) usage(void)
1950 fputs(USAGE_HEADER
, out
);
1951 fprintf(out
, _(" %s [options]\n"), program_invocation_short_name
);
1953 fputs(USAGE_SEPARATOR
, out
);
1954 fputs(_("Display information about the CPU architecture.\n"), out
);
1956 fputs(USAGE_OPTIONS
, out
);
1957 fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out
);
1958 fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out
);
1959 fputs(_(" -c, --offline print offline CPUs only\n"), out
);
1960 fputs(_(" -J, --json use JSON for default or extended format\n"), out
);
1961 fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out
);
1962 fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out
);
1963 fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out
);
1964 fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out
);
1965 fputs(_(" -y, --physical print physical instead of logical IDs\n"), out
);
1966 fputs(USAGE_SEPARATOR
, out
);
1967 printf(USAGE_HELP_OPTIONS(25));
/* List every supported output column with its help text. */
1969 fputs(USAGE_COLUMNS
, out
);
1970 for (i
= 0; i
< ARRAY_SIZE(coldescs
); i
++)
1971 fprintf(out
, " %13s %s\n", coldescs
[i
].name
, _(coldescs
[i
].help
));
1973 printf(USAGE_MAN_TAIL("lscpu(1)"));
1978 int main(int argc
, char *argv
[])
1980 struct lscpu_modifier _mod
= { .mode
= OUTPUT_SUMMARY
}, *mod
= &_mod
;
1981 struct lscpu_desc _desc
= { .flags
= NULL
}, *desc
= &_desc
;
1983 int columns
[ARRAY_SIZE(coldescs
)], ncolumns
= 0;
1984 int cpu_modifier_specified
= 0;
1988 OPT_OUTPUT_ALL
= CHAR_MAX
+ 1,
1990 static const struct option longopts
[] = {
1991 { "all", no_argument
, NULL
, 'a' },
1992 { "online", no_argument
, NULL
, 'b' },
1993 { "offline", no_argument
, NULL
, 'c' },
1994 { "help", no_argument
, NULL
, 'h' },
1995 { "extended", optional_argument
, NULL
, 'e' },
1996 { "json", no_argument
, NULL
, 'J' },
1997 { "parse", optional_argument
, NULL
, 'p' },
1998 { "sysroot", required_argument
, NULL
, 's' },
1999 { "physical", no_argument
, NULL
, 'y' },
2000 { "hex", no_argument
, NULL
, 'x' },
2001 { "version", no_argument
, NULL
, 'V' },
2002 { "output-all", no_argument
, NULL
, OPT_OUTPUT_ALL
},
2003 { NULL
, 0, NULL
, 0 }
2006 static const ul_excl_t excl
[] = { /* rows and cols in ASCII order */
2011 int excl_st
[ARRAY_SIZE(excl
)] = UL_EXCL_STATUS_INIT
;
2013 setlocale(LC_ALL
, "");
2014 bindtextdomain(PACKAGE
, LOCALEDIR
);
2015 textdomain(PACKAGE
);
2016 atexit(close_stdout
);
2018 while ((c
= getopt_long(argc
, argv
, "abce::hJp::s:xyV", longopts
, NULL
)) != -1) {
2020 err_exclusive_options(c
, longopts
, excl
, excl_st
);
2024 mod
->online
= mod
->offline
= 1;
2025 cpu_modifier_specified
= 1;
2029 cpu_modifier_specified
= 1;
2033 cpu_modifier_specified
= 1;
2045 ncolumns
= string_to_idarray(optarg
,
2046 columns
, ARRAY_SIZE(columns
),
2049 return EXIT_FAILURE
;
2051 mod
->mode
= c
== 'p' ? OUTPUT_PARSABLE
: OUTPUT_READABLE
;
2054 desc
->prefix
= optarg
;
2055 mod
->system
= SYSTEM_SNAPSHOT
;
2064 printf(UTIL_LINUX_VERSION
);
2065 return EXIT_SUCCESS
;
2066 case OPT_OUTPUT_ALL
:
2069 for (sz
= 0; sz
< ARRAY_SIZE(coldescs
); sz
++)
2074 errtryhelp(EXIT_FAILURE
);
2078 if (cpu_modifier_specified
&& mod
->mode
== OUTPUT_SUMMARY
) {
2080 _("%s: options --all, --online and --offline may only "
2081 "be used with options --extended or --parse.\n"),
2082 program_invocation_short_name
);
2083 return EXIT_FAILURE
;
2086 if (argc
!= optind
) {
2087 warnx(_("bad usage"));
2088 errtryhelp(EXIT_FAILURE
);
2091 /* set default cpu display mode if none was specified */
2092 if (!mod
->online
&& !mod
->offline
) {
2094 mod
->offline
= mod
->mode
== OUTPUT_READABLE
? 1 : 0;
2097 ul_path_init_debug();
2099 /* /sys/devices/system/cpu */
2100 desc
->syscpu
= ul_new_path(_PATH_SYS_CPU
);
2102 err(EXIT_FAILURE
, _("failed to initialize CPUs sysfs handler"));
2104 ul_path_set_prefix(desc
->syscpu
, desc
->prefix
);
2107 desc
->procfs
= ul_new_path("/proc");
2109 err(EXIT_FAILURE
, _("failed to initialize procfs handler"));
2111 ul_path_set_prefix(desc
->procfs
, desc
->prefix
);
2113 read_basicinfo(desc
, mod
);
2115 setsize
= CPU_ALLOC_SIZE(maxcpus
);
2117 for (i
= 0; i
< desc
->ncpuspos
; i
++) {
2118 /* only consider present CPUs */
2119 if (desc
->present
&&
2120 !CPU_ISSET_S(real_cpu_num(desc
, i
), setsize
, desc
->present
))
2122 read_topology(desc
, i
);
2123 read_cache(desc
, i
);
2124 read_polarization(desc
, i
);
2125 read_address(desc
, i
);
2126 read_configured(desc
, i
);
2127 read_max_mhz(desc
, i
);
2128 read_min_mhz(desc
, i
);
2132 qsort(desc
->caches
, desc
->ncaches
,
2133 sizeof(struct cpu_cache
), cachecmp
);
2136 qsort(desc
->ecaches
, desc
->necaches
,
2137 sizeof(struct cpu_cache
), cachecmp
);
2140 read_hypervisor(desc
, mod
);
2141 arm_cpu_decode(desc
);
2144 case OUTPUT_SUMMARY
:
2145 print_summary(desc
, mod
);
2147 case OUTPUT_PARSABLE
:
2149 columns
[ncolumns
++] = COL_CPU
;
2150 columns
[ncolumns
++] = COL_CORE
;
2151 columns
[ncolumns
++] = COL_SOCKET
;
2152 columns
[ncolumns
++] = COL_NODE
;
2153 columns
[ncolumns
++] = COL_CACHE
;
2156 print_parsable(desc
, columns
, ncolumns
, mod
);
2158 case OUTPUT_READABLE
:
2160 /* No list was given. Just print whatever is there. */
2161 columns
[ncolumns
++] = COL_CPU
;
2163 columns
[ncolumns
++] = COL_NODE
;
2164 if (desc
->drawermaps
)
2165 columns
[ncolumns
++] = COL_DRAWER
;
2167 columns
[ncolumns
++] = COL_BOOK
;
2168 if (desc
->socketmaps
)
2169 columns
[ncolumns
++] = COL_SOCKET
;
2171 columns
[ncolumns
++] = COL_CORE
;
2173 columns
[ncolumns
++] = COL_CACHE
;
2175 columns
[ncolumns
++] = COL_ONLINE
;
2176 if (desc
->configured
)
2177 columns
[ncolumns
++] = COL_CONFIGURED
;
2178 if (desc
->polarization
)
2179 columns
[ncolumns
++] = COL_POLARIZATION
;
2180 if (desc
->addresses
)
2181 columns
[ncolumns
++] = COL_ADDRESS
;
2183 columns
[ncolumns
++] = COL_MAXMHZ
;
2185 columns
[ncolumns
++] = COL_MINMHZ
;
2187 print_readable(desc
, columns
, ncolumns
, mod
);
2191 ul_unref_path(desc
->syscpu
);
2192 ul_unref_path(desc
->procfs
);
2193 return EXIT_SUCCESS
;