/*
 * lscpu - CPU architecture information helper
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/utsname.h>
/* Locations of the sysfs/procfs files lscpu reads.  Keeping them as
 * macros lets the sysroot code prepend a prefix at runtime (see
 * path_vcreate()). */
#define _PATH_SYS_SYSTEM	"/sys/devices/system"
#define _PATH_SYS_CPU		_PATH_SYS_SYSTEM "/cpu"
#define _PATH_PROC_XEN		"/proc/xen"
#define _PATH_PROC_XENCAP	_PATH_PROC_XEN "/capabilities"
#define _PATH_PROC_CPUINFO	"/proc/cpuinfo"
#define _PATH_PROC_PCIDEVS	"/proc/bus/pci/devices"
#define _PATH_PROC_SYSINFO	"/proc/sysinfo"
55 /* virtualization types */
61 const char *virt_types
[] = {
62 [VIRT_NONE
] = N_("none"),
63 [VIRT_PARA
] = N_("para"),
64 [VIRT_FULL
] = N_("full")
/* hypervisor vendors */
enum {
	HYPER_NONE	= 0,
	HYPER_XEN,
	HYPER_KVM,
	HYPER_MSHV,
	HYPER_VMWARE,
	HYPER_IBM
};
/* Vendor names indexed by the HYPER_* IDs above; HYPER_NONE has no name.
 * NOTE(review): only the MSHV/VMware entries were visible in the damaged
 * source -- the remaining entries are reconstructed; confirm against the
 * upstream file. */
const char *hv_vendors[] = {
	[HYPER_NONE]	= NULL,
	[HYPER_XEN]	= "Xen",
	[HYPER_KVM]	= "KVM",
	[HYPER_MSHV]	= "Microsoft",
	[HYPER_VMWARE]	= "VMware",
	[HYPER_IBM]	= "IBM"
};
/* CPU operating modes; combined with |= so each mode is a distinct bit. */
enum {
	MODE_32BIT	= (1 << 1),
	MODE_64BIT	= (1 << 2)
};
91 /* cache(s) description */
97 cpu_set_t
**sharedmaps
;
100 /* global description */
106 char *virtflag
; /* virtualization flag (vmx, svm) */
107 int hyper
; /* hypervisor vendor ID */
108 int virtype
; /* VIRT_PARA|FULL|NONE ? */
113 int mode
; /* rm, lm or/and tm */
115 int ncpus
; /* number of CPUs */
116 cpu_set_t
*online
; /* mask with online CPUs */
118 int nnodes
; /* number of NUMA modes */
119 cpu_set_t
**nodemaps
; /* array with NUMA nodes */
121 /* books -- based on book_siblings (internal kernel map of cpuX's
122 * hardware threads within the same book */
123 int nbooks
; /* number of all online books */
124 cpu_set_t
**bookmaps
; /* unique book_siblings */
126 /* sockets -- based on core_siblings (internal kernel map of cpuX's
127 * hardware threads within the same physical_package_id (socket)) */
128 int nsockets
; /* number of all online sockets */
129 cpu_set_t
**socketmaps
; /* unique core_siblings */
131 /* cores -- based on thread_siblings (internel kernel map of cpuX's
132 * hardware threads within the same core as cpuX) */
133 int ncores
; /* number of all online cores */
134 cpu_set_t
**coremaps
; /* unique thread_siblings */
136 int nthreads
; /* number of online threads */
139 struct cpu_cache
*caches
;
142 static size_t sysrootlen
;
143 static char pathbuf
[PATH_MAX
];
144 static int maxcpus
; /* size in bits of kernel cpu mask */
/* True when CPU number _cpu is set in the descriptor's online mask;
 * evaluates to 0 when no online mask is available. */
#define is_cpu_online(_d, _cpu) \
	((_d) && (_d)->online ? \
		CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
150 static FILE *path_fopen(const char *mode
, int exit_on_err
, const char *path
, ...)
151 __attribute__ ((__format__ (__printf__
, 3, 4)));
152 static void path_getstr(char *result
, size_t len
, const char *path
, ...)
153 __attribute__ ((__format__ (__printf__
, 3, 4)));
154 static int path_getnum(const char *path
, ...)
155 __attribute__ ((__format__ (__printf__
, 1, 2)));
156 static int path_exist(const char *path
, ...)
157 __attribute__ ((__format__ (__printf__
, 1, 2)));
158 static cpu_set_t
*path_cpuset(const char *path
, ...)
159 __attribute__ ((__format__ (__printf__
, 1, 2)));
/* Columns of the parsable (-p) output. */
enum {
	COL_CPU,
	COL_CORE,
	COL_SOCKET,
	COL_NODE,
	COL_BOOK,
	COL_CACHE
};
/* Column header strings, indexed by COL_*.
 * NOTE(review): only Socket/Cache entries survived in the damaged source;
 * the rest are reconstructed -- confirm against upstream. */
static const char *colnames[] =
{
	[COL_CPU]	= "CPU",
	[COL_CORE]	= "Core",
	[COL_SOCKET]	= "Socket",
	[COL_NODE]	= "Node",
	[COL_BOOK]	= "Book",
	[COL_CACHE]	= "Cache"
};
184 static int column_name_to_id(const char *name
, size_t namesz
)
188 for (i
= 0; i
< ARRAY_SIZE(colnames
); i
++) {
189 const char *cn
= colnames
[i
];
191 if (!strncasecmp(name
, cn
, namesz
) && !*(cn
+ namesz
))
194 warnx(_("unknown column: %s"), name
);
199 path_vcreate(const char *path
, va_list ap
)
202 vsnprintf(pathbuf
+ sysrootlen
,
203 sizeof(pathbuf
) - sysrootlen
, path
, ap
);
205 vsnprintf(pathbuf
, sizeof(pathbuf
), path
, ap
);
/*
 * fopen() the file named by the printf-style @path/@ap pair.  When
 * @exit_on_error is non-zero a failed open terminates the program;
 * otherwise NULL is returned to the caller.
 */
static FILE *
path_vfopen(const char *mode, int exit_on_error, const char *path, va_list ap)
{
	FILE *f;
	const char *p = path_vcreate(path, ap);

	f = fopen(p, mode);
	if (!f && exit_on_error)
		err(EXIT_FAILURE, _("error: cannot open %s"), p);
	return f;
}
/* Varargs wrapper around path_vfopen(); see there for semantics. */
static FILE *
path_fopen(const char *mode, int exit_on_error, const char *path, ...)
{
	FILE *fd;
	va_list ap;

	va_start(ap, path);
	fd = path_vfopen(mode, exit_on_error, path, ap);
	va_end(ap);

	return fd;
}
235 path_getstr(char *result
, size_t len
, const char *path
, ...)
241 fd
= path_vfopen("r", 1, path
, ap
);
244 if (!fgets(result
, len
, fd
))
245 err(EXIT_FAILURE
, _("failed to read: %s"), pathbuf
);
248 len
= strlen(result
);
249 if (result
[len
- 1] == '\n')
250 result
[len
- 1] = '\0';
254 path_getnum(const char *path
, ...)
261 fd
= path_vfopen("r", 1, path
, ap
);
264 if (fscanf(fd
, "%d", &result
) != 1) {
266 err(EXIT_FAILURE
, _("failed to read: %s"), pathbuf
);
268 errx(EXIT_FAILURE
, _("parse error: %s"), pathbuf
);
/* Return 1 when the printf-style named file exists, 0 otherwise. */
static int
path_exist(const char *path, ...)
{
	va_list ap;
	const char *p;

	va_start(ap, path);
	p = path_vcreate(path, ap);
	va_end(ap);

	return access(p, F_OK) == 0;
}
288 path_cpuparse(int islist
, const char *path
, va_list ap
)
292 size_t setsize
, len
= maxcpus
* 7;
295 fd
= path_vfopen("r", 1, path
, ap
);
297 if (!fgets(buf
, len
, fd
))
298 err(EXIT_FAILURE
, _("failed to read: %s"), pathbuf
);
302 if (buf
[len
- 1] == '\n')
305 set
= cpuset_alloc(maxcpus
, &setsize
, NULL
);
307 err(EXIT_FAILURE
, _("failed to callocate cpu set"));
310 if (cpulist_parse(buf
, set
, setsize
))
311 errx(EXIT_FAILURE
, _("failed to parse CPU list %s"), buf
);
313 if (cpumask_parse(buf
, set
, setsize
))
314 errx(EXIT_FAILURE
, _("failed to parse CPU mask %s"), buf
);
320 path_cpuset(const char *path
, ...)
326 set
= path_cpuparse(0, path
, ap
);
333 path_cpulist(const char *path
, ...)
339 set
= path_cpuparse(1, path
, ap
);
/* Lookup a pattern and get the value from cpuinfo.
 * Format is:
 *
 *	"<pattern>   : <key>"
 *
 * Returns 1 and stores a newly allocated copy of the value in *value on
 * a match, 0 otherwise.  Trailing whitespace is trimmed from @line.
 */
int lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	int len = strlen(pattern);

	if (!*line)
		return 0;

	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;

	/* white spaces */
	for (p = line + len; isspace(*p); p++);

	/* separator */
	if (*p != ':')
		return 0;

	/* white spaces */
	for (++p; isspace(*p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* trim trailing whitespace off the end of the line */
	len = strlen(line) - 1;
	for (p = line + len; isspace(*(p - 1)); p--);
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
386 /* Don't init the mode for platforms where we are not able to
387 * detect that CPU supports 64-bit mode.
395 /* reading info from any /{sys,proc} dump, don't mix it with
396 * information about our real CPU */
399 #if defined(__alpha__) || defined(__ia64__)
400 m
|= MODE_64BIT
; /* 64bit platforms only */
402 /* platforms with 64bit flag in /proc/cpuinfo, define
403 * 32bit default here */
404 #if defined(__i386__) || defined(__x86_64__) || \
405 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
412 read_basicinfo(struct lscpu_desc
*desc
)
414 FILE *fp
= path_fopen("r", 1, _PATH_PROC_CPUINFO
);
416 struct utsname utsbuf
;
419 if (uname(&utsbuf
) == -1)
420 err(EXIT_FAILURE
, _("error: uname failed"));
421 desc
->arch
= xstrdup(utsbuf
.machine
);
424 while(path_exist(_PATH_SYS_SYSTEM
"/cpu/cpu%d", desc
->ncpus
))
428 while (fgets(buf
, sizeof(buf
), fp
) != NULL
) {
429 if (lookup(buf
, "vendor", &desc
->vendor
)) ;
430 else if (lookup(buf
, "vendor_id", &desc
->vendor
)) ;
431 else if (lookup(buf
, "family", &desc
->family
)) ;
432 else if (lookup(buf
, "cpu family", &desc
->family
)) ;
433 else if (lookup(buf
, "model", &desc
->model
)) ;
434 else if (lookup(buf
, "stepping", &desc
->stepping
)) ;
435 else if (lookup(buf
, "cpu MHz", &desc
->mhz
)) ;
436 else if (lookup(buf
, "flags", &desc
->flags
)) ; /* x86 */
437 else if (lookup(buf
, "features", &desc
->flags
)) ; /* s390 */
438 else if (lookup(buf
, "type", &desc
->flags
)) ; /* sparc64 */
439 else if (lookup(buf
, "bogomips", &desc
->bogomips
)) ;
440 else if (lookup(buf
, "bogomips per cpu", &desc
->bogomips
)) ; /* s390 */
445 desc
->mode
= init_mode();
448 snprintf(buf
, sizeof(buf
), " %s ", desc
->flags
);
449 if (strstr(buf
, " svm "))
450 desc
->virtflag
= strdup("svm");
451 else if (strstr(buf
, " vmx "))
452 desc
->virtflag
= strdup("vmx");
453 if (strstr(buf
, " lm "))
454 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* x86_64 */
455 if (strstr(buf
, " zarch "))
456 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* s390x */
457 if (strstr(buf
, " sun4v ") || strstr(buf
, " sun4u "))
458 desc
->mode
|= MODE_32BIT
| MODE_64BIT
; /* sparc64 */
463 if (path_exist(_PATH_SYS_SYSTEM
"/cpu/kernel_max"))
464 /* note that kernel_max is maximum index [NR_CPUS-1] */
465 maxcpus
= path_getnum(_PATH_SYS_SYSTEM
"/cpu/kernel_max") + 1;
467 else if (!sysrootlen
)
468 /* the root is '/' so we are working with data from the current kernel */
469 maxcpus
= get_max_number_of_cpus();
471 /* we are reading some /sys snapshot instead of the real /sys,
472 * let's use any crazy number... */
473 maxcpus
= desc
->ncpus
> 2048 ? desc
->ncpus
: 2048;
475 /* get mask for online CPUs */
476 if (path_exist(_PATH_SYS_SYSTEM
"/cpu/online")) {
477 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
478 desc
->online
= path_cpulist(_PATH_SYS_SYSTEM
"/cpu/online");
479 desc
->nthreads
= CPU_COUNT_S(setsize
, desc
->online
);
484 has_pci_device(int vendor
, int device
)
487 int num
, fn
, ven
, dev
;
490 f
= path_fopen("r", 0, _PATH_PROC_PCIDEVS
);
494 /* for more details about bus/pci/devices format see
495 * drivers/pci/proc.c in linux kernel
497 while(fscanf(f
, "%02x%02x\t%04x%04x\t%*[^\n]",
498 &num
, &fn
, &ven
, &dev
) == 4) {
500 if (ven
== vendor
&& dev
== device
)
510 #if defined(__x86_64__) || defined(__i386__)
513 * This CPUID leaf returns the information about the hypervisor.
514 * EAX : maximum input value for CPUID supported by the hypervisor.
515 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
517 #define HYPERVISOR_INFO_LEAF 0x40000000
520 cpuid(unsigned int op
, unsigned int *eax
, unsigned int *ebx
,
521 unsigned int *ecx
, unsigned int *edx
)
524 #if defined(__PIC__) && defined(__i386__)
525 /* x86 PIC cannot clobber ebx -- gcc bitches */
542 read_hypervisor_cpuid(struct lscpu_desc
*desc
)
544 unsigned int eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
545 char hyper_vendor_id
[13];
547 memset(hyper_vendor_id
, 0, sizeof(hyper_vendor_id
));
549 cpuid(HYPERVISOR_INFO_LEAF
, &eax
, &ebx
, &ecx
, &edx
);
550 memcpy(hyper_vendor_id
+ 0, &ebx
, 4);
551 memcpy(hyper_vendor_id
+ 4, &ecx
, 4);
552 memcpy(hyper_vendor_id
+ 8, &edx
, 4);
553 hyper_vendor_id
[12] = '\0';
555 if (!hyper_vendor_id
[0])
558 if (!strncmp("XenVMMXenVMM", hyper_vendor_id
, 12))
559 desc
->hyper
= HYPER_XEN
;
560 else if (!strncmp("KVMKVMKVM", hyper_vendor_id
, 9))
561 desc
->hyper
= HYPER_KVM
;
562 else if (!strncmp("Microsoft Hv", hyper_vendor_id
, 12))
563 desc
->hyper
= HYPER_MSHV
;
564 else if (!strncmp("VMwareVMware", hyper_vendor_id
, 12))
565 desc
->hyper
= HYPER_VMWARE
;
568 #else /* ! __x86_64__ */
570 read_hypervisor_cpuid(struct lscpu_desc
*desc
)
576 read_hypervisor(struct lscpu_desc
*desc
)
578 read_hypervisor_cpuid(desc
);
582 desc
->virtype
= VIRT_FULL
;
584 else if (path_exist(_PATH_PROC_XEN
)) {
585 /* Xen para-virt or dom0 */
586 FILE *fd
= path_fopen("r", 0, _PATH_PROC_XENCAP
);
592 if (fscanf(fd
, "%s", buf
) == 1 &&
593 !strcmp(buf
, "control_d"))
597 desc
->virtype
= dom0
? VIRT_NONE
: VIRT_PARA
;
598 desc
->hyper
= HYPER_XEN
;
600 } else if (has_pci_device(0x5853, 0x0001)) {
601 /* Xen full-virt on non-x86_64 */
602 desc
->hyper
= HYPER_XEN
;
603 desc
->virtype
= VIRT_FULL
;
604 } else if (path_exist(_PATH_PROC_SYSINFO
)) {
605 FILE *fd
= path_fopen("r", 0, _PATH_PROC_SYSINFO
);
608 desc
->hyper
= HYPER_IBM
;
609 desc
->virtype
= VIRT_FULL
;
610 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
611 if (!strstr(buf
, "Control Program:"))
613 if (!strstr(buf
, "KVM"))
614 desc
->hyper
= HYPER_IBM
;
616 desc
->hyper
= HYPER_KVM
;
622 /* add @set to the @ary, unnecesary set is deallocated. */
623 static int add_cpuset_to_array(cpu_set_t
**ary
, int *items
, cpu_set_t
*set
)
626 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
631 for (i
= 0; i
< *items
; i
++) {
632 if (CPU_EQUAL_S(setsize
, set
, ary
[i
]))
645 read_topology(struct lscpu_desc
*desc
, int num
)
647 cpu_set_t
*thread_siblings
, *core_siblings
, *book_siblings
;
649 if (!path_exist(_PATH_SYS_CPU
"/cpu%d/topology/thread_siblings", num
))
652 thread_siblings
= path_cpuset(_PATH_SYS_CPU
653 "/cpu%d/topology/thread_siblings", num
);
654 core_siblings
= path_cpuset(_PATH_SYS_CPU
655 "/cpu%d/topology/core_siblings", num
);
656 book_siblings
= NULL
;
657 if (path_exist(_PATH_SYS_CPU
"/cpu%d/topology/book_siblings", num
)) {
658 book_siblings
= path_cpuset(_PATH_SYS_CPU
659 "/cpu%d/topology/book_siblings", num
);
662 if (!desc
->coremaps
) {
663 int nbooks
, nsockets
, ncores
, nthreads
;
664 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
666 /* threads within one core */
667 nthreads
= CPU_COUNT_S(setsize
, thread_siblings
);
668 /* cores within one socket */
669 ncores
= CPU_COUNT_S(setsize
, core_siblings
) / nthreads
;
670 /* number of sockets within one book.
671 * Because of odd / non-present cpu maps and to keep
672 * calculation easy we make sure that nsockets and
673 * nbooks is at least 1.
675 nsockets
= desc
->ncpus
/ nthreads
/ ncores
?: 1;
676 /* number of books */
677 nbooks
= desc
->ncpus
/ nthreads
/ ncores
/ nsockets
?: 1;
679 /* all threads, see also read_basicinfo()
680 * -- fallback for kernels without
681 * /sys/devices/system/cpu/online.
684 desc
->nthreads
= nbooks
* nsockets
* ncores
* nthreads
;
685 /* For each map we make sure that it can have up to ncpus
686 * entries. This is because we cannot reliably calculate the
687 * number of cores, sockets and books on all architectures.
688 * E.g. completely virtualized architectures like s390 may
689 * have multiple sockets of different sizes.
691 desc
->coremaps
= xcalloc(desc
->ncpus
, sizeof(cpu_set_t
*));
692 desc
->socketmaps
= xcalloc(desc
->ncpus
, sizeof(cpu_set_t
*));
694 desc
->bookmaps
= xcalloc(desc
->ncpus
, sizeof(cpu_set_t
*));
697 add_cpuset_to_array(desc
->socketmaps
, &desc
->nsockets
, core_siblings
);
698 add_cpuset_to_array(desc
->coremaps
, &desc
->ncores
, thread_siblings
);
700 add_cpuset_to_array(desc
->bookmaps
, &desc
->nbooks
, book_siblings
);
704 cachecmp(const void *a
, const void *b
)
706 struct cpu_cache
*c1
= (struct cpu_cache
*) a
;
707 struct cpu_cache
*c2
= (struct cpu_cache
*) b
;
709 return strcmp(c2
->name
, c1
->name
);
713 read_cache(struct lscpu_desc
*desc
, int num
)
718 if (!desc
->ncaches
) {
719 while(path_exist(_PATH_SYS_SYSTEM
"/cpu/cpu%d/cache/index%d",
726 desc
->caches
= xcalloc(desc
->ncaches
, sizeof(*desc
->caches
));
728 for (i
= 0; i
< desc
->ncaches
; i
++) {
729 struct cpu_cache
*ca
= &desc
->caches
[i
];
736 path_getstr(buf
, sizeof(buf
),
737 _PATH_SYS_CPU
"/cpu%d/cache/index%d/type",
739 if (!strcmp(buf
, "Data"))
741 else if (!strcmp(buf
, "Instruction"))
747 level
= path_getnum(_PATH_SYS_CPU
"/cpu%d/cache/index%d/level",
750 snprintf(buf
, sizeof(buf
), "L%d%c", level
, type
);
752 snprintf(buf
, sizeof(buf
), "L%d", level
);
754 ca
->name
= xstrdup(buf
);
757 path_getstr(buf
, sizeof(buf
),
758 _PATH_SYS_CPU
"/cpu%d/cache/index%d/size",
760 ca
->size
= xstrdup(buf
);
763 /* information about how CPUs share different caches */
764 map
= path_cpuset(_PATH_SYS_CPU
"/cpu%d/cache/index%d/shared_cpu_map",
768 ca
->sharedmaps
= xcalloc(desc
->ncpus
, sizeof(cpu_set_t
*));
769 add_cpuset_to_array(ca
->sharedmaps
, &ca
->nsharedmaps
, map
);
774 read_nodes(struct lscpu_desc
*desc
)
778 /* number of NUMA node */
779 while (path_exist(_PATH_SYS_SYSTEM
"/node/node%d", desc
->nnodes
))
785 desc
->nodemaps
= xcalloc(desc
->nnodes
, sizeof(cpu_set_t
*));
787 /* information about how nodes share different CPUs */
788 for (i
= 0; i
< desc
->nnodes
; i
++)
789 desc
->nodemaps
[i
] = path_cpuset(
790 _PATH_SYS_SYSTEM
"/node/node%d/cpumap",
795 print_parsable_cell(struct lscpu_desc
*desc
, int i
, int col
, int compatible
)
798 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
805 for (j
= 0; j
< desc
->ncores
; j
++) {
806 if (CPU_ISSET_S(i
, setsize
, desc
->coremaps
[j
])) {
813 for (j
= 0; j
< desc
->nsockets
; j
++) {
814 if (CPU_ISSET_S(i
, setsize
, desc
->socketmaps
[j
])) {
821 for (j
= 0; j
< desc
->nnodes
; j
++) {
822 if (CPU_ISSET_S(i
, setsize
, desc
->nodemaps
[j
])) {
829 for (j
= 0; j
< desc
->nbooks
; j
++) {
830 if (CPU_ISSET_S(i
, setsize
, desc
->bookmaps
[j
])) {
837 for (j
= desc
->ncaches
- 1; j
>= 0; j
--) {
838 struct cpu_cache
*ca
= &desc
->caches
[j
];
841 for (x
= 0; x
< ca
->nsharedmaps
; x
++) {
842 if (CPU_ISSET_S(i
, setsize
, ca
->sharedmaps
[x
])) {
843 if (j
!= desc
->ncaches
- 1)
844 putchar(compatible
? ',' : ':');
849 if (x
== ca
->nsharedmaps
)
857 * We support two formats:
859 * 1) "compatible" -- this format is compatible with the original lscpu(1)
860 * output and it contains fixed set of the columns. The CACHE columns are at
861 * the end of the line and the CACHE is not printed if the number of the caches
862 * is zero. The CACHE columns are separated by two commas, for example:
865 * # CPU,Core,Socket,Node,,L1d,L1i,L2
869 * 2) "user defined output" -- this format prints always all columns without
870 * special prefix for CACHE column. If there are not CACHEs then the column is
871 * empty and the header "Cache" is printed rather than a real name of the cache.
872 * The CACHE columns are separated by ':'.
874 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
875 * # CPU,Core,Socket,Node,L1d:L1i:L2
880 print_parsable(struct lscpu_desc
*desc
, int cols
[], int ncols
, int compatible
)
885 "# The following is the parsable format, which can be fed to other\n"
886 "# programs. Each different item in every column has an unique ID\n"
887 "# starting from zero.\n"));
890 for (i
= 0; i
< ncols
; i
++) {
891 if (cols
[i
] == COL_CACHE
) {
892 if (compatible
&& !desc
->ncaches
)
896 if (compatible
&& i
!= 0)
898 for (c
= desc
->ncaches
- 1; c
>= 0; c
--) {
899 printf("%s", desc
->caches
[c
].name
);
901 putchar(compatible
? ',' : ':');
904 fputs(colnames
[cols
[i
]], stdout
);
908 fputs(colnames
[cols
[i
]], stdout
);
913 for (i
= 0; i
< desc
->ncpus
; i
++) {
914 if (desc
->online
&& !is_cpu_online(desc
, i
))
916 for (c
= 0; c
< ncols
; c
++) {
917 if (compatible
&& cols
[c
] == COL_CACHE
) {
925 print_parsable_cell(desc
, i
, cols
[c
], compatible
);
/* output formats "<key>  <value>" -- key left-padded to a 23-char column */
#define print_s(_key, _val)	printf("%-23s%s\n", _key, _val)
#define print_n(_key, _val)	printf("%-23s%d\n", _key, _val)
937 print_cpuset(const char *key
, cpu_set_t
*set
, int hex
)
939 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
940 size_t setbuflen
= 7 * maxcpus
;
941 char setbuf
[setbuflen
], *p
;
944 p
= cpumask_create(setbuf
, setbuflen
, set
, setsize
);
945 printf("%-23s0x%s\n", key
, p
);
947 p
= cpulist_create(setbuf
, setbuflen
, set
, setsize
);
954 print_readable(struct lscpu_desc
*desc
, int hex
)
958 size_t setsize
= CPU_ALLOC_SIZE(maxcpus
);
960 print_s(_("Architecture:"), desc
->arch
);
963 char buf
[64], *p
= buf
;
965 if (desc
->mode
& MODE_32BIT
) {
966 strcpy(p
, "32-bit, ");
969 if (desc
->mode
& MODE_64BIT
) {
970 strcpy(p
, "64-bit, ");
974 print_s(_("CPU op-mode(s):"), buf
);
976 #if !defined(WORDS_BIGENDIAN)
977 print_s(_("Byte Order:"), "Little Endian");
979 print_s(_("Byte Order:"), "Big Endian");
981 print_n(_("CPU(s):"), desc
->ncpus
);
984 print_cpuset(hex
? _("On-line CPU(s) mask:") :
985 _("On-line CPU(s) list:"),
988 if (desc
->online
&& CPU_COUNT_S(setsize
, desc
->online
) != desc
->ncpus
) {
991 /* Linux kernel provides cpuset of off-line CPUs that contains
992 * all configured CPUs (see /sys/devices/system/cpu/offline),
993 * but want to print real (present in system) off-line CPUs only.
995 set
= cpuset_alloc(maxcpus
, NULL
, NULL
);
997 err(EXIT_FAILURE
, _("failed to callocate cpu set"));
998 CPU_ZERO_S(setsize
, set
);
999 for (i
= 0; i
< desc
->ncpus
; i
++) {
1000 if (!is_cpu_online(desc
, i
))
1001 CPU_SET_S(i
, setsize
, set
);
1003 print_cpuset(hex
? _("Off-line CPU(s) mask:") :
1004 _("Off-line CPU(s) list:"),
1009 if (desc
->nsockets
) {
1010 int cores_per_socket
, sockets_per_book
, books
;
1012 cores_per_socket
= sockets_per_book
= books
= 0;
1013 /* s390 detects its cpu topology via /proc/sysinfo, if present.
1014 * Using simply the cpu topology masks in sysfs will not give
1015 * usable results since everything is virtualized. E.g.
1016 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1018 * If the cpu topology is not exported (e.g. 2nd level guest)
1019 * fall back to old calculation scheme.
1021 if (path_exist(_PATH_PROC_SYSINFO
)) {
1022 FILE *fd
= path_fopen("r", 0, _PATH_PROC_SYSINFO
);
1026 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
1027 if (sscanf(buf
, "CPU Topology SW:%d%d%d%d%d%d",
1028 &t0
, &t1
, &t2
, &books
, &sockets_per_book
,
1029 &cores_per_socket
) == 6)
1033 print_n(_("Thread(s) per core:"), desc
->nthreads
/ desc
->ncores
);
1034 print_n(_("Core(s) per socket:"),
1035 cores_per_socket
?: desc
->ncores
/ desc
->nsockets
);
1037 print_n(_("Socket(s) per book:"),
1038 sockets_per_book
?: desc
->nsockets
/ desc
->nbooks
);
1039 print_n(_("Book(s):"), books
?: desc
->nbooks
);
1041 print_n(_("Socket(s):"), sockets_per_book
?: desc
->nsockets
);
1045 print_n(_("NUMA node(s):"), desc
->nnodes
);
1047 print_s(_("Vendor ID:"), desc
->vendor
);
1049 print_s(_("CPU family:"), desc
->family
);
1051 print_s(_("Model:"), desc
->model
);
1053 print_s(_("Stepping:"), desc
->stepping
);
1055 print_s(_("CPU MHz:"), desc
->mhz
);
1057 print_s(_("BogoMIPS:"), desc
->bogomips
);
1058 if (desc
->virtflag
) {
1059 if (!strcmp(desc
->virtflag
, "svm"))
1060 print_s(_("Virtualization:"), "AMD-V");
1061 else if (!strcmp(desc
->virtflag
, "vmx"))
1062 print_s(_("Virtualization:"), "VT-x");
1065 print_s(_("Hypervisor vendor:"), hv_vendors
[desc
->hyper
]);
1066 print_s(_("Virtualization type:"), virt_types
[desc
->virtype
]);
1068 if (desc
->ncaches
) {
1072 for (i
= desc
->ncaches
- 1; i
>= 0; i
--) {
1073 snprintf(buf
, sizeof(buf
),
1074 _("%s cache:"), desc
->caches
[i
].name
);
1075 print_s(buf
, desc
->caches
[i
].size
);
1079 for (i
= 0; i
< desc
->nnodes
; i
++) {
1080 snprintf(buf
, sizeof(buf
), _("NUMA node%d CPU(s):"), i
);
1081 print_cpuset(buf
, desc
->nodemaps
[i
], hex
);
1085 static void __attribute__((__noreturn__
)) usage(FILE *out
)
1089 " %s [options]\n"), program_invocation_short_name
);
1091 puts(_( "\nOptions:\n"
1092 " -h, --help print this help\n"
1093 " -p, --parse[=LIST] print out a parsable instead of a readable format\n"
1094 " -s, --sysroot DIR use directory DIR as system root\n"
1095 " -x, --hex print hexadecimal masks rather than lists of CPUs\n"));
1097 exit(out
== stderr
? EXIT_FAILURE
: EXIT_SUCCESS
);
1100 int main(int argc
, char *argv
[])
1102 struct lscpu_desc _desc
, *desc
= &_desc
;
1103 int parsable
= 0, c
, i
, hex
= 0;
1104 int columns
[ARRAY_SIZE(colnames
)], ncolumns
= 0;
1107 static const struct option longopts
[] = {
1108 { "help", no_argument
, 0, 'h' },
1109 { "parse", optional_argument
, 0, 'p' },
1110 { "sysroot", required_argument
, 0, 's' },
1111 { "hex", no_argument
, 0, 'x' },
1115 setlocale(LC_ALL
, "");
1116 bindtextdomain(PACKAGE
, LOCALEDIR
);
1117 textdomain(PACKAGE
);
1119 while ((c
= getopt_long(argc
, argv
, "hp::s:x", longopts
, NULL
)) != -1) {
1128 ncolumns
= string_to_idarray(optarg
,
1129 columns
, ARRAY_SIZE(columns
),
1132 return EXIT_FAILURE
;
1134 columns
[ncolumns
++] = COL_CPU
;
1135 columns
[ncolumns
++] = COL_CORE
;
1136 columns
[ncolumns
++] = COL_SOCKET
,
1137 columns
[ncolumns
++] = COL_NODE
,
1138 columns
[ncolumns
++] = COL_CACHE
;
1143 sysrootlen
= strlen(optarg
);
1144 strncpy(pathbuf
, optarg
, sizeof(pathbuf
));
1145 pathbuf
[sizeof(pathbuf
) - 1] = '\0';
1155 memset(desc
, 0, sizeof(*desc
));
1157 read_basicinfo(desc
);
1159 for (i
= 0; i
< desc
->ncpus
; i
++) {
1160 if (desc
->online
&& !is_cpu_online(desc
, i
))
1162 read_topology(desc
, i
);
1163 read_cache(desc
, i
);
1166 qsort(desc
->caches
, desc
->ncaches
, sizeof(struct cpu_cache
), cachecmp
);
1170 read_hypervisor(desc
);
1174 print_parsable(desc
, columns
, ncolumns
, compatible
);
1176 print_readable(desc
, hex
);
1178 return EXIT_SUCCESS
;