* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
+#if (defined(__x86_64__) || defined(__i386__))
+# if !defined( __SANITIZE_ADDRESS__)
+# define INCLUDE_VMWARE_BDOOR
+# else
+# warning VMWARE detection disabled by __SANITIZE_ADDRESS__
+# endif
+#endif
+
+#ifdef INCLUDE_VMWARE_BDOOR
+# include <stdint.h>
+# include <signal.h>
+# include <strings.h>
+# include <setjmp.h>
+# ifdef HAVE_SYS_IO_H
+# include <sys/io.h>
+# endif
+#endif
+
+#if defined(HAVE_LIBRTAS)
+#include <librtas.h>
+#endif
+
+#include <libsmartcols.h>
+
#include "cpuset.h"
#include "nls.h"
#include "xalloc.h"
#include "c.h"
#include "strutils.h"
#include "bitops.h"
-#include "tt.h"
#include "path.h"
#include "closestream.h"
#include "optutils.h"
/* /sys paths */
#define _PATH_SYS_SYSTEM "/sys/devices/system"
+#define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
#define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
#define _PATH_PROC_XEN "/proc/xen"
#define _PATH_PROC_STATUS "/proc/self/status"
#define _PATH_PROC_VZ "/proc/vz"
#define _PATH_PROC_BC "/proc/bc"
+#define _PATH_PROC_DEVICETREE "/proc/device-tree"
#define _PATH_DEV_MEM "/dev/mem"
+#define _PATH_PROC_OSRELEASE "/proc/sys/kernel/osrelease"
+
+/* Xen Domain feature flag used for /sys/hypervisor/properties/features */
+#define XENFEAT_supervisor_mode_kernel 3
+#define XENFEAT_mmu_pt_update_preserve_ad 5
+#define XENFEAT_hvm_callback_vector 8
+
+#define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
+#define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
+ | (1U << XENFEAT_hvm_callback_vector) )
/* virtualization types */
enum {
VIRT_FULL,
VIRT_CONT
};
-const char *virt_types[] = {
+static const char *virt_types[] = {
[VIRT_NONE] = N_("none"),
[VIRT_PARA] = N_("para"),
[VIRT_FULL] = N_("full"),
[VIRT_CONT] = N_("container"),
};
-const char *hv_vendors[] = {
+static const char *hv_vendors[] = {
[HYPER_NONE] = NULL,
[HYPER_XEN] = "Xen",
[HYPER_KVM] = "KVM",
[HYPER_UML] = "User-mode Linux",
[HYPER_INNOTEK] = "Innotek GmbH",
[HYPER_HITACHI] = "Hitachi",
- [HYPER_PARALLELS] = "Parallels"
+ [HYPER_PARALLELS] = "Parallels",
+ [HYPER_VBOX] = "Oracle",
+ [HYPER_OS400] = "OS/400",
+ [HYPER_PHYP] = "pHyp",
+ [HYPER_SPAR] = "Unisys s-Par",
+ [HYPER_WSL] = "Windows Subsystem for Linux"
+};
+
+static const int hv_vendor_pci[] = {
+ [HYPER_NONE] = 0x0000,
+ [HYPER_XEN] = 0x5853,
+ [HYPER_KVM] = 0x0000,
+ [HYPER_MSHV] = 0x1414,
+ [HYPER_VMWARE] = 0x15ad,
+ [HYPER_VBOX] = 0x80ee,
+};
+
+static const int hv_graphics_pci[] = {
+ [HYPER_NONE] = 0x0000,
+ [HYPER_XEN] = 0x0001,
+ [HYPER_KVM] = 0x0000,
+ [HYPER_MSHV] = 0x5353,
+ [HYPER_VMWARE] = 0x0710,
+ [HYPER_VBOX] = 0xbeef,
};
/* CPU modes */
DISP_VERTICAL = 1
};
-const char *disp_modes[] = {
+static const char *disp_modes[] = {
[DISP_HORIZONTAL] = N_("horizontal"),
[DISP_VERTICAL] = N_("vertical")
};
char *readable;
};
-struct polarization_modes polar_modes[] = {
+static struct polarization_modes polar_modes[] = {
[POLAR_UNKNOWN] = {"U", "-"},
[POLAR_VLOW] = {"VL", "vert-low"},
[POLAR_VMEDIUM] = {"VM", "vert-medium"},
struct lscpu_desc {
char *arch;
char *vendor;
+ char *machinetype; /* s390 */
char *family;
char *model;
char *modelname;
+ char *revision; /* alternative for model (ppc) */
+ char *cpu; /* alternative for modelname (ppc, sparc) */
char *virtflag; /* virtualization flag (vmx, svm) */
char *hypervisor; /* hypervisor software */
int hyper; /* hypervisor vendor ID */
int virtype; /* VIRT_PARA|FULL|NONE ? */
char *mhz;
+ char *dynamic_mhz; /* dynamic mega hertz (s390) */
+ char *static_mhz; /* static mega hertz (s390) */
char **maxmhz; /* maximum mega hertz */
char **minmhz; /* minimum mega hertz */
char *stepping;
char *bogomips;
char *flags;
+ char *mtid; /* maximum thread id (s390) */
int dispatching; /* none, horizontal or vertical */
int mode; /* rm, lm or/and tm */
int ncaches;
struct cpu_cache *caches;
+ int necaches; /* extra caches (s390) */
+ struct cpu_cache *ecaches;
+
/*
* All maps are sequentially indexed (0..ncpuspos), the array index
* does not have match with cpuX number as presented by kernel. You
int *idx2nodenum; /* Support for discontinuous nodes */
cpu_set_t **nodemaps; /* array with NUMA nodes */
+ /* drawers -- based on drawer_siblings (internal kernel map of cpuX's
+ * hardware threads within the same drawer */
+ int ndrawers; /* number of all online drawers */
+ cpu_set_t **drawermaps; /* unique drawer_siblings */
+ int *drawerids; /* physical drawer ids */
+
/* books -- based on book_siblings (internal kernel map of cpuX's
* hardware threads within the same book */
int nbooks; /* number of all online books */
cpu_set_t **bookmaps; /* unique book_siblings */
+ int *bookids; /* physical book ids */
/* sockets -- based on core_siblings (internal kernel map of cpuX's
* hardware threads within the same physical_package_id (socket)) */
int nsockets; /* number of all online sockets */
cpu_set_t **socketmaps; /* unique core_siblings */
+ int *socketids; /* physical socket ids */
- /* cores -- based on thread_siblings (internel kernel map of cpuX's
+ /* cores -- based on thread_siblings (internal kernel map of cpuX's
* hardware threads within the same core as cpuX) */
int ncores; /* number of all online cores */
cpu_set_t **coremaps; /* unique thread_siblings */
+ int *coreids; /* physical core ids */
int *polarization; /* cpu polarization */
int *addresses; /* physical cpu addresses */
int *configured; /* cpu configured */
+ int physsockets; /* Physical sockets (modules) */
+ int physchips; /* Physical chips */
+ int physcoresperchip; /* Physical cores per chip */
};
enum {
unsigned int hex:1, /* print CPU masks rather than CPU lists */
compat:1, /* use backwardly compatible format */
online:1, /* print online CPUs */
- offline:1; /* print offline CPUs */
+ offline:1, /* print offline CPUs */
+ json:1, /* JSON output format */
+ physical:1; /* use physical numbers */
};
static int maxcpus; /* size in bits of kernel cpu mask */
COL_SOCKET,
COL_NODE,
COL_BOOK,
+ COL_DRAWER,
COL_CACHE,
COL_POLARIZATION,
COL_ADDRESS,
[COL_SOCKET] = { "SOCKET", N_("logical socket number") },
[COL_NODE] = { "NODE", N_("logical NUMA node number") },
[COL_BOOK] = { "BOOK", N_("logical book number") },
+ [COL_DRAWER] = { "DRAWER", N_("logical drawer number") },
[COL_CACHE] = { "CACHE", N_("shows how caches are shared between CPUs") },
[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
[COL_ADDRESS] = { "ADDRESS", N_("physical address of a CPU") },
char *p, *v;
int len = strlen(pattern);
- if (!*line)
+ /* don't re-fill already found tags, first one wins */
+ if (!*line || *value)
return 0;
/* pattern */
return 1;
}
+/* Parse extra cache lines contained within /proc/cpuinfo but which are not
+ * part of the cache topology information within the sysfs filesystem.
+ * This is true for all shared caches on e.g. s390. When there are layers of
+ * hypervisors in between it is not knows which CPUs share which caches.
+ * Therefore information about shared caches is only available in
+ * /proc/cpuinfo.
+ * Format is:
+ * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
+ */
+/* Parse one /proc/cpuinfo "cache<nr> : level=.. type=.. scope=.. size=.."
+ * line (see the format comment above) and, for shared caches, append a
+ * new entry to desc->ecaches. Returns 1 when a cache was recorded,
+ * 0 when the line does not match or describes a private cache.
+ */
+static int
+lookup_cache(char *line, struct lscpu_desc *desc)
+{
+	struct cpu_cache *cache;
+	long long size;
+	char *p, type;
+	int level;
+
+	/* Make sure line starts with "cache<nr> :" */
+	if (strncmp(line, "cache", 5))
+		return 0;
+	for (p = line + 5; isdigit(*p); p++);
+	for (; isspace(*p); p++);
+	if (*p != ':')
+		return 0;
+
+	/* Test the strstr() result *before* applying an offset; adding to a
+	 * NULL pointer is undefined behavior and made the old "!p" checks
+	 * unreachable when a keyword was missing. */
+	p = strstr(line, "scope=");
+	/* Skip private caches, also present in sysfs */
+	if (!p || strncmp(p + 6, "Private", 7) == 0)
+		return 0;
+	p = strstr(line, "level=");
+	if (!p || sscanf(p, "level=%d", &level) != 1)
+		return 0;
+	p = strstr(line, "type=");
+	if (!p || !p[5])
+		return 0;
+	p += 5;
+	type = 0;
+	if (strncmp(p, "Data", 4) == 0)
+		type = 'd';
+	if (strncmp(p, "Instruction", 11) == 0)
+		type = 'i';
+	p = strstr(line, "size=");
+	if (!p || sscanf(p, "size=%lld", &size) != 1)
+		return 0;
+
+	desc->necaches++;
+	desc->ecaches = xrealloc(desc->ecaches,
+				 desc->necaches * sizeof(struct cpu_cache));
+	cache = &desc->ecaches[desc->necaches - 1];
+	memset(cache, 0 , sizeof(*cache));
+	if (type)
+		xasprintf(&cache->name, "L%d%c", level, type);
+	else
+		xasprintf(&cache->name, "L%d", level);
+	xasprintf(&cache->size, "%lldK", size);
+	return 1;
+}
+
/* Don't init the mode for platforms where we are not able to
* detect that CPU supports 64-bit mode.
*/
return m;
}
+#if defined(HAVE_LIBRTAS)
+#define PROCESSOR_MODULE_INFO 43
+/* Decode a big-endian 16-bit value at the given byte offset. The bytes
+ * are forced to unsigned char: plain char may be signed, and sign
+ * extension would corrupt any byte with the high bit set. */
+static int strbe16toh(const char *buf, int offset)
+{
+	return ((unsigned char)buf[offset] << 8) + (unsigned char)buf[offset+1];
+}
+
+/* Query firmware via librtas for the physical processor-module layout
+ * and fill desc->physsockets, desc->physchips and desc->physcoresperchip.
+ * All three are left 0 on any failure or when no module type is reported. */
+static void read_physical_info_powerpc(struct lscpu_desc *desc)
+{
+	char buf[BUFSIZ];
+	int rc, len, ntypes;
+
+	desc->physsockets = desc->physchips = desc->physcoresperchip = 0;
+
+	/* Reply layout (big-endian 16-bit fields): [0] total length,
+	 * [2] number of module types, [4] sockets, [6] chips, [8] cores/chip */
+	rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
+	if (rc < 0)
+		return;
+
+	len = strbe16toh(buf, 0);
+	if (len < 8)
+		return;
+
+	ntypes = strbe16toh(buf, 2);
+
+	/* NOTE(review): firmware reporting more than one module type aborts
+	 * the whole program here -- confirm this is the intended policy. */
+	assert(ntypes <= 1);
+	if (!ntypes)
+		return;
+
+	desc->physsockets = strbe16toh(buf, 4);
+	desc->physchips = strbe16toh(buf, 6);
+	desc->physcoresperchip = strbe16toh(buf, 8);
+}
+#else
+/* Fallback when librtas is unavailable: the physical PowerPC topology
+ * counters in desc simply stay at their zero defaults. */
+static void read_physical_info_powerpc(
+		struct lscpu_desc *desc __attribute__((__unused__)))
+{
+}
+#endif
+
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
else if (lookup(buf, "model name", &desc->modelname)) ;
else if (lookup(buf, "stepping", &desc->stepping)) ;
else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
+ else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
+ else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
+ else if (lookup(buf, "Features", &desc->flags)) ; /* aarch64 */
else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
else if (lookup(buf, "bogomips", &desc->bogomips)) ;
+ else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ; /* aarch64 */
else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
+ else if (lookup(buf, "cpu", &desc->cpu)) ;
+ else if (lookup(buf, "revision", &desc->revision)) ;
+ else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
+ else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
+ else if (lookup_cache(buf, desc)) ;
else
continue;
}
desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
else
desc->dispatching = -1;
+
+ if (mod->system == SYSTEM_LIVE)
+ read_physical_info_powerpc(desc);
+
+ if ((fp = path_fopen("r", 0, _PATH_PROC_SYSINFO))) {
+ while (fgets(buf, sizeof(buf), fp) != NULL && !desc->machinetype)
+ lookup(buf, "Type", &desc->machinetype);
+ fclose(fp);
+ }
}
static int
__asm__(
#if defined(__PIC__) && defined(__i386__)
/* x86 PIC cannot clobber ebx -- gcc bitches */
- "pushl %%ebx;"
+ "xchg %%ebx, %%esi;"
"cpuid;"
- "movl %%ebx, %%esi;"
- "popl %%ebx;"
+ "xchg %%esi, %%ebx;"
: "=S" (*ebx),
#else
"cpuid;"
desc->hyper = HYPER_MSHV;
else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
desc->hyper = HYPER_VMWARE;
+ else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
+ desc->hyper = HYPER_SPAR;
}
-#else /* ! __x86_64__ */
+#else /* ! (__x86_64__ || __i386__) */
static void
read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
{
}
#endif
+/* Return 1 if the NUL-separated string list stored in the file @path
+ * (e.g. a device-tree "compatible" property) contains an entry equal
+ * to @str, otherwise 0. Read errors are treated as "not compatible". */
+static int is_compatible(const char *path, const char *str)
+{
+	int found = 0;
+	FILE *fd = path_fopen("r", 0, "%s", path);
+
+	if (fd) {
+		char buf[256] = { 0 };
+		size_t off = 0, len;
+
+		len = fread(buf, 1, sizeof(buf) - 1, fd);
+		fclose(fd);
+
+		/* walk the NUL-separated entries */
+		while (off < len && !found) {
+			if (strcmp(&buf[off], str) == 0)
+				found = 1;
+			off += strlen(&buf[off]) + 1;
+		}
+	}
+
+	return found;
+}
+
+/* Detect the hypervisor on PowerPC by probing /proc special files and the
+ * device-tree, in order of specificity. Fills desc->hyper and
+ * desc->virtype and returns the detected hypervisor id (HYPER_NONE when
+ * bare metal or nothing matched). Must be called before any other
+ * detection has set desc->hyper. */
+static int
+read_hypervisor_powerpc(struct lscpu_desc *desc)
+{
+	assert(!desc->hyper);
+
+	/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
+	if (path_exist("/proc/iSeries")) {
+		desc->hyper = HYPER_OS400;
+		desc->virtype = VIRT_PARA;
+
+	/* PowerNV (POWER Non-Virtualized, bare-metal) */
+	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
+		desc->hyper = HYPER_NONE;
+		desc->virtype = VIRT_NONE;
+
+	/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
+	} else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
+		   && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
+		   && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
+		FILE *fd;
+		desc->hyper = HYPER_PHYP;
+		desc->virtype = VIRT_PARA;
+		/* a partition literally named "full" is treated as a
+		 * dedicated (non-virtualized) machine */
+		fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
+		if (fd) {
+			char buf[256];
+			if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
+				desc->virtype = VIRT_NONE;
+			fclose(fd);
+		}
+
+	/* Qemu */
+	} else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
+		desc->hyper = HYPER_KVM;
+		desc->virtype = VIRT_PARA;
+	}
+	return desc->hyper;
+}
+
+#ifdef INCLUDE_VMWARE_BDOOR
+
+#define VMWARE_BDOOR_MAGIC 0x564D5868
+#define VMWARE_BDOOR_PORT 0x5658
+#define VMWARE_BDOOR_CMD_GETVERSION 10
+
+/* Issue the VMware "backdoor" GETVERSION call: an IN instruction on port
+ * 0x5658 with the magic value in EAX. Under VMware the magic is echoed
+ * back in EBX (checked by is_vmware_platform()); on real hardware the IN
+ * typically faults, which the caller catches via a SIGSEGV handler. */
+static UL_ASAN_BLACKLIST
+void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+	__asm__(
+#if defined(__PIC__) && defined(__i386__)
+		/* x86 PIC cannot clobber ebx -- gcc bitches */
+		"xchg %%ebx, %%esi;"
+		"inl (%%dx), %%eax;"
+		"xchg %%esi, %%ebx;"
+		: "=S" (*ebx),
+#else
+		"inl (%%dx), %%eax;"
+		: "=b" (*ebx),
+#endif
+		  "=a" (*eax),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (VMWARE_BDOOR_MAGIC),
+		  "1" (VMWARE_BDOOR_CMD_GETVERSION),
+		  "2" (VMWARE_BDOOR_PORT),
+		  "3" (0)
+		: "memory");
+}
+
+static jmp_buf segv_handler_env;
+
+/* SIGSEGV trampoline: unwinds back to the sigsetjmp() in
+ * is_vmware_platform() when the backdoor probe faults on non-VMware
+ * hardware. */
+static void
+segv_handler(__attribute__((__unused__)) int sig,
+	     __attribute__((__unused__)) siginfo_t *info,
+	     __attribute__((__unused__)) void *ignored)
+{
+	siglongjmp(segv_handler_env, 1);
+}
+
+/* Return 1 when running under VMware, detected by probing the backdoor
+ * I/O port while a temporary SIGSEGV handler is installed; return 0
+ * otherwise (including for non-root callers, see the FIXME below). */
+static int
+is_vmware_platform(void)
+{
+	uint32_t eax, ebx, ecx, edx;
+	struct sigaction act, oact;
+
+	/*
+	 * FIXME: Not reliable for non-root users. Note it works as expected if
+	 * vmware_bdoor() is not optimized for PIE, but then it fails to build
+	 * on 32bit x86 systems. See lscpu git log for more details (commit
+	 * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
+	 */
+	if (getuid() != 0)
+		return 0;
+
+	/*
+	 * The assembly routine for vmware detection works
+	 * fine under vmware, even if ran as regular user. But
+	 * on real HW or under other hypervisors, it segfaults (which is
+	 * expected). So we temporarily install SIGSEGV handler to catch
+	 * the signal. All this magic is needed because lscpu
+	 * isn't supposed to require root privileges.
+	 */
+	if (sigsetjmp(segv_handler_env, 1))
+		return 0;
+
+	memset(&act, 0, sizeof(act));
+	act.sa_sigaction = segv_handler;
+	act.sa_flags = SA_SIGINFO;
+
+	if (sigaction(SIGSEGV, &act, &oact))
+		err(EXIT_FAILURE, _("cannot set signal handler"));
+
+	vmware_bdoor(&eax, &ebx, &ecx, &edx);
+
+	if (sigaction(SIGSEGV, &oact, NULL))
+		err(EXIT_FAILURE, _("cannot restore signal handler"));
+
+	/* VMware answers by echoing the magic back in EBX */
+	return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
+}
+
+#else /* ! INCLUDE_VMWARE_BDOOR */
+
+static int
+is_vmware_platform(void)
+{
+	/* backdoor probe compiled out (non-x86 arch or ASAN build) */
+	return 0;
+}
+
+#endif /* INCLUDE_VMWARE_BDOOR */
+
static void
read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
FILE *fd;
+ /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
+
+ if ((fd = path_fopen("r", 0, _PATH_PROC_OSRELEASE))) {
+ char buf[256];
+
+ if (fgets(buf, sizeof(buf), fd) != NULL) {
+ if (strstr(buf, "Microsoft")) {
+ desc->hyper = HYPER_WSL;
+ desc->virtype = VIRT_CONT;
+ }
+ }
+ fclose(fd);
+ if (desc->virtype)
+ return;
+ }
+
if (mod->system != SYSTEM_SNAPSHOT) {
read_hypervisor_cpuid(desc);
if (!desc->hyper)
desc->hyper = read_hypervisor_dmi();
+ if (!desc->hyper && is_vmware_platform())
+ desc->hyper = HYPER_VMWARE;
}
- if (desc->hyper)
+ if (desc->hyper) {
desc->virtype = VIRT_FULL;
+ if (desc->hyper == HYPER_XEN) {
+ uint32_t features;
+
+ fd = path_fopen("r", 0, _PATH_SYS_HYP_FEATURES);
+ if (fd && fscanf(fd, "%x", &features) == 1) {
+ /* Xen PV domain */
+ if (features & XEN_FEATURES_PV_MASK)
+ desc->virtype = VIRT_PARA;
+ /* Xen PVH domain */
+ else if ((features & XEN_FEATURES_PVH_MASK)
+ == XEN_FEATURES_PVH_MASK)
+ desc->virtype = VIRT_PARA;
+ fclose(fd);
+ } else {
+ err(EXIT_FAILURE, _("failed to read from: %s"),
+ _PATH_SYS_HYP_FEATURES);
+ }
+ }
+ } else if (read_hypervisor_powerpc(desc) > 0) {}
+
/* Xen para-virt or dom0 */
else if (path_exist(_PATH_PROC_XEN)) {
int dom0 = 0;
if (fd) {
char buf[256];
- if (fscanf(fd, "%s", buf) == 1 &&
+ if (fscanf(fd, "%255s", buf) == 1 &&
!strcmp(buf, "control_d"))
dom0 = 1;
fclose(fd);
desc->hyper = HYPER_XEN;
/* Xen full-virt on non-x86_64 */
- } else if (has_pci_device(0x5853, 0x0001)) {
+ } else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
desc->hyper = HYPER_XEN;
desc->virtype = VIRT_FULL;
+ } else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
+ desc->hyper = HYPER_VMWARE;
+ desc->virtype = VIRT_FULL;
+ } else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
+ desc->hyper = HYPER_VBOX;
+ desc->virtype = VIRT_FULL;
/* IBM PR/SM */
- } else if (path_exist(_PATH_PROC_SYSINFO)) {
- FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
+ } else if ((fd = path_fopen("r", 0, _PATH_PROC_SYSINFO))) {
char buf[BUFSIZ];
- if (!sysinfo_fd)
- return;
desc->hyper = HYPER_IBM;
desc->hypervisor = "PR/SM";
desc->virtype = VIRT_FULL;
- while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
+ while (fgets(buf, sizeof(buf), fd) != NULL) {
char *str;
if (!strstr(buf, "Control Program:"))
while ((str = strstr(desc->hypervisor, " ")))
memmove(str, str + 1, strlen(str));
}
- fclose(sysinfo_fd);
+ fclose(fd);
}
/* OpenVZ/Virtuozzo - /proc/vz dir should exist
desc->virtype = VIRT_PARA;
/* Linux-VServer */
- } else if (path_exist(_PATH_PROC_STATUS)) {
+ } else if ((fd = path_fopen("r", 0, _PATH_PROC_STATUS))) {
char buf[BUFSIZ];
char *val = NULL;
- fd = path_fopen("r", 1, _PATH_PROC_STATUS);
while (fgets(buf, sizeof(buf), fd) != NULL) {
if (lookup(buf, "VxID", &val))
break;
fclose(fd);
if (val) {
+ char *org = val;
+
while (isdigit(*val))
++val;
if (!*val) {
desc->hyper = HYPER_VSERVER;
desc->virtype = VIRT_CONT;
}
+ free(org);
}
}
}
static void
read_topology(struct lscpu_desc *desc, int idx)
{
- cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
- int num = real_cpu_num(desc, idx);
+ cpu_set_t *thread_siblings, *core_siblings;
+ cpu_set_t *book_siblings, *drawer_siblings;
+ int coreid, socketid, bookid, drawerid;
+ int i, num = real_cpu_num(desc, idx);
if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
return;
if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
"/cpu%d/topology/book_siblings", num);
+ drawer_siblings = NULL;
+ if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_siblings", num))
+ drawer_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
+ "/cpu%d/topology/drawer_siblings", num);
+ coreid = -1;
+ if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/core_id", num))
+ coreid = path_read_s32(_PATH_SYS_CPU
+ "/cpu%d/topology/core_id", num);
+ socketid = -1;
+ if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/physical_package_id", num))
+ socketid = path_read_s32(_PATH_SYS_CPU
+ "/cpu%d/topology/physical_package_id", num);
+ bookid = -1;
+ if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_id", num))
+ bookid = path_read_s32(_PATH_SYS_CPU
+ "/cpu%d/topology/book_id", num);
+ drawerid = -1;
+ if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_id", num))
+ drawerid = path_read_s32(_PATH_SYS_CPU
+ "/cpu%d/topology/drawer_id", num);
if (!desc->coremaps) {
- int nbooks, nsockets, ncores, nthreads;
+ int ndrawers, nbooks, nsockets, ncores, nthreads;
size_t setsize = CPU_ALLOC_SIZE(maxcpus);
/* threads within one core */
if (!nbooks)
nbooks = 1;
+ /* number of drawers */
+ ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
+ if (!ndrawers)
+ ndrawers = 1;
+
/* all threads, see also read_basicinfo()
* -- fallback for kernels without
* /sys/devices/system/cpu/online.
*/
if (!desc->nthreads)
- desc->nthreads = nbooks * nsockets * ncores * nthreads;
+ desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
/* For each map we make sure that it can have up to ncpuspos
* entries. This is because we cannot reliably calculate the
*/
desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- if (book_siblings)
+ desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ for (i = 0; i < desc->ncpuspos; i++)
+ desc->coreids[i] = desc->socketids[i] = -1;
+ if (book_siblings) {
desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
+ desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ for (i = 0; i < desc->ncpuspos; i++)
+ desc->bookids[i] = -1;
+ }
+ if (drawer_siblings) {
+ desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
+ desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ for (i = 0; i < desc->ncpuspos; i++)
+ desc->drawerids[i] = -1;
+ }
}
add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
+ desc->coreids[idx] = coreid;
add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
- if (book_siblings)
+ desc->socketids[idx] = socketid;
+ if (book_siblings) {
add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
+ desc->bookids[idx] = bookid;
+ }
+ if (drawer_siblings) {
+ add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
+ desc->drawerids[idx] = drawerid;
+ }
}
static void
desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
}
+/* Format into @buf the highest per-CPU maximum frequency (MHz) across all
+ * present CPUs and return @buf. */
+static char *
+cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
+{
+	int i;
+	float top = atof(desc->maxmhz[0]);
+
+	if (desc->present) {
+		for (i = 1; i < desc->ncpuspos; i++) {
+			float cur;
+
+			if (!CPU_ISSET(real_cpu_num(desc, i), desc->present))
+				continue;
+			cur = atof(desc->maxmhz[i]);
+			if (cur > top)
+				top = cur;
+		}
+	}
+	snprintf(buf, bufsz, "%.4f", top);
+	return buf;
+}
+
+/* Format into @buf the lowest per-CPU minimum frequency (MHz) across all
+ * present CPUs and return @buf. */
+static char *
+cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
+{
+	int i;
+	float low = atof(desc->minmhz[0]);
+
+	if (desc->present) {
+		for (i = 1; i < desc->ncpuspos; i++) {
+			float cur;
+
+			if (!CPU_ISSET(real_cpu_num(desc, i), desc->present))
+				continue;
+			cur = atof(desc->minmhz[i]);
+			if (cur < low)
+				low = cur;
+		}
+	}
+	snprintf(buf, bufsz, "%.4f", low);
+	return buf;
+}
+
+
static void
read_max_mhz(struct lscpu_desc *desc, int idx)
{
snprintf(buf, bufsz, "%d", cpu);
break;
case COL_CORE:
- if (cpuset_ary_isset(cpu, desc->coremaps,
- desc->ncores, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zd", i);
+ if (mod->physical) {
+ if (desc->coreids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->coreids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->coremaps,
+ desc->ncores, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
case COL_SOCKET:
- if (cpuset_ary_isset(cpu, desc->socketmaps,
- desc->nsockets, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zd", i);
+ if (mod->physical) {
+ if (desc->socketids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->socketids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->socketmaps,
+ desc->nsockets, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
case COL_NODE:
if (cpuset_ary_isset(cpu, desc->nodemaps,
desc->nnodes, setsize, &i) == 0)
snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
break;
+ case COL_DRAWER:
+ if (mod->physical) {
+ if (desc->drawerids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->drawermaps,
+ desc->ndrawers, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
+ break;
case COL_BOOK:
- if (cpuset_ary_isset(cpu, desc->bookmaps,
- desc->nbooks, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zd", i);
+ if (mod->physical) {
+ if (desc->bookids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->bookids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->bookmaps,
+ desc->nbooks, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
case COL_CACHE:
{
if (cpuset_ary_isset(cpu, ca->sharedmaps,
ca->nsharedmaps, setsize, &i) == 0) {
- int x = snprintf(p, sz, "%zd", i);
- if (x <= 0 || (size_t) x + 2 >= sz)
+ int x = snprintf(p, sz, "%zu", i);
+ if (x < 0 || (size_t) x >= sz)
return NULL;
p += x;
sz -= x;
}
if (j != 0) {
+ if (sz < 2)
+ return NULL;
*p++ = mod->compat ? ',' : ':';
*p = '\0';
- sz++;
+ sz--;
}
}
break;
if (!desc->configured)
break;
if (mod->mode == OUTPUT_PARSABLE)
- snprintf(buf, bufsz,
+ snprintf(buf, bufsz, "%s",
desc->configured[idx] ? _("Y") : _("N"));
else
- snprintf(buf, bufsz,
+ snprintf(buf, bufsz, "%s",
desc->configured[idx] ? _("yes") : _("no"));
break;
case COL_ONLINE:
if (!desc->online)
break;
if (mod->mode == OUTPUT_PARSABLE)
- snprintf(buf, bufsz,
+ snprintf(buf, bufsz, "%s",
is_cpu_online(desc, cpu) ? _("Y") : _("N"));
else
- snprintf(buf, bufsz,
+ snprintf(buf, bufsz, "%s",
is_cpu_online(desc, cpu) ? _("yes") : _("no"));
break;
case COL_MAXMHZ:
for (i = desc->ncaches - 1; i >= 0; i--) {
int x = snprintf(p, sz, "%s", desc->caches[i].name);
- if (x <= 0 || (size_t) x + 2 > sz)
+ if (x < 0 || (size_t) x >= sz)
return NULL;
sz -= x;
p += x;
if (i > 0) {
+ if (sz < 2)
+ return NULL;
*p++ = mod->compat ? ',' : ':';
*p = '\0';
- sz++;
+ sz--;
}
}
if (desc->ncaches)
struct lscpu_modifier *mod)
{
int i;
- char buf[BUFSIZ], *data;
- struct tt *tt = tt_new_table(TT_FL_FREEDATA);
+ char buf[BUFSIZ];
+ const char *data;
+ struct libscols_table *table;
+
+ scols_init_debug(0);
- if (!tt)
- err(EXIT_FAILURE, _("failed to initialize output table"));
+ table = scols_new_table();
+ if (!table)
+ err(EXIT_FAILURE, _("failed to allocate output table"));
+ if (mod->json) {
+ scols_table_enable_json(table, 1);
+ scols_table_set_name(table, "cpus");
+ }
for (i = 0; i < ncols; i++) {
data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
- tt_define_column(tt, xstrdup(data), 0, 0);
+ if (!scols_table_new_column(table, data, 0, 0))
+ err(EXIT_FAILURE, _("failed to allocate output column"));
}
for (i = 0; i < desc->ncpuspos; i++) {
int c;
- struct tt_line *line;
+ struct libscols_line *line;
int cpu = real_cpu_num(desc, i);
if (!mod->offline && desc->online && !is_cpu_online(desc, cpu))
if (desc->present && !is_cpu_present(desc, cpu))
continue;
- line = tt_add_line(tt, NULL);
+ line = scols_table_new_line(table, NULL);
+ if (!line)
+ err(EXIT_FAILURE, _("failed to allocate output line"));
for (c = 0; c < ncols; c++) {
data = get_cell_data(desc, i, cols[c], mod,
buf, sizeof(buf));
- tt_line_set_data(line, c,
- xstrdup(data && *data ? data : "-"));
+ if (!data || !*data)
+ data = "-";
+ if (scols_line_set_data(line, c, data))
+ err(EXIT_FAILURE, _("failed to add output data"));
}
}
- tt_print_table(tt);
- tt_free_table(tt);
+ scols_print_table(table);
+ scols_unref_table(table);
}
-/* output formats "<key> <value>"*/
-#define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
-#define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
+
+/* Append one "<description> <value>" line to the summary table @tb:
+ * @txt goes into column 0, the printf-style formatted value into column 1.
+ * Ownership of the formatted string is transferred to the table via
+ * scols_line_refer_data(). Exits on allocation failure. */
+static void __attribute__ ((__format__(printf, 3, 4)))
+	add_summary_sprint(struct libscols_table *tb,
+			const char *txt,
+			const char *fmt,
+			...)
+{
+	struct libscols_line *ln = scols_table_new_line(tb, NULL);
+	char *data;
+	va_list args;
+
+	if (!ln)
+		err(EXIT_FAILURE, _("failed to allocate output line"));
+
+	/* description column -- check the result like the data column below */
+	if (scols_line_set_data(ln, 0, txt))
+		err(EXIT_FAILURE, _("failed to add output data"));
+
+	/* data column */
+	va_start(args, fmt);
+	xvasprintf(&data, fmt, args);
+	va_end(args);
+
+	if (data && scols_line_refer_data(ln, 1, data))
+		err(EXIT_FAILURE, _("failed to add output data"));
+}
+
+#define add_summary_n(tb, txt, num) add_summary_sprint(tb, txt, "%d", num)
+#define add_summary_s(tb, txt, str) add_summary_sprint(tb, txt, "%s", str)
static void
-print_cpuset(const char *key, cpu_set_t *set, int hex)
+print_cpuset(struct libscols_table *tb,
+ const char *key, cpu_set_t *set, int hex)
{
size_t setsize = CPU_ALLOC_SIZE(maxcpus);
size_t setbuflen = 7 * maxcpus;
if (hex) {
p = cpumask_create(setbuf, setbuflen, set, setsize);
- printf("%-23s0x%s\n", key, p);
+ add_summary_s(tb, key, p);
} else {
p = cpulist_create(setbuf, setbuflen, set, setsize);
- print_s(key, p);
+ add_summary_s(tb, key, p);
}
-
}
/*
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
- char buf[512];
- int i;
+ char buf[BUFSIZ];
+ int i = 0;
size_t setsize = CPU_ALLOC_SIZE(maxcpus);
+ struct libscols_table *tb;
+
+ scols_init_debug(0);
- print_s(_("Architecture:"), desc->arch);
+ tb = scols_new_table();
+ if (!tb)
+ err(EXIT_FAILURE, _("failed to allocate output table"));
+ scols_table_enable_noheadings(tb, 1);
+ if (mod->json) {
+ scols_table_enable_json(tb, 1);
+ scols_table_set_name(tb, "lscpu");
+ }
+
+ if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
+ scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES) == NULL)
+ err(EXIT_FAILURE, _("failed to initialize output column"));
+
+ add_summary_s(tb, _("Architecture:"), desc->arch);
if (desc->mode) {
- char mbuf[64], *p = mbuf;
+ char *p = buf;
if (desc->mode & MODE_32BIT) {
strcpy(p, "32-bit, ");
p += 8;
}
*(p - 2) = '\0';
- print_s(_("CPU op-mode(s):"), mbuf);
+ add_summary_s(tb, _("CPU op-mode(s):"), buf);
}
#if !defined(WORDS_BIGENDIAN)
- print_s(_("Byte Order:"), "Little Endian");
+ add_summary_s(tb, _("Byte Order:"), "Little Endian");
#else
- print_s(_("Byte Order:"), "Big Endian");
+ add_summary_s(tb, _("Byte Order:"), "Big Endian");
#endif
- print_n(_("CPU(s):"), desc->ncpus);
+ add_summary_n(tb, _("CPU(s):"), desc->ncpus);
if (desc->online)
- print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
- _("On-line CPU(s) list:"),
+ print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
+ _("On-line CPU(s) list:"),
desc->online, mod->hex);
if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
CPU_SET_S(cpu, setsize, set);
}
- print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
- _("Off-line CPU(s) list:"),
+ print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
+ _("Off-line CPU(s) list:"),
set, mod->hex);
cpuset_free(set);
}
if (desc->nsockets) {
- int cores_per_socket, sockets_per_book, books;
+ int threads_per_core, cores_per_socket, sockets_per_book;
+ int books_per_drawer, drawers;
+ FILE *fd;
- cores_per_socket = sockets_per_book = books = 0;
+ threads_per_core = cores_per_socket = sockets_per_book = 0;
+ books_per_drawer = drawers = 0;
/* s390 detects its cpu topology via /proc/sysinfo, if present.
* Using simply the cpu topology masks in sysfs will not give
* usable results since everything is virtualized. E.g.
* If the cpu topology is not exported (e.g. 2nd level guest)
* fall back to old calculation scheme.
*/
- if (path_exist(_PATH_PROC_SYSINFO)) {
- FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
- char pbuf[BUFSIZ];
- int t0, t1, t2;
-
- while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
- if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
- &t0, &t1, &t2, &books, &sockets_per_book,
+ if ((fd = path_fopen("r", 0, _PATH_PROC_SYSINFO))) {
+ int t0, t1;
+
+ while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
+ if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
+ &t0, &t1, &drawers, &books_per_drawer,
+ &sockets_per_book,
&cores_per_socket) == 6)
break;
}
if (fd)
fclose(fd);
}
- print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
- print_n(_("Core(s) per socket:"),
+ if (desc->mtid)
+ threads_per_core = atoi(desc->mtid) + 1;
+ add_summary_n(tb, _("Thread(s) per core:"),
+ threads_per_core ?: desc->nthreads / desc->ncores);
+ add_summary_n(tb, _("Core(s) per socket:"),
cores_per_socket ?: desc->ncores / desc->nsockets);
if (desc->nbooks) {
- print_n(_("Socket(s) per book:"),
+ add_summary_n(tb, _("Socket(s) per book:"),
sockets_per_book ?: desc->nsockets / desc->nbooks);
- print_n(_("Book(s):"), books ?: desc->nbooks);
+ if (desc->ndrawers) {
+ add_summary_n(tb, _("Book(s) per drawer:"),
+ books_per_drawer ?: desc->nbooks / desc->ndrawers);
+ add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
+ } else {
+ add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
+ }
} else {
- print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
+ add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
}
}
if (desc->nnodes)
- print_n(_("NUMA node(s):"), desc->nnodes);
+ add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
if (desc->vendor)
- print_s(_("Vendor ID:"), desc->vendor);
+ add_summary_s(tb, _("Vendor ID:"), desc->vendor);
+ if (desc->machinetype)
+ add_summary_s(tb, _("Machine type:"), desc->machinetype);
if (desc->family)
- print_s(_("CPU family:"), desc->family);
- if (desc->model)
- print_s(_("Model:"), desc->model);
- if (desc->modelname)
- print_s(_("Model name:"), desc->modelname);
+ add_summary_s(tb, _("CPU family:"), desc->family);
+ if (desc->model || desc->revision)
+ add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
+ if (desc->modelname || desc->cpu)
+ add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
if (desc->stepping)
- print_s(_("Stepping:"), desc->stepping);
+ add_summary_s(tb, _("Stepping:"), desc->stepping);
if (desc->mhz)
- print_s(_("CPU MHz:"), desc->mhz);
+ add_summary_s(tb, _("CPU MHz:"), desc->mhz);
+ if (desc->dynamic_mhz)
+ add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
+ if (desc->static_mhz)
+ add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
if (desc->maxmhz)
- print_s(_("CPU max MHz:"), desc->maxmhz[0]);
+ add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
if (desc->minmhz)
- print_s(_("CPU min MHz:"), desc->minmhz[0]);
+ add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
if (desc->bogomips)
- print_s(_("BogoMIPS:"), desc->bogomips);
+ add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
if (desc->virtflag) {
if (!strcmp(desc->virtflag, "svm"))
- print_s(_("Virtualization:"), "AMD-V");
+ add_summary_s(tb, _("Virtualization:"), "AMD-V");
else if (!strcmp(desc->virtflag, "vmx"))
- print_s(_("Virtualization:"), "VT-x");
+ add_summary_s(tb, _("Virtualization:"), "VT-x");
}
if (desc->hypervisor)
- print_s(_("Hypervisor:"), desc->hypervisor);
+ add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
if (desc->hyper) {
- print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
- print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
+ add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
+ add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
}
if (desc->dispatching >= 0)
- print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
+ add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
if (desc->ncaches) {
- char cbuf[512];
-
for (i = desc->ncaches - 1; i >= 0; i--) {
- snprintf(cbuf, sizeof(cbuf),
+ snprintf(buf, sizeof(buf),
_("%s cache:"), desc->caches[i].name);
- print_s(cbuf, desc->caches[i].size);
+ add_summary_s(tb, buf, desc->caches[i].size);
+ }
+ }
+ if (desc->necaches) {
+ for (i = desc->necaches - 1; i >= 0; i--) {
+ snprintf(buf, sizeof(buf),
+ _("%s cache:"), desc->ecaches[i].name);
+ add_summary_s(tb, buf, desc->ecaches[i].size);
}
}
for (i = 0; i < desc->nnodes; i++) {
snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
- print_cpuset(buf, desc->nodemaps[i], mod->hex);
+ print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
}
+
+ if (desc->physsockets) {
+ add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
+ add_summary_n(tb, _("Physical chips:"), desc->physchips);
+ add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
+ }
+
+ if (desc->flags)
+ add_summary_s(tb, _("Flags:"), desc->flags);
+
+ scols_print_table(tb);
+ scols_unref_table(tb);
}
static void __attribute__((__noreturn__)) usage(FILE *out)
fputs(USAGE_HEADER, out);
fprintf(out, _(" %s [options]\n"), program_invocation_short_name);
+ fputs(USAGE_SEPARATOR, out);
+ fputs(_("Display information about the CPU architecture.\n"), out);
+
fputs(USAGE_OPTIONS, out);
fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
fputs(_(" -c, --offline print offline CPUs only\n"), out);
+ fputs(_(" -J, --json use JSON for default or extended format\n"), out);
fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
+ fputs(_(" -y, --physical print physical instead of logical IDs\n"), out);
fputs(USAGE_SEPARATOR, out);
fputs(USAGE_HELP, out);
fputs(USAGE_VERSION, out);
for (i = 0; i < ARRAY_SIZE(coldescs); i++)
fprintf(out, " %13s %s\n", coldescs[i].name, _(coldescs[i].help));
- fprintf(out, _("\nFor more details see lscpu(1).\n"));
+ fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));
exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
}
int main(int argc, char *argv[])
{
struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
- struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
+ struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
int c, i;
int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
int cpu_modifier_specified = 0;
static const struct option longopts[] = {
- { "all", no_argument, 0, 'a' },
- { "online", no_argument, 0, 'b' },
- { "offline", no_argument, 0, 'c' },
- { "help", no_argument, 0, 'h' },
- { "extended", optional_argument, 0, 'e' },
- { "parse", optional_argument, 0, 'p' },
- { "sysroot", required_argument, 0, 's' },
- { "hex", no_argument, 0, 'x' },
- { "version", no_argument, 0, 'V' },
- { NULL, 0, 0, 0 }
+ { "all", no_argument, NULL, 'a' },
+ { "online", no_argument, NULL, 'b' },
+ { "offline", no_argument, NULL, 'c' },
+ { "help", no_argument, NULL, 'h' },
+ { "extended", optional_argument, NULL, 'e' },
+ { "json", no_argument, NULL, 'J' },
+ { "parse", optional_argument, NULL, 'p' },
+ { "sysroot", required_argument, NULL, 's' },
+ { "physical", no_argument, NULL, 'y' },
+ { "hex", no_argument, NULL, 'x' },
+ { "version", no_argument, NULL, 'V' },
+ { NULL, 0, NULL, 0 }
};
static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
textdomain(PACKAGE);
atexit(close_stdout);
- while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {
+ while ((c = getopt_long(argc, argv, "abce::hJp::s:xyV", longopts, NULL)) != -1) {
err_exclusive_options(c, longopts, excl, excl_st);
break;
case 'h':
usage(stdout);
+ case 'J':
+ mod->json = 1;
+ break;
case 'p':
case 'e':
if (optarg) {
case 'x':
mod->hex = 1;
break;
+ case 'y':
+ mod->physical = 1;
+ break;
case 'V':
- printf(_("%s from %s\n"), program_invocation_short_name,
- PACKAGE_STRING);
+ printf(UTIL_LINUX_VERSION);
return EXIT_SUCCESS;
default:
- usage(stderr);
+ errtryhelp(EXIT_FAILURE);
}
}
read_basicinfo(desc, mod);
for (i = 0; i < desc->ncpuspos; i++) {
+ /* only consider present CPUs */
+ if (desc->present &&
+ !CPU_ISSET(real_cpu_num(desc, i), desc->present))
+ continue;
read_topology(desc, i);
read_cache(desc, i);
read_polarization(desc, i);
qsort(desc->caches, desc->ncaches,
sizeof(struct cpu_cache), cachecmp);
+ if (desc->ecaches)
+ qsort(desc->ecaches, desc->necaches,
+ sizeof(struct cpu_cache), cachecmp);
+
read_nodes(desc);
read_hypervisor(desc, mod);
columns[ncolumns++] = COL_CPU;
if (desc->nodemaps)
columns[ncolumns++] = COL_NODE;
+ if (desc->drawermaps)
+ columns[ncolumns++] = COL_DRAWER;
if (desc->bookmaps)
columns[ncolumns++] = COL_BOOK;
if (desc->socketmaps)