#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/personality.h>
#if (defined(__x86_64__) || defined(__i386__))
# if !defined( __SANITIZE_ADDRESS__)
#include <libsmartcols.h>
-#include "cpuset.h"
-#include "nls.h"
-#include "xalloc.h"
-#include "c.h"
-#include "strutils.h"
-#include "bitops.h"
-#include "path.h"
#include "closestream.h"
#include "optutils.h"
+
#include "lscpu.h"
#define CACHE_MAX 100
/* /sys paths */
#define _PATH_SYS_SYSTEM "/sys/devices/system"
-#define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
+#define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
#define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
#define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
-#define _PATH_PROC_XEN "/proc/xen"
-#define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
-#define _PATH_PROC_CPUINFO "/proc/cpuinfo"
-#define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
-#define _PATH_PROC_SYSINFO "/proc/sysinfo"
-#define _PATH_PROC_STATUS "/proc/self/status"
-#define _PATH_PROC_VZ "/proc/vz"
-#define _PATH_PROC_BC "/proc/bc"
-#define _PATH_PROC_DEVICETREE "/proc/device-tree"
-#define _PATH_DEV_MEM "/dev/mem"
/* Xen Domain feature flag used for /sys/hypervisor/properties/features */
#define XENFEAT_supervisor_mode_kernel 3
#define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
| (1U << XENFEAT_hvm_callback_vector) )
-/* virtualization types */
-enum {
- VIRT_NONE = 0,
- VIRT_PARA,
- VIRT_FULL,
- VIRT_CONT
-};
-const char *virt_types[] = {
+static const char *virt_types[] = {
[VIRT_NONE] = N_("none"),
[VIRT_PARA] = N_("para"),
[VIRT_FULL] = N_("full"),
[VIRT_CONT] = N_("container"),
};
-const char *hv_vendors[] = {
+static const char *hv_vendors[] = {
[HYPER_NONE] = NULL,
[HYPER_XEN] = "Xen",
[HYPER_KVM] = "KVM",
[HYPER_VBOX] = "Oracle",
[HYPER_OS400] = "OS/400",
[HYPER_PHYP] = "pHyp",
- [HYPER_SPAR] = "Unisys s-Par"
+ [HYPER_SPAR] = "Unisys s-Par",
+ [HYPER_WSL] = "Windows Subsystem for Linux"
};
-const int hv_vendor_pci[] = {
+static const int hv_vendor_pci[] = {
[HYPER_NONE] = 0x0000,
[HYPER_XEN] = 0x5853,
[HYPER_KVM] = 0x0000,
[HYPER_VBOX] = 0x80ee,
};
-const int hv_graphics_pci[] = {
+static const int hv_graphics_pci[] = {
[HYPER_NONE] = 0x0000,
[HYPER_XEN] = 0x0001,
[HYPER_KVM] = 0x0000,
[HYPER_VBOX] = 0xbeef,
};
-/* CPU modes */
-enum {
- MODE_32BIT = (1 << 1),
- MODE_64BIT = (1 << 2)
-};
-
-/* cache(s) description */
-struct cpu_cache {
- char *name;
- char *size;
-
- int nsharedmaps;
- cpu_set_t **sharedmaps;
-};
/* dispatching modes */
-enum {
- DISP_HORIZONTAL = 0,
- DISP_VERTICAL = 1
-};
-
-const char *disp_modes[] = {
+static const char *disp_modes[] = {
[DISP_HORIZONTAL] = N_("horizontal"),
[DISP_VERTICAL] = N_("vertical")
};
-/* cpu polarization */
-enum {
- POLAR_UNKNOWN = 0,
- POLAR_VLOW,
- POLAR_VMEDIUM,
- POLAR_VHIGH,
- POLAR_HORIZONTAL
-};
-
-struct polarization_modes {
- char *parsable;
- char *readable;
-};
-
-struct polarization_modes polar_modes[] = {
+static struct polarization_modes polar_modes[] = {
[POLAR_UNKNOWN] = {"U", "-"},
[POLAR_VLOW] = {"VL", "vert-low"},
[POLAR_VMEDIUM] = {"VM", "vert-medium"},
[POLAR_HORIZONTAL] = {"H", "horizontal"},
};
-/* global description */
-struct lscpu_desc {
- char *arch;
- char *vendor;
- char *machinetype; /* s390 */
- char *family;
- char *model;
- char *modelname;
- char *revision; /* alternative for model (ppc) */
- char *cpu; /* alternative for modelname (ppc, sparc) */
- char *virtflag; /* virtualization flag (vmx, svm) */
- char *hypervisor; /* hypervisor software */
- int hyper; /* hypervisor vendor ID */
- int virtype; /* VIRT_PARA|FULL|NONE ? */
- char *mhz;
- char *dynamic_mhz; /* dynamic mega hertz (s390) */
- char *static_mhz; /* static mega hertz (s390) */
- char **maxmhz; /* maximum mega hertz */
- char **minmhz; /* minimum mega hertz */
- char *stepping;
- char *bogomips;
- char *flags;
- int dispatching; /* none, horizontal or vertical */
- int mode; /* rm, lm or/and tm */
-
- int ncpuspos; /* maximal possible CPUs */
- int ncpus; /* number of present CPUs */
- cpu_set_t *present; /* mask with present CPUs */
- cpu_set_t *online; /* mask with online CPUs */
-
- int nthreads; /* number of online threads */
-
- int ncaches;
- struct cpu_cache *caches;
-
- int necaches; /* extra caches (s390) */
- struct cpu_cache *ecaches;
-
- /*
- * All maps are sequentially indexed (0..ncpuspos), the array index
- * does not have match with cpuX number as presented by kernel. You
- * have to use real_cpu_num() to get the real cpuX number.
- *
- * For example, the possible system CPUs are: 1,3,5, it means that
- * ncpuspos=3, so all arrays are in range 0..3.
- */
- int *idx2cpunum; /* mapping index to CPU num */
-
- int nnodes; /* number of NUMA modes */
- int *idx2nodenum; /* Support for discontinuous nodes */
- cpu_set_t **nodemaps; /* array with NUMA nodes */
-
- /* drawers -- based on drawer_siblings (internal kernel map of cpuX's
- * hardware threads within the same drawer */
- int ndrawers; /* number of all online drawers */
- cpu_set_t **drawermaps; /* unique drawer_siblings */
-
- /* books -- based on book_siblings (internal kernel map of cpuX's
- * hardware threads within the same book */
- int nbooks; /* number of all online books */
- cpu_set_t **bookmaps; /* unique book_siblings */
-
- /* sockets -- based on core_siblings (internal kernel map of cpuX's
- * hardware threads within the same physical_package_id (socket)) */
- int nsockets; /* number of all online sockets */
- cpu_set_t **socketmaps; /* unique core_siblings */
-
- /* cores -- based on thread_siblings (internal kernel map of cpuX's
- * hardware threads within the same core as cpuX) */
- int ncores; /* number of all online cores */
- cpu_set_t **coremaps; /* unique thread_siblings */
-
- int *polarization; /* cpu polarization */
- int *addresses; /* physical cpu addresses */
- int *configured; /* cpu configured */
- int physsockets; /* Physical sockets (modules) */
- int physchips; /* Physical chips */
- int physcoresperchip; /* Physical cores per chip */
-};
-
-enum {
- OUTPUT_SUMMARY = 0, /* default */
- OUTPUT_PARSABLE, /* -p */
- OUTPUT_READABLE, /* -e */
-};
-
-enum {
- SYSTEM_LIVE = 0, /* analyzing a live system */
- SYSTEM_SNAPSHOT, /* analyzing a snapshot of a different system */
-};
-
-struct lscpu_modifier {
- int mode; /* OUTPUT_* */
- int system; /* SYSTEM_* */
- unsigned int hex:1, /* print CPU masks rather than CPU lists */
- compat:1, /* use backwardly compatible format */
- online:1, /* print online CPUs */
- offline:1; /* print offline CPUs */
-};
-
static int maxcpus; /* size in bits of kernel cpu mask */
#define is_cpu_online(_d, _cpu) \
* IDs
*/
enum {
- COL_CPU,
- COL_CORE,
- COL_SOCKET,
- COL_NODE,
- COL_BOOK,
- COL_DRAWER,
- COL_CACHE,
- COL_POLARIZATION,
- COL_ADDRESS,
- COL_CONFIGURED,
- COL_ONLINE,
- COL_MAXMHZ,
- COL_MINMHZ,
+ COL_CPU_CPU,
+ COL_CPU_CORE,
+ COL_CPU_SOCKET,
+ COL_CPU_NODE,
+ COL_CPU_BOOK,
+ COL_CPU_DRAWER,
+ COL_CPU_CACHE,
+ COL_CPU_POLARIZATION,
+ COL_CPU_ADDRESS,
+ COL_CPU_CONFIGURED,
+ COL_CPU_ONLINE,
+ COL_CPU_MAXMHZ,
+ COL_CPU_MINMHZ,
+};
+
+enum {
+ COL_CACHE_ALLSIZE,
+ COL_CACHE_LEVEL,
+ COL_CACHE_NAME,
+ COL_CACHE_ONESIZE,
+ COL_CACHE_TYPE,
+ COL_CACHE_WAYS,
};
+
/* column description
 */
struct lscpu_coldesc {
	const char *name;
	const char *help;
+	int flags;	/* libsmartcols SCOLS_FL_* output flags (e.g. SCOLS_FL_RIGHT) */
	unsigned int is_abbr:1; /* name is abbreviation */
};
-static struct lscpu_coldesc coldescs[] =
+/* columns for the per-CPU (-e/-p) output; indexes must match the COL_CPU_* enum */
+static struct lscpu_coldesc coldescs_cpu[] =
+{
+	[COL_CPU_CPU] = { "CPU", N_("logical CPU number"), 0, 1 },	/* flags=0, is_abbr=1 */
+	[COL_CPU_CORE] = { "CORE", N_("logical core number") },
+	[COL_CPU_SOCKET] = { "SOCKET", N_("logical socket number") },
+	[COL_CPU_NODE] = { "NODE", N_("logical NUMA node number") },
+	[COL_CPU_BOOK] = { "BOOK", N_("logical book number") },
+	[COL_CPU_DRAWER] = { "DRAWER", N_("logical drawer number") },
+	[COL_CPU_CACHE] = { "CACHE", N_("shows how caches are shared between CPUs") },
+	[COL_CPU_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
+	[COL_CPU_ADDRESS] = { "ADDRESS", N_("physical address of a CPU") },
+	[COL_CPU_CONFIGURED] = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
+	[COL_CPU_ONLINE] = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
+	[COL_CPU_MAXMHZ] = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
+	[COL_CPU_MINMHZ] = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
+};
+
+/* columns for the cache (--caches) output; indexes must match the COL_CACHE_* enum */
+static struct lscpu_coldesc coldescs_cache[] =
{
-	[COL_CPU] = { "CPU", N_("logical CPU number"), 1 },
-	[COL_CORE] = { "CORE", N_("logical core number") },
-	[COL_SOCKET] = { "SOCKET", N_("logical socket number") },
-	[COL_NODE] = { "NODE", N_("logical NUMA node number") },
-	[COL_BOOK] = { "BOOK", N_("logical book number") },
-	[COL_DRAWER] = { "DRAWER", N_("logical drawer number") },
-	[COL_CACHE] = { "CACHE", N_("shows how caches are shared between CPUs") },
-	[COL_POLARIZATION] = { "POLARIZATION", N_("CPU dispatching mode on virtual hardware") },
-	[COL_ADDRESS] = { "ADDRESS", N_("physical address of a CPU") },
-	[COL_CONFIGURED] = { "CONFIGURED", N_("shows if the hypervisor has allocated the CPU") },
-	[COL_ONLINE] = { "ONLINE", N_("shows if Linux currently makes use of the CPU") },
-	[COL_MAXMHZ] = { "MAXMHZ", N_("shows the maximum MHz of the CPU") },
-	[COL_MINMHZ] = { "MINMHZ", N_("shows the minimum MHz of the CPU") }
+	[COL_CACHE_ALLSIZE] = { "ALL-SIZE", N_("size of all system caches"), SCOLS_FL_RIGHT },
+	[COL_CACHE_LEVEL] = { "LEVEL", N_("cache level"), SCOLS_FL_RIGHT },
+	[COL_CACHE_NAME] = { "NAME", N_("cache name") },
+	[COL_CACHE_ONESIZE] = { "ONE-SIZE", N_("size of one cache"), SCOLS_FL_RIGHT },
+	[COL_CACHE_TYPE] = { "TYPE", N_("cache type") },
+	[COL_CACHE_WAYS] = { "WAYS", N_("ways of associativity"), SCOLS_FL_RIGHT }
};
+
+static int get_cache_full_size(struct lscpu_desc *desc, struct cpu_cache *ca, uint64_t *res);
+
+/*
+ * Translate a column name to its COL_CPU_* index.
+ *
+ * The comparison is case-insensitive and requires a full-name match:
+ * the first namesz characters must match AND the table name must end
+ * exactly at namesz.  Returns the index, or -1 (with a warning) for an
+ * unknown column name.
+ */
+static int
+cpu_column_name_to_id(const char *name, size_t namesz)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++) {
+		const char *cn = coldescs_cpu[i].name;
+
+		/* reject prefix-only matches: cn must be exactly namesz long */
+		if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
+			return i;
+	}
+	warnx(_("unknown column: %s"), name);
+	return -1;
+}
+
static int
-column_name_to_id(const char *name, size_t namesz)
+cache_column_name_to_id(const char *name, size_t namesz)
{
size_t i;
- for (i = 0; i < ARRAY_SIZE(coldescs); i++) {
- const char *cn = coldescs[i].name;
+ for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++) {
+ const char *cn = coldescs_cache[i].name;
if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
return i;
type = 0;
if (strncmp(p, "Data", 4) == 0)
type = 'd';
- if (strncmp(p, "Instruction", 11) == 0)
+ else if (strncmp(p, "Instruction", 11) == 0)
type = 'i';
+ else if (strncmp(p, "Unified", 7) == 0)
+ type = 'u';
p = strstr(line, "size=");
if (!p || sscanf(p, "size=%lld", &size) != 1)
return 0;
desc->necaches * sizeof(struct cpu_cache));
cache = &desc->ecaches[desc->necaches - 1];
memset(cache, 0 , sizeof(*cache));
- if (type)
+
+ if (type == 'i' || type == 'd')
xasprintf(&cache->name, "L%d%c", level, type);
else
xasprintf(&cache->name, "L%d", level);
- xasprintf(&cache->size, "%lldK", size);
+
+ cache->level = level;
+ cache->size = size * 1024;
+
+ cache->type = type == 'i' ? xstrdup("Instruction") :
+ type == 'd' ? xstrdup("Data") :
+ type == 'u' ? xstrdup("Unified") : NULL;
return 1;
}
defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
m |= MODE_32BIT;
#endif
+
+#if defined(__aarch64__)
+ {
+ /* personality() is the most reliable way (since 4.7)
+ * to determine aarch32 support */
+ int pers = personality(PER_LINUX32);
+ if (pers != -1) {
+ personality(pers);
+ m |= MODE_32BIT;
+ }
+ m |= MODE_64BIT;
+ }
+#endif
return m;
}
}
#endif
+
static void
read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
- FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
+ FILE *fp;
char buf[BUFSIZ];
struct utsname utsbuf;
size_t setsize;
+ cpu_set_t *cpuset = NULL;
/* architecture */
if (uname(&utsbuf) == -1)
err(EXIT_FAILURE, _("error: uname failed"));
+
+ fp = ul_path_fopen(desc->procfs, "r", "cpuinfo");
+ if (!fp)
+ err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
desc->arch = xstrdup(utsbuf.machine);
/* details */
while (fgets(buf, sizeof(buf), fp) != NULL) {
if (lookup(buf, "vendor", &desc->vendor)) ;
else if (lookup(buf, "vendor_id", &desc->vendor)) ;
+ else if (lookup(buf, "CPU implementer", &desc->vendor)) ; /* ARM and aarch64 */
else if (lookup(buf, "family", &desc->family)) ;
else if (lookup(buf, "cpu family", &desc->family)) ;
else if (lookup(buf, "model", &desc->model)) ;
+ else if (lookup(buf, "CPU part", &desc->model)) ; /* ARM and aarch64 */
else if (lookup(buf, "model name", &desc->modelname)) ;
else if (lookup(buf, "stepping", &desc->stepping)) ;
+ else if (lookup(buf, "CPU variant", &desc->stepping)) ; /* aarch64 */
else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
+ else if (lookup(buf, "Features", &desc->flags)) ; /* aarch64 */
else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
else if (lookup(buf, "bogomips", &desc->bogomips)) ;
+ else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ; /* aarch64 */
else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
else if (lookup(buf, "cpu", &desc->cpu)) ;
else if (lookup(buf, "revision", &desc->revision)) ;
+ else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
+ else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
+ else if (lookup(buf, "address sizes", &desc->addrsz)) ; /* x86 */
else if (lookup_cache(buf, desc)) ;
else
continue;
fclose(fp);
- if (path_exist(_PATH_SYS_CPU "/kernel_max"))
+ if (ul_path_read_s32(desc->syscpu, &maxcpus, "kernel_max") == 0)
/* note that kernel_max is maximum index [NR_CPUS-1] */
- maxcpus = path_read_s32(_PATH_SYS_CPU "/kernel_max") + 1;
+ maxcpus += 1;
else if (mod->system == SYSTEM_LIVE)
/* the root is '/' so we are working with data from the current kernel */
setsize = CPU_ALLOC_SIZE(maxcpus);
- if (path_exist(_PATH_SYS_CPU "/possible")) {
- cpu_set_t *tmp = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/possible");
+ if (ul_path_readf_cpulist(desc->syscpu, &cpuset, maxcpus, "possible") == 0) {
int num, idx;
- desc->ncpuspos = CPU_COUNT_S(setsize, tmp);
+ desc->ncpuspos = CPU_COUNT_S(setsize, cpuset);
desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));
for (num = 0, idx = 0; num < maxcpus; num++) {
- if (CPU_ISSET(num, tmp))
+ if (CPU_ISSET_S(num, setsize, cpuset))
desc->idx2cpunum[idx++] = num;
}
- cpuset_free(tmp);
+ cpuset_free(cpuset);
+ cpuset = NULL;
} else
err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
_PATH_SYS_CPU "/possible");
/* get mask for present CPUs */
- if (path_exist(_PATH_SYS_CPU "/present")) {
- desc->present = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/present");
+ if (ul_path_readf_cpulist(desc->syscpu, &desc->present, maxcpus, "present") == 0)
desc->ncpus = CPU_COUNT_S(setsize, desc->present);
- }
/* get mask for online CPUs */
- if (path_exist(_PATH_SYS_CPU "/online")) {
- desc->online = path_read_cpulist(maxcpus, _PATH_SYS_CPU "/online");
+ if (ul_path_readf_cpulist(desc->syscpu, &desc->online, maxcpus, "online") == 0)
desc->nthreads = CPU_COUNT_S(setsize, desc->online);
- }
/* get dispatching mode */
- if (path_exist(_PATH_SYS_CPU "/dispatching"))
- desc->dispatching = path_read_s32(_PATH_SYS_CPU "/dispatching");
- else
+ if (ul_path_read_s32(desc->syscpu, &desc->dispatching, "dispatching") != 0)
desc->dispatching = -1;
+ /* get cpufreq boost mode */
+ if (ul_path_read_s32(desc->syscpu, &desc->freqboost, "cpufreq/boost") != 0)
+ desc->freqboost = -1;
+
if (mod->system == SYSTEM_LIVE)
read_physical_info_powerpc(desc);
- if (path_exist(_PATH_PROC_SYSINFO)) {
- FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
-
- while (fd && fgets(buf, sizeof(buf), fd) != NULL && !desc->machinetype)
+ if ((fp = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
+ while (fgets(buf, sizeof(buf), fp) != NULL && !desc->machinetype)
lookup(buf, "Type", &desc->machinetype);
- if (fd)
- fclose(fd);
+ fclose(fp);
}
}
static int
-has_pci_device(unsigned int vendor, unsigned int device)
+has_pci_device(struct lscpu_desc *desc, unsigned int vendor, unsigned int device)
{
FILE *f;
unsigned int num, fn, ven, dev;
int res = 1;
- f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
+ f = ul_path_fopen(desc->procfs, "r", "bus/pci/devices");
if (!f)
return 0;
}
#endif
-static int is_compatible(const char *path, const char *str)
+static int is_devtree_compatible(struct lscpu_desc *desc, const char *str)
{
- FILE *fd = path_fopen("r", 0, "%s", path);
+ FILE *fd = ul_path_fopen(desc->procfs, "r", "device-tree/compatible");
if (fd) {
char buf[256];
assert(!desc->hyper);
/* IBM iSeries: legacy, para-virtualized on top of OS/400 */
- if (path_exist("/proc/iSeries")) {
+ if (ul_path_access(desc->procfs, F_OK, "iSeries") == 0) {
desc->hyper = HYPER_OS400;
desc->virtype = VIRT_PARA;
/* PowerNV (POWER Non-Virtualized, bare-metal) */
- } else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "ibm,powernv")) {
+ } else if (is_devtree_compatible(desc, "ibm,powernv")) {
desc->hyper = HYPER_NONE;
desc->virtype = VIRT_NONE;
/* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
- } else if (path_exist(_PATH_PROC_DEVICETREE "/ibm,partition-name")
- && path_exist(_PATH_PROC_DEVICETREE "/hmc-managed?")
- && !path_exist(_PATH_PROC_DEVICETREE "/chosen/qemu,graphic-width")) {
+ } else if (ul_path_access(desc->procfs, F_OK, "device-tree/ibm,partition-name") == 0
+ && ul_path_access(desc->procfs, F_OK, "device-tree/hmc-managed?") == 0
+ && ul_path_access(desc->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {
+
FILE *fd;
desc->hyper = HYPER_PHYP;
desc->virtype = VIRT_PARA;
- fd = path_fopen("r", 0, _PATH_PROC_DEVICETREE "/ibm,partition-name");
+
+ fd = ul_path_fopen(desc->procfs, "r", "device-tree/ibm,partition-name");
if (fd) {
char buf[256];
if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
}
/* Qemu */
- } else if (is_compatible(_PATH_PROC_DEVICETREE "/compatible", "qemu,pseries")) {
+ } else if (is_devtree_compatible(desc, "qemu,pseries")) {
desc->hyper = HYPER_KVM;
desc->virtype = VIRT_PARA;
}
uint32_t eax, ebx, ecx, edx;
struct sigaction act, oact;
+ /*
+ * FIXME: Not reliable for non-root users. Note it works as expected if
+ * vmware_bdoor() is not optimized for PIE, but then it fails to build
+ * on 32bit x86 systems. See lscpu git log for more details (commit
+ * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
+ */
+ if (getuid() != 0)
+ return 0;
+
/*
* The assembly routine for vmware detection works
* fine under vmware, even if ran as regular user. But
act.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &act, &oact))
- err(EXIT_FAILURE, _("error: can not set signal handler"));
+ err(EXIT_FAILURE, _("cannot set signal handler"));
vmware_bdoor(&eax, &ebx, &ecx, &edx);
if (sigaction(SIGSEGV, &oact, NULL))
- err(EXIT_FAILURE, _("error: can not restore signal handler"));
+ err(EXIT_FAILURE, _("cannot restore signal handler"));
return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
}
{
FILE *fd;
+ /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
+
+ if ((fd = ul_path_fopen(desc->procfs, "r", "sys/kernel/osrelease"))) {
+ char buf[256];
+
+ if (fgets(buf, sizeof(buf), fd) != NULL) {
+ if (strstr(buf, "Microsoft")) {
+ desc->hyper = HYPER_WSL;
+ desc->virtype = VIRT_CONT;
+ }
+ }
+ fclose(fd);
+ if (desc->virtype)
+ return;
+ }
+
if (mod->system != SYSTEM_SNAPSHOT) {
read_hypervisor_cpuid(desc);
if (!desc->hyper)
if (desc->hyper == HYPER_XEN) {
uint32_t features;
- fd = path_fopen("r", 0, _PATH_SYS_HYP_FEATURES);
+ fd = ul_prefix_fopen(desc->prefix, "r", _PATH_SYS_HYP_FEATURES);
+
if (fd && fscanf(fd, "%x", &features) == 1) {
/* Xen PV domain */
if (features & XEN_FEATURES_PV_MASK)
else if ((features & XEN_FEATURES_PVH_MASK)
== XEN_FEATURES_PVH_MASK)
desc->virtype = VIRT_PARA;
- fclose(fd);
- } else {
- err(EXIT_FAILURE, _("failed to read from: %s"),
- _PATH_SYS_HYP_FEATURES);
}
+ if (fd)
+ fclose(fd);
}
} else if (read_hypervisor_powerpc(desc) > 0) {}
/* Xen para-virt or dom0 */
- else if (path_exist(_PATH_PROC_XEN)) {
+ else if (ul_path_access(desc->procfs, F_OK, "xen") == 0) {
int dom0 = 0;
- fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
+ fd = ul_path_fopen(desc->procfs, "r", "xen/capabilities");
if (fd) {
char buf[256];
desc->hyper = HYPER_XEN;
/* Xen full-virt on non-x86_64 */
- } else if (has_pci_device( hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
+ } else if (has_pci_device(desc, hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
desc->hyper = HYPER_XEN;
desc->virtype = VIRT_FULL;
- } else if (has_pci_device( hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
+ } else if (has_pci_device(desc, hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
desc->hyper = HYPER_VMWARE;
desc->virtype = VIRT_FULL;
- } else if (has_pci_device( hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
+ } else if (has_pci_device(desc, hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
desc->hyper = HYPER_VBOX;
desc->virtype = VIRT_FULL;
/* IBM PR/SM */
- } else if (path_exist(_PATH_PROC_SYSINFO)) {
- FILE *sysinfo_fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
+ } else if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
char buf[BUFSIZ];
- if (!sysinfo_fd)
- return;
desc->hyper = HYPER_IBM;
desc->hypervisor = "PR/SM";
desc->virtype = VIRT_FULL;
- while (fgets(buf, sizeof(buf), sysinfo_fd) != NULL) {
- char *str;
+ while (fgets(buf, sizeof(buf), fd) != NULL) {
+ char *str, *p;
if (!strstr(buf, "Control Program:"))
continue;
desc->hyper = HYPER_IBM;
else
desc->hyper = HYPER_KVM;
- str = strchr(buf, ':');
- if (!str)
+ p = strchr(buf, ':');
+ if (!p)
continue;
- xasprintf(&str, "%s", str + 1);
+ xasprintf(&str, "%s", p + 1);
/* remove leading, trailing and repeating whitespace */
while (*str == ' ')
*(str--) = '\0';
while ((str = strstr(desc->hypervisor, " ")))
memmove(str, str + 1, strlen(str));
+ break;
}
- fclose(sysinfo_fd);
+ fclose(fd);
}
/* OpenVZ/Virtuozzo - /proc/vz dir should exist
* /proc/bc should not */
- else if (path_exist(_PATH_PROC_VZ) && !path_exist(_PATH_PROC_BC)) {
+ else if (ul_path_access(desc->procfs, F_OK, "vz") == 0 &&
+ ul_path_access(desc->procfs, F_OK, "bc") != 0) {
desc->hyper = HYPER_PARALLELS;
desc->virtype = VIRT_CONT;
desc->virtype = VIRT_PARA;
/* Linux-VServer */
- } else if (path_exist(_PATH_PROC_STATUS)) {
+ } else if ((fd = ul_path_fopen(desc->procfs, "r", "self/status"))) {
char buf[BUFSIZ];
char *val = NULL;
- fd = path_fopen("r", 1, _PATH_PROC_STATUS);
while (fgets(buf, sizeof(buf), fd) != NULL) {
if (lookup(buf, "VxID", &val))
break;
fclose(fd);
if (val) {
+ char *org = val;
+
while (isdigit(*val))
++val;
if (!*val) {
desc->hyper = HYPER_VSERVER;
desc->virtype = VIRT_CONT;
}
+ free(org);
}
}
}
{
cpu_set_t *thread_siblings, *core_siblings;
cpu_set_t *book_siblings, *drawer_siblings;
- int num = real_cpu_num(desc, idx);
+ int coreid, socketid, bookid, drawerid;
+ int i, num = real_cpu_num(desc, idx);
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
+ if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/topology/thread_siblings", num) != 0)
return;
- thread_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
- "/cpu%d/topology/thread_siblings", num);
- core_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
- "/cpu%d/topology/core_siblings", num);
- book_siblings = NULL;
- if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num))
- book_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
- "/cpu%d/topology/book_siblings", num);
- drawer_siblings = NULL;
- if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/drawer_siblings", num))
- drawer_siblings = path_read_cpuset(maxcpus, _PATH_SYS_CPU
- "/cpu%d/topology/drawer_siblings", num);
+ ul_path_readf_cpuset(desc->syscpu, &thread_siblings, maxcpus,
+ "cpu%d/topology/thread_siblings", num);
+ ul_path_readf_cpuset(desc->syscpu, &core_siblings, maxcpus,
+ "cpu%d/topology/core_siblings", num);
+ ul_path_readf_cpuset(desc->syscpu, &book_siblings, maxcpus,
+ "cpu%d/topology/book_siblings", num);
+ ul_path_readf_cpuset(desc->syscpu, &drawer_siblings, maxcpus,
+ "cpu%d/topology/drawer_siblings", num);
+
+ if (ul_path_readf_s32(desc->syscpu, &coreid, "cpu%d/topology/core_id", num) != 0)
+ coreid = -1;
+
+ if (ul_path_readf_s32(desc->syscpu, &socketid, "cpu%d/topology/physical_package_id", num) != 0)
+ socketid = -1;
+
+ if (ul_path_readf_s32(desc->syscpu, &bookid, "cpu%d/topology/book_id", num) != 0)
+ bookid = -1;
+
+ if (ul_path_readf_s32(desc->syscpu, &drawerid, "cpu%d/topology/drawer_id", num) != 0)
+ drawerid = -1;
if (!desc->coremaps) {
int ndrawers, nbooks, nsockets, ncores, nthreads;
*/
desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- if (book_siblings)
+ desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ for (i = 0; i < desc->ncpuspos; i++)
+ desc->coreids[i] = desc->socketids[i] = -1;
+ if (book_siblings) {
desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- if (drawer_siblings)
+ desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ for (i = 0; i < desc->ncpuspos; i++)
+ desc->bookids[i] = -1;
+ }
+ if (drawer_siblings) {
desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
+ desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
+ for (i = 0; i < desc->ncpuspos; i++)
+ desc->drawerids[i] = -1;
+ }
}
add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
+ desc->coreids[idx] = coreid;
add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
- if (book_siblings)
+ desc->socketids[idx] = socketid;
+ if (book_siblings) {
add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
- if (drawer_siblings)
+ desc->bookids[idx] = bookid;
+ }
+ if (drawer_siblings) {
add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
+ desc->drawerids[idx] = drawerid;
+ }
}
static void
if (desc->dispatching < 0)
return;
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/polarization", num))
+ if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/polarization", num) != 0)
return;
if (!desc->polarization)
desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
- path_read_str(mode, sizeof(mode), _PATH_SYS_CPU "/cpu%d/polarization", num);
+
+ ul_path_readf_buffer(desc->syscpu, mode, sizeof(mode), "cpu%d/polarization", num);
+
if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
desc->polarization[idx] = POLAR_VLOW;
else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
{
int num = real_cpu_num(desc, idx);
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/address", num))
+ if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/address", num) != 0)
return;
if (!desc->addresses)
desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
- desc->addresses[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/address", num);
+ ul_path_readf_s32(desc->syscpu, &desc->addresses[idx], "cpu%d/address", num);
}
static void
{
int num = real_cpu_num(desc, idx);
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/configure", num))
+ if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/configure", num) != 0)
return;
if (!desc->configured)
desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
- desc->configured[idx] = path_read_s32(_PATH_SYS_CPU "/cpu%d/configure", num);
+ ul_path_readf_s32(desc->syscpu, &desc->configured[idx], "cpu%d/configure", num);
}
+/*
+ * Read overall maximum frequency of cpu.
+ *
+ * Scans the per-CPU desc->maxmhz[] strings for every CPU set in
+ * desc->present and formats the largest value ("%.4f") into buf;
+ * returns buf.  Produces "0.0000" when no frequency data is found.
+ * NOTE(review): assumes desc->maxmhz is allocated whenever
+ * desc->present is set — confirm against callers.
+ */
+static char *
+cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
+{
+	int i;
+	float cpu_freq = 0.0;
+	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
+
+	if (desc->present) {
+		for (i = 0; i < desc->ncpuspos; i++) {
+			/* only present CPUs with a known max frequency count */
+			if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
+			    && desc->maxmhz[i]) {
+				float freq = atof(desc->maxmhz[i]);
+
+				if (freq > cpu_freq)
+					cpu_freq = freq;
+			}
+		}
+	}
+	snprintf(buf, bufsz, "%.4f", cpu_freq);
+	return buf;
+}
+
+/*
+ * Read overall minimum frequency of cpu.
+ *
+ * Scans the per-CPU desc->minmhz[] strings for every CPU set in
+ * desc->present and formats the smallest value ("%.4f") into buf;
+ * returns buf.  cpu_freq starts at -1.0 as a "no data yet" sentinel,
+ * so the output is "-1.0000" when no frequency data is found.
+ * NOTE(review): assumes desc->minmhz is allocated whenever
+ * desc->present is set — confirm against callers.
+ */
+static char *
+cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
+{
+	int i;
+	float cpu_freq = -1.0;
+	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
+
+	if (desc->present) {
+		for (i = 0; i < desc->ncpuspos; i++) {
+			/* only present CPUs with a known min frequency count */
+			if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
+			    && desc->minmhz[i]) {
+				float freq = atof(desc->minmhz[i]);
+
+				if (cpu_freq < 0.0 || freq < cpu_freq)
+					cpu_freq = freq;
+			}
+		}
+	}
+	snprintf(buf, bufsz, "%.4f", cpu_freq);
+	return buf;
+}
+
+
static void
read_max_mhz(struct lscpu_desc *desc, int idx)
{
int num = real_cpu_num(desc, idx);
+ int mhz;
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_max_freq", num))
+ if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) != 0)
return;
if (!desc->maxmhz)
desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
- xasprintf(&(desc->maxmhz[idx]), "%.4f",
- (float)path_read_s32(_PATH_SYS_CPU
- "/cpu%d/cpufreq/cpuinfo_max_freq", num) / 1000);
+ xasprintf(&desc->maxmhz[idx], "%.4f", (float) mhz / 1000);
}
static void
read_min_mhz(struct lscpu_desc *desc, int idx)
{
int num = real_cpu_num(desc, idx);
+ int mhz;
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/cpufreq/cpuinfo_min_freq", num))
+ if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) != 0)
return;
if (!desc->minmhz)
desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
- xasprintf(&(desc->minmhz[idx]), "%.4f",
- (float)path_read_s32(_PATH_SYS_CPU
- "/cpu%d/cpufreq/cpuinfo_min_freq", num) / 1000);
+ xasprintf(&desc->minmhz[idx], "%.4f", (float) mhz / 1000);
}
static int
int num = real_cpu_num(desc, idx);
if (!desc->ncaches) {
- while(path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
- num, desc->ncaches))
+ while (ul_path_accessf(desc->syscpu, F_OK,
+ "cpu%d/cache/index%d",
+ num, desc->ncaches) == 0)
desc->ncaches++;
if (!desc->ncaches)
return;
-
desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
}
for (i = 0; i < desc->ncaches; i++) {
struct cpu_cache *ca = &desc->caches[i];
cpu_set_t *map;
- if (!path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d",
- num, i))
+ if (ul_path_accessf(desc->syscpu, F_OK,
+ "cpu%d/cache/index%d", num, i) != 0)
continue;
if (!ca->name) {
- int type, level;
+ int type = 0;
/* cache type */
- path_read_str(buf, sizeof(buf),
- _PATH_SYS_CPU "/cpu%d/cache/index%d/type",
- num, i);
- if (!strcmp(buf, "Data"))
- type = 'd';
- else if (!strcmp(buf, "Instruction"))
- type = 'i';
- else
- type = 0;
+ if (ul_path_readf_string(desc->syscpu, &ca->type,
+ "cpu%d/cache/index%d/type", num, i) > 0) {
+ if (!strcmp(ca->type, "Data"))
+ type = 'd';
+ else if (!strcmp(ca->type, "Instruction"))
+ type = 'i';
+ }
/* cache level */
- level = path_read_s32(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
- num, i);
+ ul_path_readf_s32(desc->syscpu, &ca->level,
+ "cpu%d/cache/index%d/level", num, i);
if (type)
- snprintf(buf, sizeof(buf), "L%d%c", level, type);
+ snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
else
- snprintf(buf, sizeof(buf), "L%d", level);
+ snprintf(buf, sizeof(buf), "L%d", ca->level);
ca->name = xstrdup(buf);
+ /* cache ways */
+ ul_path_readf_s32(desc->syscpu, &ca->ways,
+ "cpu%d/cache/index%d/ways_of_associativity", num, i);
+
/* cache size */
- if (path_exist(_PATH_SYS_CPU "/cpu%d/cache/index%d/size",num, i)) {
- path_read_str(buf, sizeof(buf),
- _PATH_SYS_CPU "/cpu%d/cache/index%d/size", num, i);
- ca->size = xstrdup(buf);
- } else {
- ca->size = xstrdup("unknown size");
- }
+ if (ul_path_readf_buffer(desc->syscpu, buf, sizeof(buf),
+ "cpu%d/cache/index%d/size", num, i) > 0)
+ parse_size(buf, &ca->size, NULL);
+ else
+ ca->size = 0;
}
/* information about how CPUs share different caches */
- map = path_read_cpuset(maxcpus,
- _PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
- num, i);
+ ul_path_readf_cpuset(desc->syscpu, &map, maxcpus,
+ "cpu%d/cache/index%d/shared_cpu_map", num, i);
if (!ca->sharedmaps)
ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
int i = 0;
DIR *dir;
struct dirent *d;
- char *path;
+ struct path_cxt *sysnode;
+
+ desc->nnodes = 0;
+
+ sysnode = ul_new_path(_PATH_SYS_NODE);
+ if (!sysnode)
+ err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
+ ul_path_set_prefix(sysnode, desc->prefix);
- /* number of NUMA node */
- path = path_strdup(_PATH_SYS_NODE);
- dir = opendir(path);
- free(path);
+ dir = ul_path_opendir(sysnode, NULL);
+ if (!dir)
+ goto done;
- while (dir && (d = readdir(dir))) {
+ while ((d = readdir(dir))) {
if (is_node_dirent(d))
desc->nnodes++;
}
if (!desc->nnodes) {
- if (dir)
- closedir(dir);
- return;
+ closedir(dir);
+ goto done;
}
desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));
- if (dir) {
- rewinddir(dir);
- while ((d = readdir(dir)) && i < desc->nnodes) {
- if (is_node_dirent(d))
- desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
- _("Failed to extract the node number"));
- }
- closedir(dir);
- qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
+ rewinddir(dir);
+ while ((d = readdir(dir)) && i < desc->nnodes) {
+ if (is_node_dirent(d))
+ desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
+ _("Failed to extract the node number"));
}
+ closedir(dir);
+ qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
/* information about how nodes share different CPUs */
for (i = 0; i < desc->nnodes; i++)
- desc->nodemaps[i] = path_read_cpuset(maxcpus,
- _PATH_SYS_NODE "/node%d/cpumap",
- desc->idx2nodenum[i]);
+ ul_path_readf_cpuset(sysnode, &desc->nodemaps[i], maxcpus,
+ "node%d/cpumap", desc->idx2nodenum[i]);
+done:
+ ul_unref_path(sysnode);
}
static char *
*buf = '\0';
switch (col) {
- case COL_CPU:
+ case COL_CPU_CPU:
snprintf(buf, bufsz, "%d", cpu);
break;
- case COL_CORE:
- if (cpuset_ary_isset(cpu, desc->coremaps,
- desc->ncores, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zu", i);
+ case COL_CPU_CORE:
+ if (mod->physical) {
+ if (desc->coreids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->coreids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->coremaps,
+ desc->ncores, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
- case COL_SOCKET:
- if (cpuset_ary_isset(cpu, desc->socketmaps,
- desc->nsockets, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zu", i);
+ case COL_CPU_SOCKET:
+ if (mod->physical) {
+ if (desc->socketids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->socketids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->socketmaps,
+ desc->nsockets, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
- case COL_NODE:
+ case COL_CPU_NODE:
if (cpuset_ary_isset(cpu, desc->nodemaps,
desc->nnodes, setsize, &i) == 0)
snprintf(buf, bufsz, "%d", desc->idx2nodenum[i]);
break;
- case COL_DRAWER:
- if (cpuset_ary_isset(cpu, desc->drawermaps,
- desc->ndrawers, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zu", i);
+ case COL_CPU_DRAWER:
+ if (mod->physical) {
+ if (desc->drawerids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->drawerids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->drawermaps,
+ desc->ndrawers, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
- case COL_BOOK:
- if (cpuset_ary_isset(cpu, desc->bookmaps,
- desc->nbooks, setsize, &i) == 0)
- snprintf(buf, bufsz, "%zu", i);
+ case COL_CPU_BOOK:
+ if (mod->physical) {
+ if (desc->bookids[idx] == -1)
+ snprintf(buf, bufsz, "-");
+ else
+ snprintf(buf, bufsz, "%d", desc->bookids[idx]);
+ } else {
+ if (cpuset_ary_isset(cpu, desc->bookmaps,
+ desc->nbooks, setsize, &i) == 0)
+ snprintf(buf, bufsz, "%zu", i);
+ }
break;
- case COL_CACHE:
+ case COL_CPU_CACHE:
{
char *p = buf;
size_t sz = bufsz;
}
break;
}
- case COL_POLARIZATION:
+ case COL_CPU_POLARIZATION:
if (desc->polarization) {
int x = desc->polarization[idx];
polar_modes[x].readable);
}
break;
- case COL_ADDRESS:
+ case COL_CPU_ADDRESS:
if (desc->addresses)
snprintf(buf, bufsz, "%d", desc->addresses[idx]);
break;
- case COL_CONFIGURED:
+ case COL_CPU_CONFIGURED:
if (!desc->configured)
break;
if (mod->mode == OUTPUT_PARSABLE)
snprintf(buf, bufsz, "%s",
desc->configured[idx] ? _("yes") : _("no"));
break;
- case COL_ONLINE:
+ case COL_CPU_ONLINE:
if (!desc->online)
break;
if (mod->mode == OUTPUT_PARSABLE)
snprintf(buf, bufsz, "%s",
is_cpu_online(desc, cpu) ? _("yes") : _("no"));
break;
- case COL_MAXMHZ:
- if (desc->maxmhz)
+ case COL_CPU_MAXMHZ:
+ if (desc->maxmhz && desc->maxmhz[idx])
xstrncpy(buf, desc->maxmhz[idx], bufsz);
break;
- case COL_MINMHZ:
- if (desc->minmhz)
+ case COL_CPU_MINMHZ:
+ if (desc->minmhz && desc->minmhz[idx])
xstrncpy(buf, desc->minmhz[idx], bufsz);
break;
}
{
*buf = '\0';
- if (col == COL_CACHE) {
+ if (col == COL_CPU_CACHE) {
char *p = buf;
size_t sz = bufsz;
int i;
if (desc->ncaches)
return buf;
}
- snprintf(buf, bufsz, "%s", coldescs[col].name);
+ snprintf(buf, bufsz, "%s", coldescs_cpu[col].name);
return buf;
}
+/*
+ * [-C] backend
+ *
+ * Emits one libsmartcols row per detected cache, using the columns the
+ * user selected in cols[].  Caches are walked in reverse index order so
+ * the lowest level (L1*) is printed first.  Exits on allocation failure.
+ */
+static void
+print_caches_readable(struct lscpu_desc *desc, int cols[], int ncols,
+	       struct lscpu_modifier *mod)
+{
+	int i;
+	struct libscols_table *table;
+
+	scols_init_debug(0);
+
+	table = scols_new_table();
+	if (!table)
+		err(EXIT_FAILURE, _("failed to allocate output table"));
+	/* with --json the table becomes a JSON array named "caches" */
+	if (mod->json) {
+		scols_table_enable_json(table, 1);
+		scols_table_set_name(table, "caches");
+	}
+
+	/* one output column per selected cache column descriptor */
+	for (i = 0; i < ncols; i++) {
+		struct lscpu_coldesc *cd = &coldescs_cache[cols[i]];
+		if (!scols_table_new_column(table, cd->name, 0, cd->flags))
+			err(EXIT_FAILURE, _("failed to allocate output column"));
+	}
+
+	for (i = desc->ncaches - 1; i >= 0; i--) {
+		struct cpu_cache *ca = &desc->caches[i];
+		struct libscols_line *line;
+		int c;
+
+		line = scols_table_new_line(table, NULL);
+		if (!line)
+			err(EXIT_FAILURE, _("failed to allocate output line"));
+
+		for (c = 0; c < ncols; c++) {
+			char *data = NULL;
+			int col = cols[c];
+
+			switch (col) {
+			case COL_CACHE_NAME:
+				if (ca->name)
+					data = xstrdup(ca->name);
+				break;
+			case COL_CACHE_ONESIZE:
+				/* size of a single cache instance; -B keeps raw bytes */
+				if (!ca->size)
+					break;
+				if (mod->bytes)
+					xasprintf(&data, "%" PRIu64, ca->size);
+				else
+					data = size_to_human_string(SIZE_SUFFIX_1LETTER, ca->size);
+				break;
+			case COL_CACHE_ALLSIZE:
+				/* total size summed over all instances of this cache */
+			{
+				uint64_t sz = 0;
+
+				if (get_cache_full_size(desc, ca, &sz) != 0)
+					break;
+				if (mod->bytes)
+					xasprintf(&data, "%" PRIu64, sz);
+				else
+					data = size_to_human_string(SIZE_SUFFIX_1LETTER, sz);
+				break;
+			}
+			case COL_CACHE_WAYS:
+				if (ca->ways)
+					xasprintf(&data, "%d", ca->ways);
+				break;
+			case COL_CACHE_TYPE:
+				if (ca->type)
+					data = xstrdup(ca->type);
+				break;
+			case COL_CACHE_LEVEL:
+				if (ca->level)
+					xasprintf(&data, "%d", ca->level);
+				break;
+			}
+
+			/* refer_data: the table takes ownership of @data and frees it;
+			 * a cell left without data is rendered empty */
+			if (data && scols_line_refer_data(line, c, data))
+				err(EXIT_FAILURE, _("failed to add output data"));
+		}
+	}
+
+	scols_print_table(table);
+	scols_unref_table(table);
+}
+
/*
* [-p] backend, we support two parsable formats:
*
* 1,1,0,0,1:1:0
*/
static void
-print_parsable(struct lscpu_desc *desc, int cols[], int ncols,
+print_cpus_parsable(struct lscpu_desc *desc, int cols[], int ncols,
struct lscpu_modifier *mod)
{
char buf[BUFSIZ], *data;
for (i = 0; i < ncols; i++) {
int col = cols[i];
- if (col == COL_CACHE) {
+ if (col == COL_CPU_CACHE) {
if (mod->compat && !desc->ncaches)
continue;
if (mod->compat && i != 0)
data = get_cell_header(desc, col, mod, buf, sizeof(buf));
- if (data && * data && col != COL_CACHE &&
- !coldescs[col].is_abbr) {
+ if (data && * data && col != COL_CPU_CACHE &&
+ !coldescs_cpu[col].is_abbr) {
/*
* For normal column names use mixed case (e.g. "Socket")
*/
if (desc->present && !is_cpu_present(desc, cpu))
continue;
for (c = 0; c < ncols; c++) {
- if (mod->compat && cols[c] == COL_CACHE) {
+ if (mod->compat && cols[c] == COL_CPU_CACHE) {
if (!desc->ncaches)
continue;
if (c > 0)
* [-e] backend
*/
static void
-print_readable(struct lscpu_desc *desc, int cols[], int ncols,
+print_cpus_readable(struct lscpu_desc *desc, int cols[], int ncols,
struct lscpu_modifier *mod)
{
int i;
table = scols_new_table();
if (!table)
- err(EXIT_FAILURE, _("failed to initialize output table"));
+ err(EXIT_FAILURE, _("failed to allocate output table"));
+ if (mod->json) {
+ scols_table_enable_json(table, 1);
+ scols_table_set_name(table, "cpus");
+ }
for (i = 0; i < ncols; i++) {
data = get_cell_header(desc, cols[i], mod, buf, sizeof(buf));
- if (!scols_table_new_column(table, xstrdup(data), 0, 0))
- err(EXIT_FAILURE, _("failed to initialize output column"));
+ if (!scols_table_new_column(table, data, 0, 0))
+ err(EXIT_FAILURE, _("failed to allocate output column"));
}
for (i = 0; i < desc->ncpuspos; i++) {
line = scols_table_new_line(table, NULL);
if (!line)
- err(EXIT_FAILURE, _("failed to initialize output line"));
+ err(EXIT_FAILURE, _("failed to allocate output line"));
for (c = 0; c < ncols; c++) {
data = get_cell_data(desc, i, cols[c], mod,
buf, sizeof(buf));
if (!data || !*data)
data = "-";
- scols_line_set_data(line, c, data);
+ if (scols_line_set_data(line, c, data))
+ err(EXIT_FAILURE, _("failed to add output data"));
}
}
scols_unref_table(table);
}
-/* output formats "<key> <value>"*/
-#define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
-#define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
+
+/*
+ * Appends one "<description> <data>" row to the summary table @tb.
+ * The data cell is built printf-style from @fmt; the format attribute
+ * lets the compiler check callers' format strings.
+ */
+static void __attribute__ ((__format__(printf, 3, 4)))
+	add_summary_sprint(struct libscols_table *tb,
+			const char *txt,
+			const char *fmt,
+			...)
+{
+	struct libscols_line *ln = scols_table_new_line(tb, NULL);
+	char *data;
+	va_list args;
+
+	if (!ln)
+		err(EXIT_FAILURE, _("failed to allocate output line"));
+
+	/* description column */
+	scols_line_set_data(ln, 0, txt);
+
+	/* data column -- xvasprintf() is the aborting x-wrapper, so @data
+	 * is expected to be set on return */
+	va_start(args, fmt);
+	xvasprintf(&data, fmt, args);
+	va_end(args);
+
+	/* refer_data: the table takes ownership of @data and frees it */
+	if (data && scols_line_refer_data(ln, 1, data))
+		err(EXIT_FAILURE, _("failed to add output data"));
+}
+
+/* shorthands for the common "%d" and "%s" summary rows */
+#define add_summary_n(tb, txt, num)	add_summary_sprint(tb, txt, "%d", num)
+#define add_summary_s(tb, txt, str)	add_summary_sprint(tb, txt, "%s", str)
static void
-print_cpuset(const char *key, cpu_set_t *set, int hex)
+print_cpuset(struct libscols_table *tb,
+ const char *key, cpu_set_t *set, int hex)
{
size_t setsize = CPU_ALLOC_SIZE(maxcpus);
size_t setbuflen = 7 * maxcpus;
if (hex) {
p = cpumask_create(setbuf, setbuflen, set, setsize);
- printf("%-23s0x%s\n", key, p);
+ add_summary_s(tb, key, p);
} else {
p = cpulist_create(setbuf, setbuflen, set, setsize);
- print_s(key, p);
+ add_summary_s(tb, key, p);
+ }
+}
+
+/*
+ * Computes the total size of cache @ca summed over all its instances in
+ * the system and stores it in @res.  The per-instance size is scaled by
+ * the number of cores per shared instance, derived from the CPUs that
+ * share the first instance's map (sharedmaps[0]).
+ *
+ * Returns 0 on success, negative errno-style value on failure.
+ */
+static int get_cache_full_size(struct lscpu_desc *desc,
+		struct cpu_cache *ca, uint64_t *res)
+{
+	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
+	int i, nshares = 0;
+
+	/* Count number of CPUs which shares the cache */
+	for (i = 0; i < desc->ncpuspos; i++) {
+		int cpu = real_cpu_num(desc, i);
+
+		if (desc->present && !is_cpu_present(desc, cpu))
+			continue;
+		if (CPU_ISSET_S(cpu, setsize, ca->sharedmaps[0]))
+			nshares++;
	}
+	/* Correction for CPU threads */
+	if (desc->nthreads > desc->ncores)
+		nshares /= (desc->nthreads / desc->ncores);
+
+	/* no sharing CPU found (or the correction truncated to zero);
+	 * bail out rather than divide by zero below */
+	if (!nshares)
+		return -EINVAL;
+
+	*res = (desc->ncores / nshares) * ca->size;
+	return 0;
}
/*
static void
print_summary(struct lscpu_desc *desc, struct lscpu_modifier *mod)
{
- char buf[512];
- int i;
+ char buf[BUFSIZ];
+ int i = 0;
size_t setsize = CPU_ALLOC_SIZE(maxcpus);
+ struct libscols_table *tb;
- print_s(_("Architecture:"), desc->arch);
+ scols_init_debug(0);
+ tb = scols_new_table();
+ if (!tb)
+ err(EXIT_FAILURE, _("failed to allocate output table"));
+
+ scols_table_enable_noheadings(tb, 1);
+ if (mod->json) {
+ scols_table_enable_json(tb, 1);
+ scols_table_set_name(tb, "lscpu");
+ }
+
+ if (scols_table_new_column(tb, "field", 0, 0) == NULL ||
+ scols_table_new_column(tb, "data", 0, SCOLS_FL_NOEXTREMES) == NULL)
+ err(EXIT_FAILURE, _("failed to initialize output column"));
+
+ add_summary_s(tb, _("Architecture:"), desc->arch);
if (desc->mode) {
- char mbuf[64], *p = mbuf;
+ char *p = buf;
if (desc->mode & MODE_32BIT) {
strcpy(p, "32-bit, ");
p += 8;
}
*(p - 2) = '\0';
- print_s(_("CPU op-mode(s):"), mbuf);
+ add_summary_s(tb, _("CPU op-mode(s):"), buf);
}
#if !defined(WORDS_BIGENDIAN)
- print_s(_("Byte Order:"), "Little Endian");
+ add_summary_s(tb, _("Byte Order:"), "Little Endian");
#else
- print_s(_("Byte Order:"), "Big Endian");
+ add_summary_s(tb, _("Byte Order:"), "Big Endian");
#endif
- print_n(_("CPU(s):"), desc->ncpus);
+
+ if (desc->addrsz)
+ add_summary_s(tb, _("Address sizes:"), desc->addrsz);
+
+ add_summary_n(tb, _("CPU(s):"), desc->ncpus);
if (desc->online)
- print_cpuset(mod->hex ? _("On-line CPU(s) mask:") :
- _("On-line CPU(s) list:"),
+ print_cpuset(tb, mod->hex ? _("On-line CPU(s) mask:") :
+ _("On-line CPU(s) list:"),
desc->online, mod->hex);
if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
if (!is_cpu_online(desc, cpu) && is_cpu_present(desc, cpu))
CPU_SET_S(cpu, setsize, set);
}
- print_cpuset(mod->hex ? _("Off-line CPU(s) mask:") :
- _("Off-line CPU(s) list:"),
+ print_cpuset(tb, mod->hex ? _("Off-line CPU(s) mask:") :
+ _("Off-line CPU(s) list:"),
set, mod->hex);
cpuset_free(set);
}
if (desc->nsockets) {
- int cores_per_socket, sockets_per_book, books_per_drawer, drawers;
+ int threads_per_core, cores_per_socket, sockets_per_book;
+ int books_per_drawer, drawers;
+ FILE *fd;
- cores_per_socket = sockets_per_book = books_per_drawer = drawers = 0;
+ threads_per_core = cores_per_socket = sockets_per_book = 0;
+ books_per_drawer = drawers = 0;
/* s390 detects its cpu topology via /proc/sysinfo, if present.
* Using simply the cpu topology masks in sysfs will not give
* usable results since everything is virtualized. E.g.
* If the cpu topology is not exported (e.g. 2nd level guest)
* fall back to old calculation scheme.
*/
- if (path_exist(_PATH_PROC_SYSINFO)) {
- FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
- char pbuf[BUFSIZ];
+ if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
int t0, t1;
- while (fd && fgets(pbuf, sizeof(pbuf), fd) != NULL) {
- if (sscanf(pbuf, "CPU Topology SW:%d%d%d%d%d%d",
+ while (fd && fgets(buf, sizeof(buf), fd) != NULL) {
+ if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
&t0, &t1, &drawers, &books_per_drawer,
&sockets_per_book,
&cores_per_socket) == 6)
if (fd)
fclose(fd);
}
- print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
- print_n(_("Core(s) per socket:"),
+ if (desc->mtid)
+ threads_per_core = atoi(desc->mtid) + 1;
+ add_summary_n(tb, _("Thread(s) per core:"),
+ threads_per_core ?: desc->nthreads / desc->ncores);
+ add_summary_n(tb, _("Core(s) per socket:"),
cores_per_socket ?: desc->ncores / desc->nsockets);
if (desc->nbooks) {
- print_n(_("Socket(s) per book:"),
+ add_summary_n(tb, _("Socket(s) per book:"),
sockets_per_book ?: desc->nsockets / desc->nbooks);
if (desc->ndrawers) {
- print_n(_("Book(s) per drawer:"),
+ add_summary_n(tb, _("Book(s) per drawer:"),
books_per_drawer ?: desc->nbooks / desc->ndrawers);
- print_n(_("Drawer(s):"), drawers ?: desc->ndrawers);
+ add_summary_n(tb, _("Drawer(s):"), drawers ?: desc->ndrawers);
} else {
- print_n(_("Book(s):"), books_per_drawer ?: desc->nbooks);
+ add_summary_n(tb, _("Book(s):"), books_per_drawer ?: desc->nbooks);
}
} else {
- print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
+ add_summary_n(tb, _("Socket(s):"), sockets_per_book ?: desc->nsockets);
}
}
if (desc->nnodes)
- print_n(_("NUMA node(s):"), desc->nnodes);
+ add_summary_n(tb, _("NUMA node(s):"), desc->nnodes);
if (desc->vendor)
- print_s(_("Vendor ID:"), desc->vendor);
+ add_summary_s(tb, _("Vendor ID:"), desc->vendor);
if (desc->machinetype)
- print_s(_("Machine type:"), desc->machinetype);
+ add_summary_s(tb, _("Machine type:"), desc->machinetype);
if (desc->family)
- print_s(_("CPU family:"), desc->family);
+ add_summary_s(tb, _("CPU family:"), desc->family);
if (desc->model || desc->revision)
- print_s(_("Model:"), desc->revision ? desc->revision : desc->model);
+ add_summary_s(tb, _("Model:"), desc->revision ? desc->revision : desc->model);
if (desc->modelname || desc->cpu)
- print_s(_("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
+ add_summary_s(tb, _("Model name:"), desc->cpu ? desc->cpu : desc->modelname);
if (desc->stepping)
- print_s(_("Stepping:"), desc->stepping);
+ add_summary_s(tb, _("Stepping:"), desc->stepping);
+ if (desc->freqboost >= 0)
+ add_summary_s(tb, _("Frequency boost:"), desc->freqboost ?
+ _("enabled") : _("disabled"));
if (desc->mhz)
- print_s(_("CPU MHz:"), desc->mhz);
+ add_summary_s(tb, _("CPU MHz:"), desc->mhz);
if (desc->dynamic_mhz)
- print_s(_("CPU dynamic MHz:"), desc->dynamic_mhz);
+ add_summary_s(tb, _("CPU dynamic MHz:"), desc->dynamic_mhz);
if (desc->static_mhz)
- print_s(_("CPU static MHz:"), desc->static_mhz);
+ add_summary_s(tb, _("CPU static MHz:"), desc->static_mhz);
if (desc->maxmhz)
- print_s(_("CPU max MHz:"), desc->maxmhz[0]);
+ add_summary_s(tb, _("CPU max MHz:"), cpu_max_mhz(desc, buf, sizeof(buf)));
if (desc->minmhz)
- print_s(_("CPU min MHz:"), desc->minmhz[0]);
+ add_summary_s(tb, _("CPU min MHz:"), cpu_min_mhz(desc, buf, sizeof(buf)));
if (desc->bogomips)
- print_s(_("BogoMIPS:"), desc->bogomips);
+ add_summary_s(tb, _("BogoMIPS:"), desc->bogomips);
if (desc->virtflag) {
if (!strcmp(desc->virtflag, "svm"))
- print_s(_("Virtualization:"), "AMD-V");
+ add_summary_s(tb, _("Virtualization:"), "AMD-V");
else if (!strcmp(desc->virtflag, "vmx"))
- print_s(_("Virtualization:"), "VT-x");
+ add_summary_s(tb, _("Virtualization:"), "VT-x");
}
if (desc->hypervisor)
- print_s(_("Hypervisor:"), desc->hypervisor);
+ add_summary_s(tb, _("Hypervisor:"), desc->hypervisor);
if (desc->hyper) {
- print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
- print_s(_("Virtualization type:"), _(virt_types[desc->virtype]));
+ add_summary_s(tb, _("Hypervisor vendor:"), hv_vendors[desc->hyper]);
+ add_summary_s(tb, _("Virtualization type:"), _(virt_types[desc->virtype]));
}
if (desc->dispatching >= 0)
- print_s(_("Dispatching mode:"), _(disp_modes[desc->dispatching]));
+ add_summary_s(tb, _("Dispatching mode:"), _(disp_modes[desc->dispatching]));
if (desc->ncaches) {
- char cbuf[512];
-
for (i = desc->ncaches - 1; i >= 0; i--) {
- snprintf(cbuf, sizeof(cbuf),
- _("%s cache:"), desc->caches[i].name);
- print_s(cbuf, desc->caches[i].size);
+ uint64_t sz = 0;
+ char *tmp;
+ struct cpu_cache *ca = &desc->caches[i];
+
+ if (ca->size == 0)
+ continue;
+ if (get_cache_full_size(desc, ca, &sz) != 0 || sz == 0)
+ continue;
+ if (mod->bytes)
+ xasprintf(&tmp, "%" PRIu64, sz);
+ else
+ tmp = size_to_human_string(
+ SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
+ sz);
+ snprintf(buf, sizeof(buf), _("%s cache: "), ca->name);
+ add_summary_s(tb, buf, tmp);
+ free(tmp);
}
}
-
if (desc->necaches) {
- char cbuf[512];
-
for (i = desc->necaches - 1; i >= 0; i--) {
- snprintf(cbuf, sizeof(cbuf),
- _("%s cache:"), desc->ecaches[i].name);
- print_s(cbuf, desc->ecaches[i].size);
+ char *tmp;
+ struct cpu_cache *ca = &desc->ecaches[i];
+
+ if (ca->size == 0)
+ continue;
+ if (mod->bytes)
+ xasprintf(&tmp, "%" PRIu64, ca->size);
+ else
+ tmp = size_to_human_string(
+ SIZE_SUFFIX_3LETTER | SIZE_SUFFIX_SPACE,
+ ca->size);
+ snprintf(buf, sizeof(buf), _("%s cache: "), ca->name);
+ add_summary_s(tb, buf, tmp);
+ free(tmp);
}
}
for (i = 0; i < desc->nnodes; i++) {
snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), desc->idx2nodenum[i]);
- print_cpuset(buf, desc->nodemaps[i], mod->hex);
+ print_cpuset(tb, buf, desc->nodemaps[i], mod->hex);
}
- if (desc->flags)
- print_s(_("Flags:"), desc->flags);
-
if (desc->physsockets) {
- print_n(_("Physical sockets:"), desc->physsockets);
- print_n(_("Physical chips:"), desc->physchips);
- print_n(_("Physical cores/chip:"), desc->physcoresperchip);
+ add_summary_n(tb, _("Physical sockets:"), desc->physsockets);
+ add_summary_n(tb, _("Physical chips:"), desc->physchips);
+ add_summary_n(tb, _("Physical cores/chip:"), desc->physcoresperchip);
}
+
+ if (desc->flags)
+ add_summary_s(tb, _("Flags:"), desc->flags);
+
+ scols_print_table(tb);
+ scols_unref_table(tb);
}
-static void __attribute__((__noreturn__)) usage(FILE *out)
+static void __attribute__((__noreturn__)) usage(void)
{
+ FILE *out = stdout;
size_t i;
fputs(USAGE_HEADER, out);
fputs(USAGE_OPTIONS, out);
fputs(_(" -a, --all print both online and offline CPUs (default for -e)\n"), out);
fputs(_(" -b, --online print online CPUs only (default for -p)\n"), out);
+ fputs(_(" -B, --bytes print sizes in bytes rather than in human readable format\n"), out);
+ fputs(_(" -C, --caches[=<list>] info about caches in extended readable format\n"), out);
fputs(_(" -c, --offline print offline CPUs only\n"), out);
+ fputs(_(" -J, --json use JSON for default or extended format\n"), out);
fputs(_(" -e, --extended[=<list>] print out an extended readable format\n"), out);
fputs(_(" -p, --parse[=<list>] print out a parsable format\n"), out);
fputs(_(" -s, --sysroot <dir> use specified directory as system root\n"), out);
fputs(_(" -x, --hex print hexadecimal masks rather than lists of CPUs\n"), out);
+ fputs(_(" -y, --physical print physical instead of logical IDs\n"), out);
fputs(USAGE_SEPARATOR, out);
- fputs(USAGE_HELP, out);
- fputs(USAGE_VERSION, out);
+ printf(USAGE_HELP_OPTIONS(25));
- fprintf(out, _("\nAvailable columns:\n"));
+ fputs(_("\nAvailable output columns for -e or -p:\n"), out);
+ for (i = 0; i < ARRAY_SIZE(coldescs_cpu); i++)
+ fprintf(out, " %13s %s\n", coldescs_cpu[i].name, _(coldescs_cpu[i].help));
- for (i = 0; i < ARRAY_SIZE(coldescs); i++)
- fprintf(out, " %13s %s\n", coldescs[i].name, _(coldescs[i].help));
+ fputs(_("\nAvailable output columns for -C:\n"), out);
+ for (i = 0; i < ARRAY_SIZE(coldescs_cache); i++)
+ fprintf(out, " %13s %s\n", coldescs_cache[i].name, _(coldescs_cache[i].help));
- fprintf(out, USAGE_MAN_TAIL("lscpu(1)"));
+ printf(USAGE_MAN_TAIL("lscpu(1)"));
- exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
+ exit(EXIT_SUCCESS);
}
int main(int argc, char *argv[])
{
struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
- struct lscpu_desc _desc = { .flags = 0 }, *desc = &_desc;
- int c, i;
- int columns[ARRAY_SIZE(coldescs)], ncolumns = 0;
+ struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
+ int c, i, all = 0;
+ int columns[ARRAY_SIZE(coldescs_cpu)], ncolumns = 0;
int cpu_modifier_specified = 0;
+ size_t setsize;
+ enum {
+ OPT_OUTPUT_ALL = CHAR_MAX + 1,
+ };
static const struct option longopts[] = {
- { "all", no_argument, 0, 'a' },
- { "online", no_argument, 0, 'b' },
- { "offline", no_argument, 0, 'c' },
- { "help", no_argument, 0, 'h' },
- { "extended", optional_argument, 0, 'e' },
- { "parse", optional_argument, 0, 'p' },
- { "sysroot", required_argument, 0, 's' },
- { "hex", no_argument, 0, 'x' },
- { "version", no_argument, 0, 'V' },
- { NULL, 0, 0, 0 }
+ { "all", no_argument, NULL, 'a' },
+ { "online", no_argument, NULL, 'b' },
+ { "bytes", no_argument, NULL, 'B' },
+ { "caches", optional_argument, NULL, 'C' },
+ { "offline", no_argument, NULL, 'c' },
+ { "help", no_argument, NULL, 'h' },
+ { "extended", optional_argument, NULL, 'e' },
+ { "json", no_argument, NULL, 'J' },
+ { "parse", optional_argument, NULL, 'p' },
+ { "sysroot", required_argument, NULL, 's' },
+ { "physical", no_argument, NULL, 'y' },
+ { "hex", no_argument, NULL, 'x' },
+ { "version", no_argument, NULL, 'V' },
+ { "output-all", no_argument, NULL, OPT_OUTPUT_ALL },
+ { NULL, 0, NULL, 0 }
};
static const ul_excl_t excl[] = { /* rows and cols in ASCII order */
{ 'a','b','c' },
- { 'e','p' },
+ { 'C','e','p' },
{ 0 }
};
int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT;
textdomain(PACKAGE);
atexit(close_stdout);
- while ((c = getopt_long(argc, argv, "abce::hp::s:xV", longopts, NULL)) != -1) {
+ while ((c = getopt_long(argc, argv, "aBbC::ce::hJp::s:xyV", longopts, NULL)) != -1) {
err_exclusive_options(c, longopts, excl, excl_st);
mod->online = mod->offline = 1;
cpu_modifier_specified = 1;
break;
+ case 'B':
+ mod->bytes = 1;
+ break;
case 'b':
mod->online = 1;
cpu_modifier_specified = 1;
mod->offline = 1;
cpu_modifier_specified = 1;
break;
+ case 'C':
+ if (optarg) {
+ if (*optarg == '=')
+ optarg++;
+ ncolumns = string_to_idarray(optarg,
+ columns, ARRAY_SIZE(columns),
+ cache_column_name_to_id);
+ if (ncolumns < 0)
+ return EXIT_FAILURE;
+ }
+ mod->mode = OUTPUT_CACHES;
+ break;
case 'h':
- usage(stdout);
+ usage();
+ case 'J':
+ mod->json = 1;
+ break;
case 'p':
case 'e':
if (optarg) {
optarg++;
ncolumns = string_to_idarray(optarg,
columns, ARRAY_SIZE(columns),
- column_name_to_id);
+ cpu_column_name_to_id);
if (ncolumns < 0)
return EXIT_FAILURE;
}
mod->mode = c == 'p' ? OUTPUT_PARSABLE : OUTPUT_READABLE;
break;
case 's':
- path_set_prefix(optarg);
+ desc->prefix = optarg;
mod->system = SYSTEM_SNAPSHOT;
break;
case 'x':
mod->hex = 1;
break;
+ case 'y':
+ mod->physical = 1;
+ break;
case 'V':
printf(UTIL_LINUX_VERSION);
return EXIT_SUCCESS;
+ case OPT_OUTPUT_ALL:
+ all = 1;
+ break;
default:
- usage(stderr);
+ errtryhelp(EXIT_FAILURE);
}
}
+ if (all) {
+ size_t sz, maxsz = mod->mode == OUTPUT_CACHES ?
+ ARRAY_SIZE(coldescs_cache) :
+ ARRAY_SIZE(coldescs_cpu);
+
+ for (sz = 0; sz < maxsz; sz++)
+ columns[sz] = 1;
+ }
+
if (cpu_modifier_specified && mod->mode == OUTPUT_SUMMARY) {
fprintf(stderr,
_("%s: options --all, --online and --offline may only "
return EXIT_FAILURE;
}
- if (argc != optind)
- usage(stderr);
+ if (argc != optind) {
+ warnx(_("bad usage"));
+ errtryhelp(EXIT_FAILURE);
+ }
/* set default cpu display mode if none was specified */
if (!mod->online && !mod->offline) {
mod->offline = mod->mode == OUTPUT_READABLE ? 1 : 0;
}
+ ul_path_init_debug();
+
+ /* /sys/devices/system/cpu */
+ desc->syscpu = ul_new_path(_PATH_SYS_CPU);
+ if (!desc->syscpu)
+ err(EXIT_FAILURE, _("failed to initialize CPUs sysfs handler"));
+ if (desc->prefix)
+ ul_path_set_prefix(desc->syscpu, desc->prefix);
+
+ /* /proc */
+ desc->procfs = ul_new_path("/proc");
+ if (!desc->procfs)
+ err(EXIT_FAILURE, _("failed to initialize procfs handler"));
+ if (desc->prefix)
+ ul_path_set_prefix(desc->procfs, desc->prefix);
+
read_basicinfo(desc, mod);
+ setsize = CPU_ALLOC_SIZE(maxcpus);
+
for (i = 0; i < desc->ncpuspos; i++) {
+ /* only consider present CPUs */
+ if (desc->present &&
+ !CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present))
+ continue;
read_topology(desc, i);
read_cache(desc, i);
read_polarization(desc, i);
read_nodes(desc);
read_hypervisor(desc, mod);
+ arm_cpu_decode(desc);
switch(mod->mode) {
case OUTPUT_SUMMARY:
print_summary(desc, mod);
break;
+ case OUTPUT_CACHES:
+ if (!ncolumns) {
+ columns[ncolumns++] = COL_CACHE_NAME;
+ columns[ncolumns++] = COL_CACHE_ONESIZE;
+ columns[ncolumns++] = COL_CACHE_ALLSIZE;
+ columns[ncolumns++] = COL_CACHE_WAYS;
+ columns[ncolumns++] = COL_CACHE_TYPE;
+ columns[ncolumns++] = COL_CACHE_LEVEL;
+ }
+ print_caches_readable(desc, columns, ncolumns, mod);
+ break;
case OUTPUT_PARSABLE:
if (!ncolumns) {
- columns[ncolumns++] = COL_CPU;
- columns[ncolumns++] = COL_CORE;
- columns[ncolumns++] = COL_SOCKET;
- columns[ncolumns++] = COL_NODE;
- columns[ncolumns++] = COL_CACHE;
+ columns[ncolumns++] = COL_CPU_CPU;
+ columns[ncolumns++] = COL_CPU_CORE;
+ columns[ncolumns++] = COL_CPU_SOCKET;
+ columns[ncolumns++] = COL_CPU_NODE;
+ columns[ncolumns++] = COL_CPU_CACHE;
mod->compat = 1;
}
- print_parsable(desc, columns, ncolumns, mod);
+ print_cpus_parsable(desc, columns, ncolumns, mod);
break;
case OUTPUT_READABLE:
if (!ncolumns) {
/* No list was given. Just print whatever is there. */
- columns[ncolumns++] = COL_CPU;
+ columns[ncolumns++] = COL_CPU_CPU;
if (desc->nodemaps)
- columns[ncolumns++] = COL_NODE;
+ columns[ncolumns++] = COL_CPU_NODE;
if (desc->drawermaps)
- columns[ncolumns++] = COL_DRAWER;
+ columns[ncolumns++] = COL_CPU_DRAWER;
if (desc->bookmaps)
- columns[ncolumns++] = COL_BOOK;
+ columns[ncolumns++] = COL_CPU_BOOK;
if (desc->socketmaps)
- columns[ncolumns++] = COL_SOCKET;
+ columns[ncolumns++] = COL_CPU_SOCKET;
if (desc->coremaps)
- columns[ncolumns++] = COL_CORE;
+ columns[ncolumns++] = COL_CPU_CORE;
if (desc->caches)
- columns[ncolumns++] = COL_CACHE;
+ columns[ncolumns++] = COL_CPU_CACHE;
if (desc->online)
- columns[ncolumns++] = COL_ONLINE;
+ columns[ncolumns++] = COL_CPU_ONLINE;
if (desc->configured)
- columns[ncolumns++] = COL_CONFIGURED;
+ columns[ncolumns++] = COL_CPU_CONFIGURED;
if (desc->polarization)
- columns[ncolumns++] = COL_POLARIZATION;
+ columns[ncolumns++] = COL_CPU_POLARIZATION;
if (desc->addresses)
- columns[ncolumns++] = COL_ADDRESS;
+ columns[ncolumns++] = COL_CPU_ADDRESS;
if (desc->maxmhz)
- columns[ncolumns++] = COL_MAXMHZ;
+ columns[ncolumns++] = COL_CPU_MAXMHZ;
if (desc->minmhz)
- columns[ncolumns++] = COL_MINMHZ;
+ columns[ncolumns++] = COL_CPU_MINMHZ;
}
- print_readable(desc, columns, ncolumns, mod);
+ print_cpus_readable(desc, columns, ncolumns, mod);
break;
}
+ ul_unref_path(desc->syscpu);
+ ul_unref_path(desc->procfs);
return EXIT_SUCCESS;
}