#include <sys/stat.h>
#include <sys/personality.h>
-#if (defined(__x86_64__) || defined(__i386__))
-# define INCLUDE_VMWARE_BDOOR
-#endif
-
-#ifdef INCLUDE_VMWARE_BDOOR
-# include <stdint.h>
-# include <signal.h>
-# include <strings.h>
-# include <setjmp.h>
-# ifdef HAVE_SYS_IO_H
-# include <sys/io.h>
-# endif
-#endif
-
-#if defined(HAVE_LIBRTAS)
-#include <librtas.h>
-#endif
-
#include <libsmartcols.h>
#include "closestream.h"
#include "optutils.h"
-#include "fileutils.h"
#include "lscpu.h"
-
-#define CACHE_MAX 100
-
-/* /sys paths */
-#define _PATH_SYS_SYSTEM "/sys/devices/system"
-#define _PATH_SYS_HYP_FEATURES "/sys/hypervisor/properties/features"
-#define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
-#define _PATH_SYS_NODE _PATH_SYS_SYSTEM "/node"
-
-/* Xen domain feature flags used in /sys/hypervisor/properties/features */
-#define XENFEAT_supervisor_mode_kernel 3
-#define XENFEAT_mmu_pt_update_preserve_ad 5
-#define XENFEAT_hvm_callback_vector 8
-
-#define XEN_FEATURES_PV_MASK (1U << XENFEAT_mmu_pt_update_preserve_ad)
-#define XEN_FEATURES_PVH_MASK ( (1U << XENFEAT_supervisor_mode_kernel) \
- | (1U << XENFEAT_hvm_callback_vector) )
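-/* Example (illustrative value, not taken from a real guest): if the
- * features file yields 0x2e0, bit 5 (XENFEAT_mmu_pt_update_preserve_ad)
- * is set, so the value matches XEN_FEATURES_PV_MASK and the domain is
- * reported as para-virtualized in read_hypervisor(). */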
+#include "lscpu-api.h"
static const char *virt_types[] = {
[VIRT_NONE] = N_("none"),
[DISP_VERTICAL] = N_("vertical")
};
+struct polarization_modes {
+ char *parsable;
+ char *readable;
+};
+
static struct polarization_modes polar_modes[] = {
[POLAR_UNKNOWN] = {"U", "-"},
[POLAR_VLOW] = {"VL", "vert-low"},
[POLAR_HORIZONTAL] = {"H", "horizontal"},
};
-static int maxcpus; /* size in bits of kernel cpu mask */
-
-#define is_cpu_online(_d, _cpu) \
- ((_d) && (_d)->online ? \
- CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
-#define is_cpu_present(_d, _cpu) \
- ((_d) && (_d)->present ? \
- CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->present) : 0)
-
-#define real_cpu_num(_d, _i) ((_d)->idx2cpunum[(_i)])
-
/*
* IDs
*/
[COL_CACHE_COHERENCYSIZE] = { "COHERENCY-SIZE", N_("minimum amount of data in bytes transferred from memory to cache"), SCOLS_FL_RIGHT }
};
-
-static int get_cache_full_size(struct lscpu_desc *desc, struct cpu_cache *ca, uint64_t *res);
-
static int
cpu_column_name_to_id(const char *name, size_t namesz)
{
return -1;
}
-/* Look up a pattern and get its value from cpuinfo.
- * Format is:
- *
- * "<pattern> : <value>"
- */
-static int
-lookup(char *line, char *pattern, char **value)
-{
- char *p, *v;
- int len = strlen(pattern);
-
- /* don't re-fill already found tags, first one wins */
- if (!*line || *value)
- return 0;
-
- /* pattern */
- if (strncmp(line, pattern, len) != 0)
- return 0;
-
- /* white spaces */
- for (p = line + len; isspace(*p); p++);
-
- /* separator */
- if (*p != ':')
- return 0;
-
- /* white spaces */
- for (++p; isspace(*p); p++);
-
- /* value */
- if (!*p)
- return 0;
- v = p;
-
- /* end of value */
- len = strlen(line) - 1;
- for (p = line + len; isspace(*(p-1)); p--);
- *p = '\0';
-
- *value = xstrdup(v);
- return 1;
-}
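-/* Illustrative use (the cpuinfo line is made up): for the line
- * "model name : Foo CPU @ 2.00GHz" and the pattern "model name",
- * lookup() skips the whitespace and the ':' separator, trims trailing
- * whitespace, stores a copy of "Foo CPU @ 2.00GHz" in *value and
- * returns 1; on a pattern mismatch it returns 0 and leaves *value
- * untouched. */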
-
-/* Parse extra cache lines contained within /proc/cpuinfo but which are not
- * part of the cache topology information within the sysfs filesystem.
- * This is true for all shared caches on e.g. s390. When there are layers of
- * hypervisors in between, it is not known which CPUs share which caches.
- * Therefore information about shared caches is only available in
- * /proc/cpuinfo.
- * Format is:
- * "cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>"
- */
-static int
-lookup_cache(char *line, struct lscpu_desc *desc)
-{
- struct cpu_cache *cache;
- long long size;
- char *p, type;
- int level, line_size, associativity;
-
- /* Make sure line starts with "cache<nr> :" */
- if (strncmp(line, "cache", 5) != 0)
- return 0;
- for (p = line + 5; isdigit(*p); p++);
- for (; isspace(*p); p++);
- if (*p != ':')
- return 0;
-
- p = strstr(line, "scope=") + 6;
- /* Skip private caches, also present in sysfs */
- if (!p || strncmp(p, "Private", 7) == 0)
- return 0;
- p = strstr(line, "level=");
- if (!p || sscanf(p, "level=%d", &level) != 1)
- return 0;
- p = strstr(line, "type=") + 5;
- if (!p || !*p)
- return 0;
- type = 0;
- if (strncmp(p, "Data", 4) == 0)
- type = 'd';
- else if (strncmp(p, "Instruction", 11) == 0)
- type = 'i';
- else if (strncmp(p, "Unified", 7) == 0)
- type = 'u';
- p = strstr(line, "size=");
- if (!p || sscanf(p, "size=%lld", &size) != 1)
- return 0;
-
- p = strstr(line, "line_size=");
- if (!p || sscanf(p, "line_size=%u", &line_size) != 1)
- return 0;
-
- p = strstr(line, "associativity=");
- if (!p || sscanf(p, "associativity=%u", &associativity) != 1)
- return 0;
-
- desc->necaches++;
- desc->ecaches = xrealloc(desc->ecaches,
- desc->necaches * sizeof(struct cpu_cache));
- cache = &desc->ecaches[desc->necaches - 1];
- memset(cache, 0 , sizeof(*cache));
-
- if (type == 'i' || type == 'd')
- xasprintf(&cache->name, "L%d%c", level, type);
- else
- xasprintf(&cache->name, "L%d", level);
-
- cache->level = level;
- cache->size = size * 1024;
- cache->ways_of_associativity = associativity;
- cache->coherency_line_size = line_size;
- /* Number of sets for s390. For safety, guard against division by zero */
- cache->number_of_sets = line_size ? (cache->size / line_size): 0;
- cache->number_of_sets = associativity ? (cache->number_of_sets / associativity) : 0;
-
- cache->type = type == 'i' ? xstrdup("Instruction") :
- type == 'd' ? xstrdup("Data") :
- type == 'u' ? xstrdup("Unified") : NULL;
- return 1;
-}
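-/* Example of a line accepted by lookup_cache() (illustrative values):
- *
- * "cache3 : level=4 type=Unified scope=Shared size=491520K line_size=256 associativity=30"
- *
- * yields an extra cache named "L4" of 491520 KiB; lines with
- * scope=Private are skipped because sysfs already describes them. */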
-
-/* Don't init the mode for platforms where we are not able to
- * detect whether the CPU supports 64-bit mode.
- */
-static int
-init_mode(struct lscpu_modifier *mod)
-{
- int m = 0;
-
- if (mod->system == SYSTEM_SNAPSHOT)
- /* reading info from any /{sys,proc} dump, don't mix it with
- * information about our real CPU */
- return 0;
-
-#if defined(__alpha__) || defined(__ia64__)
- m |= MODE_64BIT; /* 64bit platforms only */
-#endif
- /* platforms with 64bit flag in /proc/cpuinfo, define
- * 32bit default here */
-#if defined(__i386__) || defined(__x86_64__) || \
- defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
- m |= MODE_32BIT;
-#endif
-
-#if defined(__aarch64__)
- {
- /* personality() is the most reliable way (since 4.7)
- * to determine aarch32 support */
- int pers = personality(PER_LINUX32);
- if (pers != -1) {
- personality(pers);
- m |= MODE_32BIT;
- }
- m |= MODE_64BIT;
- }
-#endif
- return m;
-}
-
-#if defined(HAVE_LIBRTAS)
-#define PROCESSOR_MODULE_INFO 43
-static int strbe16toh(const char *buf, int offset)
-{
- return (buf[offset] << 8) + buf[offset+1];
-}
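-/* strbe16toh() reads a big-endian 16-bit field from @buf, e.g. for
- * buf = { 0x01, 0x2c, ... } (illustrative bytes) strbe16toh(buf, 0)
- * evaluates to (0x01 << 8) + 0x2c = 300. */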
-
-static void read_physical_info_powerpc(struct lscpu_desc *desc)
-{
- char buf[BUFSIZ];
- int rc, len, ntypes;
-
- desc->physsockets = desc->physchips = desc->physcoresperchip = 0;
-
- rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
- if (rc < 0)
- return;
-
- len = strbe16toh(buf, 0);
- if (len < 8)
- return;
-
- ntypes = strbe16toh(buf, 2);
- if (!ntypes)
- return;
-
- desc->physsockets = strbe16toh(buf, 4);
- desc->physchips = strbe16toh(buf, 6);
- desc->physcoresperchip = strbe16toh(buf, 8);
-}
-#else
-static void read_physical_info_powerpc(
- struct lscpu_desc *desc __attribute__((__unused__)))
-{
-}
-#endif
-
-static int cmp_vulnerability_name(const void *a0, const void *b0)
-{
- const struct cpu_vulnerability *a = (const struct cpu_vulnerability *) a0,
- *b = (const struct cpu_vulnerability *) b0;
- return strcmp(a->name, b->name);
-}
-
-static void read_vulnerabilities(struct lscpu_desc *desc)
-{
- struct dirent *d;
- DIR *dir = ul_path_opendir(desc->syscpu, "vulnerabilities");
- int n = 0;
-
- if (!dir)
- return;
-
- desc->nvuls = n = 0;
-
- while (xreaddir(dir))
- n++;
- if (!n)
- return;
-
- rewinddir(dir);
- desc->vuls = xcalloc(n, sizeof(struct cpu_vulnerability));
-
- while (desc->nvuls < n && (d = xreaddir(dir))) {
- char *str, *p;
- struct cpu_vulnerability *vu;
-
-#ifdef _DIRENT_HAVE_D_TYPE
- if (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN)
- continue;
-#endif
- if (ul_path_readf_string(desc->syscpu, &str,
- "vulnerabilities/%s", d->d_name) <= 0)
- continue;
-
- vu = &desc->vuls[desc->nvuls++];
-
- /* Name */
- vu->name = xstrdup(d->d_name);
- *vu->name = toupper(*vu->name);
- strrep(vu->name, '_', ' ');
-
- /* Description */
- vu->text = str;
- p = (char *) startswith(vu->text, "Mitigation");
- if (p) {
- *p = ';';
- strrem(vu->text, ':');
- }
- }
- closedir(dir);
-
- qsort(desc->vuls, desc->nvuls,
- sizeof(struct cpu_vulnerability), cmp_vulnerability_name);
-}
-
-
-
-
-static void
-read_basicinfo(struct lscpu_desc *desc, struct lscpu_modifier *mod)
-{
- FILE *fp;
- char buf[BUFSIZ];
- struct utsname utsbuf;
- size_t setsize;
- cpu_set_t *cpuset = NULL;
-
- /* architecture */
- if (uname(&utsbuf) == -1)
- err(EXIT_FAILURE, _("error: uname failed"));
-
- fp = ul_path_fopen(desc->procfs, "r", "cpuinfo");
- if (!fp)
- err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
- desc->arch = xstrdup(utsbuf.machine);
-
- /* details */
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- if (lookup(buf, "vendor", &desc->vendor)) ;
- else if (lookup(buf, "vendor_id", &desc->vendor)) ;
- else if (lookup(buf, "CPU implementer", &desc->vendor)) ; /* ARM and aarch64 */
- else if (lookup(buf, "family", &desc->family)) ;
- else if (lookup(buf, "cpu family", &desc->family)) ;
- else if (lookup(buf, "model", &desc->model)) ;
- else if (lookup(buf, "CPU part", &desc->model)) ; /* ARM and aarch64 */
- else if (lookup(buf, "cpu model", &desc->model)) ; /* mips */
- else if (lookup(buf, "model name", &desc->modelname)) ;
- else if (lookup(buf, "stepping", &desc->stepping)) ;
- else if (lookup(buf, "CPU variant", &desc->stepping)) ; /* aarch64 */
- else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
- else if (lookup(buf, "cpu MHz dynamic", &desc->dynamic_mhz)) ; /* s390 */
- else if (lookup(buf, "cpu MHz static", &desc->static_mhz)) ; /* s390 */
- else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
- else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
- else if (lookup(buf, "Features", &desc->flags)) ; /* aarch64 */
- else if (lookup(buf, "ASEs implemented", &desc->flags)) ; /* mips */
- else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
- else if (lookup(buf, "bogomips", &desc->bogomips)) ;
- else if (lookup(buf, "BogoMIPS", &desc->bogomips)) ; /* aarch64 */
- else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
- else if (lookup(buf, "cpu", &desc->cpu)) ;
- else if (lookup(buf, "revision", &desc->revision)) ;
- else if (lookup(buf, "CPU revision", &desc->revision)) ; /* aarch64 */
- else if (lookup(buf, "max thread id", &desc->mtid)) ; /* s390 */
- else if (lookup(buf, "address sizes", &desc->addrsz)) ; /* x86 */
- else if (lookup_cache(buf, desc)) ;
- else
- continue;
- }
-
- desc->mode = init_mode(mod);
-
- if (desc->flags) {
- snprintf(buf, sizeof(buf), " %s ", desc->flags);
- if (strstr(buf, " svm "))
- desc->virtflag = xstrdup("svm");
- else if (strstr(buf, " vmx "))
- desc->virtflag = xstrdup("vmx");
- if (strstr(buf, " lm "))
- desc->mode |= MODE_32BIT | MODE_64BIT; /* x86_64 */
- if (strstr(buf, " zarch "))
- desc->mode |= MODE_32BIT | MODE_64BIT; /* s390x */
- if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
- desc->mode |= MODE_32BIT | MODE_64BIT; /* sparc64 */
- }
-
- if (desc->arch && mod->system != SYSTEM_SNAPSHOT) {
- if (strcmp(desc->arch, "ppc64") == 0)
- desc->mode |= MODE_32BIT | MODE_64BIT;
- else if (strcmp(desc->arch, "ppc") == 0)
- desc->mode |= MODE_32BIT;
- }
-
- fclose(fp);
-
- if (ul_path_read_s32(desc->syscpu, &maxcpus, "kernel_max") == 0)
- /* note that kernel_max is maximum index [NR_CPUS-1] */
- maxcpus += 1;
-
- else if (mod->system == SYSTEM_LIVE)
- /* the root is '/' so we are working with data from the current kernel */
- maxcpus = get_max_number_of_cpus();
-
- if (maxcpus <= 0)
- /* error or we are reading some /sys snapshot instead of the
- * real /sys, let's use any crazy number... */
- maxcpus = 2048;
-
- setsize = CPU_ALLOC_SIZE(maxcpus);
-
- if (ul_path_readf_cpulist(desc->syscpu, &cpuset, maxcpus, "possible") == 0) {
- int num, idx;
-
- desc->ncpuspos = CPU_COUNT_S(setsize, cpuset);
- desc->idx2cpunum = xcalloc(desc->ncpuspos, sizeof(int));
-
- for (num = 0, idx = 0; num < maxcpus; num++) {
- if (CPU_ISSET_S(num, setsize, cpuset))
- desc->idx2cpunum[idx++] = num;
- }
- cpuset_free(cpuset);
- cpuset = NULL;
- } else
- err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
- _PATH_SYS_CPU "/possible");
-
-
- /* get mask for present CPUs */
- if (ul_path_readf_cpulist(desc->syscpu, &desc->present, maxcpus, "present") == 0)
- desc->ncpus = CPU_COUNT_S(setsize, desc->present);
-
- /* get mask for online CPUs */
- if (ul_path_readf_cpulist(desc->syscpu, &desc->online, maxcpus, "online") == 0)
- desc->nthreads = CPU_COUNT_S(setsize, desc->online);
-
- /* get dispatching mode */
- if (ul_path_read_s32(desc->syscpu, &desc->dispatching, "dispatching") != 0)
- desc->dispatching = -1;
-
- /* get cpufreq boost mode */
- if (ul_path_read_s32(desc->syscpu, &desc->freqboost, "cpufreq/boost") != 0)
- desc->freqboost = -1;
-
- if (mod->system == SYSTEM_LIVE)
- read_physical_info_powerpc(desc);
-
- if ((fp = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- if (lookup(buf, "Type", &desc->machinetype))
- break;
- }
- fclose(fp);
- }
-
- /* vulnerabilities */
- if (ul_path_access(desc->syscpu, F_OK, "vulnerabilities") == 0)
- read_vulnerabilities(desc);
-}
-
-static int
-has_pci_device(struct lscpu_desc *desc, unsigned int vendor, unsigned int device)
-{
- FILE *f;
- unsigned int num, fn, ven, dev;
- int res = 1;
-
- f = ul_path_fopen(desc->procfs, "r", "bus/pci/devices");
- if (!f)
- return 0;
-
- /* for more details about bus/pci/devices format see
- * drivers/pci/proc.c in linux kernel
- */
- while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
- &num, &fn, &ven, &dev) == 4) {
-
- if (ven == vendor && dev == device)
- goto found;
- }
-
- res = 0;
-found:
- fclose(f);
- return res;
-}
-
-#if defined(__x86_64__) || defined(__i386__)
-
-/*
- * This CPUID leaf returns information about the hypervisor.
- * EAX : maximum input value for CPUID supported by the hypervisor.
- * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
- */
-#define HYPERVISOR_INFO_LEAF 0x40000000
-
-static inline void
-cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- __asm__(
-#if defined(__PIC__) && defined(__i386__)
- /* x86 PIC cannot clobber ebx -- gcc bitches */
- "xchg %%ebx, %%esi;"
- "cpuid;"
- "xchg %%esi, %%ebx;"
- : "=S" (*ebx),
-#else
- "cpuid;"
- : "=b" (*ebx),
-#endif
- "=a" (*eax),
- "=c" (*ecx),
- "=d" (*edx)
- : "1" (op), "c"(0));
-}
-
-static void
-read_hypervisor_cpuid(struct lscpu_desc *desc)
-{
- unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
- char hyper_vendor_id[13];
-
- memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
-
- cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
- memcpy(hyper_vendor_id + 0, &ebx, 4);
- memcpy(hyper_vendor_id + 4, &ecx, 4);
- memcpy(hyper_vendor_id + 8, &edx, 4);
- hyper_vendor_id[12] = '\0';
-
- if (!hyper_vendor_id[0])
- return;
-
- if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
- desc->hyper = HYPER_XEN;
- else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
- desc->hyper = HYPER_KVM;
- else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
- desc->hyper = HYPER_MSHV;
- else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
- desc->hyper = HYPER_VMWARE;
- else if (!strncmp("UnisysSpar64", hyper_vendor_id, 12))
- desc->hyper = HYPER_SPAR;
-}
-
-#else /* ! (__x86_64__ || __i386__) */
-static void
-read_hypervisor_cpuid(struct lscpu_desc *desc __attribute__((__unused__)))
-{
-}
-#endif
-
-static int is_devtree_compatible(struct lscpu_desc *desc, const char *str)
-{
- FILE *fd = ul_path_fopen(desc->procfs, "r", "device-tree/compatible");
-
- if (fd) {
- char buf[256];
- size_t i, len;
-
- memset(buf, 0, sizeof(buf));
- len = fread(buf, 1, sizeof(buf) - 1, fd);
- fclose(fd);
-
- for (i = 0; i < len;) {
- if (!strcmp(&buf[i], str))
- return 1;
- i += strlen(&buf[i]);
- i++;
- }
- }
-
- return 0;
-}
-
-static int
-read_hypervisor_powerpc(struct lscpu_desc *desc)
-{
- assert(!desc->hyper);
-
- /* IBM iSeries: legacy, para-virtualized on top of OS/400 */
- if (ul_path_access(desc->procfs, F_OK, "iSeries") == 0) {
- desc->hyper = HYPER_OS400;
- desc->virtype = VIRT_PARA;
-
- /* PowerNV (POWER Non-Virtualized, bare-metal) */
- } else if (is_devtree_compatible(desc, "ibm,powernv")) {
- desc->hyper = HYPER_NONE;
- desc->virtype = VIRT_NONE;
-
- /* PowerVM (IBM's proprietary hypervisor, aka pHyp) */
- } else if (ul_path_access(desc->procfs, F_OK, "device-tree/ibm,partition-name") == 0
- && ul_path_access(desc->procfs, F_OK, "device-tree/hmc-managed?") == 0
- && ul_path_access(desc->procfs, F_OK, "device-tree/chosen/qemu,graphic-width") != 0) {
-
- FILE *fd;
- desc->hyper = HYPER_PHYP;
- desc->virtype = VIRT_PARA;
-
- fd = ul_path_fopen(desc->procfs, "r", "device-tree/ibm,partition-name");
- if (fd) {
- char buf[256];
- if (fscanf(fd, "%255s", buf) == 1 && !strcmp(buf, "full"))
- desc->virtype = VIRT_NONE;
- fclose(fd);
- }
-
- /* Qemu */
- } else if (is_devtree_compatible(desc, "qemu,pseries")) {
- desc->hyper = HYPER_KVM;
- desc->virtype = VIRT_PARA;
- }
- return desc->hyper;
-}
-
-#ifdef INCLUDE_VMWARE_BDOOR
-
-#define VMWARE_BDOOR_MAGIC 0x564D5868
-#define VMWARE_BDOOR_PORT 0x5658
-#define VMWARE_BDOOR_CMD_GETVERSION 10
-
-static UL_ASAN_BLACKLIST
-void vmware_bdoor(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
-{
- __asm__(
-#if defined(__PIC__) && defined(__i386__)
- /* x86 PIC cannot clobber ebx -- gcc bitches */
- "xchg %%ebx, %%esi;"
- "inl (%%dx), %%eax;"
- "xchg %%esi, %%ebx;"
- : "=S" (*ebx),
-#else
- "inl (%%dx), %%eax;"
- : "=b" (*ebx),
-#endif
- "=a" (*eax),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (VMWARE_BDOOR_MAGIC),
- "1" (VMWARE_BDOOR_CMD_GETVERSION),
- "2" (VMWARE_BDOOR_PORT),
- "3" (0)
- : "memory");
-}
-
-static jmp_buf segv_handler_env;
-
-static void
-segv_handler(__attribute__((__unused__)) int sig,
- __attribute__((__unused__)) siginfo_t *info,
- __attribute__((__unused__)) void *ignored)
-{
- siglongjmp(segv_handler_env, 1);
-}
-
-static int
-is_vmware_platform(void)
-{
- uint32_t eax, ebx, ecx, edx;
- struct sigaction act, oact;
-
- /*
- * FIXME: Not reliable for non-root users. Note it works as expected if
- * vmware_bdoor() is not optimized for PIE, but then it fails to build
- * on 32bit x86 systems. See lscpu git log for more details (commit
- * 7845b91dbc7690064a2be6df690e4aaba728fb04). kzak [3-Nov-2016]
- */
- if (getuid() != 0)
- return 0;
-
- /*
- * The assembly routine for vmware detection works
- * fine under vmware, even if run as a regular user. But
- * on real HW or under other hypervisors, it segfaults (which is
- * expected). So we temporarily install a SIGSEGV handler to catch
- * the signal. All this magic is needed because lscpu
- * isn't supposed to require root privileges.
- */
- if (sigsetjmp(segv_handler_env, 1))
- return 0;
-
- memset(&act, 0, sizeof(act));
- act.sa_sigaction = segv_handler;
- act.sa_flags = SA_SIGINFO;
-
- if (sigaction(SIGSEGV, &act, &oact))
- err(EXIT_FAILURE, _("cannot set signal handler"));
-
- vmware_bdoor(&eax, &ebx, &ecx, &edx);
-
- if (sigaction(SIGSEGV, &oact, NULL))
- err(EXIT_FAILURE, _("cannot restore signal handler"));
-
- return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC;
-}
-
-#else /* ! INCLUDE_VMWARE_BDOOR */
-
-static int
-is_vmware_platform(void)
-{
- return 0;
-}
-
-#endif /* INCLUDE_VMWARE_BDOOR */
-
-static void
-read_hypervisor(struct lscpu_desc *desc, struct lscpu_modifier *mod)
-{
- FILE *fd;
-
- /* We have to detect WSL first. is_vmware_platform() crashes on Windows 10. */
-
- if ((fd = ul_path_fopen(desc->procfs, "r", "sys/kernel/osrelease"))) {
- char buf[256];
-
- if (fgets(buf, sizeof(buf), fd) != NULL) {
- if (strstr(buf, "Microsoft")) {
- desc->hyper = HYPER_WSL;
- desc->virtype = VIRT_CONT;
- }
- }
- fclose(fd);
- if (desc->virtype)
- return;
- }
-
- if (mod->system != SYSTEM_SNAPSHOT) {
- read_hypervisor_cpuid(desc);
- if (!desc->hyper)
- desc->hyper = read_hypervisor_dmi();
- if (!desc->hyper && is_vmware_platform())
- desc->hyper = HYPER_VMWARE;
- }
-
- if (desc->hyper) {
- desc->virtype = VIRT_FULL;
-
- if (desc->hyper == HYPER_XEN) {
- uint32_t features;
-
- fd = ul_prefix_fopen(desc->prefix, "r", _PATH_SYS_HYP_FEATURES);
-
- if (fd && fscanf(fd, "%x", &features) == 1) {
- /* Xen PV domain */
- if (features & XEN_FEATURES_PV_MASK)
- desc->virtype = VIRT_PARA;
- /* Xen PVH domain */
- else if ((features & XEN_FEATURES_PVH_MASK)
- == XEN_FEATURES_PVH_MASK)
- desc->virtype = VIRT_PARA;
- }
- if (fd)
- fclose(fd);
- }
- } else if (read_hypervisor_powerpc(desc) > 0) {
- /* read_hypervisor_powerpc() has already filled in everything needed in @desc */
- ;
- /* Xen para-virt or dom0 */
- } else if (ul_path_access(desc->procfs, F_OK, "xen") == 0) {
- int dom0 = 0;
-
- fd = ul_path_fopen(desc->procfs, "r", "xen/capabilities");
- if (fd) {
- char buf[256];
-
- if (fscanf(fd, "%255s", buf) == 1 &&
- !strcmp(buf, "control_d"))
- dom0 = 1;
- fclose(fd);
- }
- desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
- desc->hyper = HYPER_XEN;
-
- /* Xen full-virt on non-x86_64 */
- } else if (has_pci_device(desc, hv_vendor_pci[HYPER_XEN], hv_graphics_pci[HYPER_XEN])) {
- desc->hyper = HYPER_XEN;
- desc->virtype = VIRT_FULL;
- } else if (has_pci_device(desc, hv_vendor_pci[HYPER_VMWARE], hv_graphics_pci[HYPER_VMWARE])) {
- desc->hyper = HYPER_VMWARE;
- desc->virtype = VIRT_FULL;
- } else if (has_pci_device(desc, hv_vendor_pci[HYPER_VBOX], hv_graphics_pci[HYPER_VBOX])) {
- desc->hyper = HYPER_VBOX;
- desc->virtype = VIRT_FULL;
-
- /* IBM PR/SM */
- } else if ((fd = ul_path_fopen(desc->procfs, "r", "sysinfo"))) {
- char buf[BUFSIZ];
-
- desc->hyper = HYPER_IBM;
- desc->hypervisor = "PR/SM";
- desc->virtype = VIRT_FULL;
- while (fgets(buf, sizeof(buf), fd) != NULL) {
- char *str, *p;
-
- if (!strstr(buf, "Control Program:"))
- continue;
- if (!strstr(buf, "KVM"))
- desc->hyper = HYPER_IBM;
- else
- desc->hyper = HYPER_KVM;
- p = strchr(buf, ':');
- if (!p)
- continue;
- xasprintf(&str, "%s", p + 1);
-
- /* remove leading, trailing and repeating whitespace */
- while (*str == ' ')
- str++;
- desc->hypervisor = str;
- str += strlen(str) - 1;
- while ((*str == '\n') || (*str == ' '))
- *(str--) = '\0';
- while ((str = strstr(desc->hypervisor, " ")))
- memmove(str, str + 1, strlen(str));
- break;
- }
- fclose(fd);
- }
-
- /* OpenVZ/Virtuozzo - the /proc/vz directory should exist,
- * /proc/bc should not */
- else if (ul_path_access(desc->procfs, F_OK, "vz") == 0 &&
- ul_path_access(desc->procfs, F_OK, "bc") != 0) {
- desc->hyper = HYPER_PARALLELS;
- desc->virtype = VIRT_CONT;
-
- /* IBM */
- } else if (desc->vendor &&
- (strcmp(desc->vendor, "PowerVM Lx86") == 0 ||
- strcmp(desc->vendor, "IBM/S390") == 0)) {
- desc->hyper = HYPER_IBM;
- desc->virtype = VIRT_FULL;
-
- /* User-mode-linux */
- } else if (desc->modelname && strstr(desc->modelname, "UML")) {
- desc->hyper = HYPER_UML;
- desc->virtype = VIRT_PARA;
-
- /* Linux-VServer */
- } else if ((fd = ul_path_fopen(desc->procfs, "r", "self/status"))) {
- char buf[BUFSIZ];
- char *val = NULL;
-
- while (fgets(buf, sizeof(buf), fd) != NULL) {
- if (lookup(buf, "VxID", &val))
- break;
- }
- fclose(fd);
-
- if (val) {
- char *org = val;
-
- while (isdigit(*val))
- ++val;
- if (!*val) {
- desc->hyper = HYPER_VSERVER;
- desc->virtype = VIRT_CONT;
- }
- free(org);
- }
- }
-}
-
-/* Add @set to @ary; an already-present (duplicate) set is deallocated. */
-static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
-{
- int i;
- size_t setsize = CPU_ALLOC_SIZE(maxcpus);
-
- if (!ary)
- return -1;
-
- for (i = 0; i < *items; i++) {
- if (CPU_EQUAL_S(setsize, set, ary[i]))
- break;
- }
- if (i == *items) {
- ary[*items] = set;
- ++*items;
- return 0;
- }
- CPU_FREE(set);
- return 1;
-}
-
-static void
-read_topology(struct lscpu_desc *desc, int idx)
-{
- cpu_set_t *thread_siblings, *core_siblings;
- cpu_set_t *book_siblings, *drawer_siblings;
- int coreid, socketid, bookid, drawerid;
- int i, num = real_cpu_num(desc, idx);
-
- if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/topology/thread_siblings", num) != 0)
- return;
-
- ul_path_readf_cpuset(desc->syscpu, &thread_siblings, maxcpus,
- "cpu%d/topology/thread_siblings", num);
- ul_path_readf_cpuset(desc->syscpu, &core_siblings, maxcpus,
- "cpu%d/topology/core_siblings", num);
- ul_path_readf_cpuset(desc->syscpu, &book_siblings, maxcpus,
- "cpu%d/topology/book_siblings", num);
- ul_path_readf_cpuset(desc->syscpu, &drawer_siblings, maxcpus,
- "cpu%d/topology/drawer_siblings", num);
-
- if (ul_path_readf_s32(desc->syscpu, &coreid, "cpu%d/topology/core_id", num) != 0)
- coreid = -1;
-
- if (ul_path_readf_s32(desc->syscpu, &socketid, "cpu%d/topology/physical_package_id", num) != 0)
- socketid = -1;
-
- if (ul_path_readf_s32(desc->syscpu, &bookid, "cpu%d/topology/book_id", num) != 0)
- bookid = -1;
-
- if (ul_path_readf_s32(desc->syscpu, &drawerid, "cpu%d/topology/drawer_id", num) != 0)
- drawerid = -1;
-
- if (!desc->coremaps) {
- int ndrawers, nbooks, nsockets, ncores, nthreads;
- size_t setsize = CPU_ALLOC_SIZE(maxcpus);
-
- /* threads within one core */
- nthreads = CPU_COUNT_S(setsize, thread_siblings);
- if (!nthreads)
- nthreads = 1;
-
- /* cores within one socket */
- ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
- if (!ncores)
- ncores = 1;
-
- /* number of sockets within one book. Because of odd /
- * non-present cpu maps and to keep calculation easy we make
- * sure that nsockets and nbooks are at least 1.
- */
- nsockets = desc->ncpus / nthreads / ncores;
- if (!nsockets)
- nsockets = 1;
-
- /* number of books */
- nbooks = desc->ncpus / nthreads / ncores / nsockets;
- if (!nbooks)
- nbooks = 1;
-
- /* number of drawers */
- ndrawers = desc->ncpus / nbooks / nthreads / ncores / nsockets;
- if (!ndrawers)
- ndrawers = 1;
-
- /* all threads, see also read_basicinfo()
- * -- fallback for kernels without
- * /sys/devices/system/cpu/online.
- */
- if (!desc->nthreads)
- desc->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
-
- /* For each map we make sure that it can have up to ncpuspos
- * entries. This is because we cannot reliably calculate the
- * number of cores, sockets and books on all architectures.
- * E.g. completely virtualized architectures like s390 may
- * have multiple sockets of different sizes.
- */
- desc->coremaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- desc->socketmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- desc->coreids = xcalloc(desc->ncpuspos, sizeof(*desc->coreids));
- desc->socketids = xcalloc(desc->ncpuspos, sizeof(*desc->socketids));
-
- for (i = 0; i < desc->ncpuspos; i++)
- desc->coreids[i] = desc->socketids[i] = -1;
-
- if (book_siblings) {
- desc->bookmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- desc->bookids = xcalloc(desc->ncpuspos, sizeof(*desc->bookids));
- for (i = 0; i < desc->ncpuspos; i++)
- desc->bookids[i] = -1;
- }
- if (drawer_siblings) {
- desc->drawermaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- desc->drawerids = xcalloc(desc->ncpuspos, sizeof(*desc->drawerids));
- for (i = 0; i < desc->ncpuspos; i++)
- desc->drawerids[i] = -1;
- }
- }
-
- add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
- desc->coreids[idx] = coreid;
- add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
- desc->socketids[idx] = socketid;
-
- if (book_siblings && desc->bookmaps && desc->bookids) {
- add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
- desc->bookids[idx] = bookid;
- }
- if (drawer_siblings && desc->drawermaps && desc->drawerids) {
- add_cpuset_to_array(desc->drawermaps, &desc->ndrawers, drawer_siblings);
- desc->drawerids[idx] = drawerid;
- }
-}
-
-static void
-read_polarization(struct lscpu_desc *desc, int idx)
-{
- char mode[64];
- int num = real_cpu_num(desc, idx);
-
- if (desc->dispatching < 0)
- return;
- if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/polarization", num) != 0)
- return;
- if (!desc->polarization)
- desc->polarization = xcalloc(desc->ncpuspos, sizeof(int));
-
- ul_path_readf_buffer(desc->syscpu, mode, sizeof(mode), "cpu%d/polarization", num);
-
- if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
- desc->polarization[idx] = POLAR_VLOW;
- else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
- desc->polarization[idx] = POLAR_VMEDIUM;
- else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
- desc->polarization[idx] = POLAR_VHIGH;
- else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
- desc->polarization[idx] = POLAR_HORIZONTAL;
- else
- desc->polarization[idx] = POLAR_UNKNOWN;
-}
-
-static void
-read_address(struct lscpu_desc *desc, int idx)
-{
- int num = real_cpu_num(desc, idx);
-
- if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/address", num) != 0)
- return;
- if (!desc->addresses)
- desc->addresses = xcalloc(desc->ncpuspos, sizeof(int));
- ul_path_readf_s32(desc->syscpu, &desc->addresses[idx], "cpu%d/address", num);
-}
-
-static void
-read_configured(struct lscpu_desc *desc, int idx)
-{
- int num = real_cpu_num(desc, idx);
-
- if (ul_path_accessf(desc->syscpu, F_OK, "cpu%d/configure", num) != 0)
- return;
- if (!desc->configured)
- desc->configured = xcalloc(desc->ncpuspos, sizeof(int));
- ul_path_readf_s32(desc->syscpu, &desc->configured[idx], "cpu%d/configure", num);
-}
-
-/* Read overall maximum frequency of cpu */
-static char *
-cpu_max_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
-{
- int i;
- float cpu_freq = 0.0;
- size_t setsize = CPU_ALLOC_SIZE(maxcpus);
-
- if (desc->present) {
- for (i = 0; i < desc->ncpuspos; i++) {
- if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
- && desc->maxmhz[i]) {
- float freq = atof(desc->maxmhz[i]);
-
- if (freq > cpu_freq)
- cpu_freq = freq;
- }
- }
- }
- snprintf(buf, bufsz, "%.4f", cpu_freq);
- return buf;
-}
-
-/* Read overall minimum frequency of cpu */
-static char *
-cpu_min_mhz(struct lscpu_desc *desc, char *buf, size_t bufsz)
-{
- int i;
- float cpu_freq = -1.0;
- size_t setsize = CPU_ALLOC_SIZE(maxcpus);
-
- if (desc->present) {
- for (i = 0; i < desc->ncpuspos; i++) {
- if (CPU_ISSET_S(real_cpu_num(desc, i), setsize, desc->present)
- && desc->minmhz[i]) {
- float freq = atof(desc->minmhz[i]);
-
- if (cpu_freq < 0.0 || freq < cpu_freq)
- cpu_freq = freq;
- }
- }
- }
- snprintf(buf, bufsz, "%.4f", cpu_freq);
- return buf;
-}
-
-
-static void
-read_max_mhz(struct lscpu_desc *desc, int idx)
-{
- int num = real_cpu_num(desc, idx);
- int mhz;
-
- if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) != 0)
- return;
- if (!desc->maxmhz)
- desc->maxmhz = xcalloc(desc->ncpuspos, sizeof(char *));
- xasprintf(&desc->maxmhz[idx], "%.4f", (float) mhz / 1000);
-}
-
-static void
-read_min_mhz(struct lscpu_desc *desc, int idx)
-{
- int num = real_cpu_num(desc, idx);
- int mhz;
-
- if (ul_path_readf_s32(desc->syscpu, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) != 0)
- return;
- if (!desc->minmhz)
- desc->minmhz = xcalloc(desc->ncpuspos, sizeof(char *));
- xasprintf(&desc->minmhz[idx], "%.4f", (float) mhz / 1000);
-}
-
-static int
-cachecmp(const void *a, const void *b)
-{
- struct cpu_cache *c1 = (struct cpu_cache *) a;
- struct cpu_cache *c2 = (struct cpu_cache *) b;
-
- return strcmp(c2->name, c1->name);
-}
-
-static void
-read_cache(struct lscpu_desc *desc, int idx)
-{
- char buf[256];
- int i;
- int num = real_cpu_num(desc, idx);
-
- if (!desc->ncaches) {
- while (ul_path_accessf(desc->syscpu, F_OK,
- "cpu%d/cache/index%d",
- num, desc->ncaches) == 0)
- desc->ncaches++;
-
- if (!desc->ncaches)
- return;
- desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
- }
- for (i = 0; i < desc->ncaches; i++) {
- struct cpu_cache *ca = &desc->caches[i];
- cpu_set_t *map;
-
- if (ul_path_accessf(desc->syscpu, F_OK,
- "cpu%d/cache/index%d", num, i) != 0)
- continue;
- if (!ca->name) {
- int type = 0;
-
- /* cache type */
- if (ul_path_readf_string(desc->syscpu, &ca->type,
- "cpu%d/cache/index%d/type", num, i) > 0) {
- if (!strcmp(ca->type, "Data"))
- type = 'd';
- else if (!strcmp(ca->type, "Instruction"))
- type = 'i';
- }
-
- /* cache level */
- ul_path_readf_s32(desc->syscpu, &ca->level,
- "cpu%d/cache/index%d/level", num, i);
- if (type)
- snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
- else
- snprintf(buf, sizeof(buf), "L%d", ca->level);
-
- ca->name = xstrdup(buf);
-
- ul_path_readf_u32(desc->syscpu, &ca->ways_of_associativity,
- "cpu%d/cache/index%d/ways_of_associativity", num, i);
- ul_path_readf_u32(desc->syscpu, &ca->physical_line_partition,
- "cpu%d/cache/index%d/physical_line_partition", num, i);
- ul_path_readf_u32(desc->syscpu, &ca->number_of_sets,
- "cpu%d/cache/index%d/number_of_sets", num, i);
- ul_path_readf_u32(desc->syscpu, &ca->coherency_line_size,
- "cpu%d/cache/index%d/coherency_line_size", num, i);
-
- ul_path_readf_string(desc->syscpu, &ca->allocation_policy,
- "cpu%d/cache/index%d/allocation_policy", num, i);
- ul_path_readf_string(desc->syscpu, &ca->write_policy,
- "cpu%d/cache/index%d/write_policy", num, i);
-
- /* cache size */
- if (ul_path_readf_buffer(desc->syscpu, buf, sizeof(buf),
- "cpu%d/cache/index%d/size", num, i) > 0)
- parse_size(buf, &ca->size, NULL);
- else
- ca->size = 0;
- }
-
- /* information about how CPUs share different caches */
- ul_path_readf_cpuset(desc->syscpu, &map, maxcpus,
- "cpu%d/cache/index%d/shared_cpu_map", num, i);
-
- if (!ca->sharedmaps)
- ca->sharedmaps = xcalloc(desc->ncpuspos, sizeof(cpu_set_t *));
- add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
- }
-}
-
-static inline int is_node_dirent(struct dirent *d)
-{
- return
- d &&
-#ifdef _DIRENT_HAVE_D_TYPE
- (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
-#endif
- strncmp(d->d_name, "node", 4) == 0 &&
- isdigit_string(d->d_name + 4);
-}
-
-static int
-nodecmp(const void *ap, const void *bp)
-{
- int *a = (int *) ap, *b = (int *) bp;
- return *a - *b;
-}
-
-static void
-read_nodes(struct lscpu_desc *desc)
-{
- int i = 0;
- DIR *dir;
- struct dirent *d;
- struct path_cxt *sysnode;
-
- desc->nnodes = 0;
-
- sysnode = ul_new_path(_PATH_SYS_NODE);
- if (!sysnode)
- err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
- ul_path_set_prefix(sysnode, desc->prefix);
-
- dir = ul_path_opendir(sysnode, NULL);
- if (!dir)
- goto done;
-
- while ((d = readdir(dir))) {
- if (is_node_dirent(d))
- desc->nnodes++;
- }
-
- if (!desc->nnodes) {
- closedir(dir);
- goto done;
- }
-
- desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
- desc->idx2nodenum = xmalloc(desc->nnodes * sizeof(int));
-
- rewinddir(dir);
- while ((d = readdir(dir)) && i < desc->nnodes) {
- if (is_node_dirent(d))
- desc->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
- _("Failed to extract the node number"));
- }
- closedir(dir);
- qsort(desc->idx2nodenum, desc->nnodes, sizeof(int), nodecmp);
-
- /* information about how nodes share different CPUs */
- for (i = 0; i < desc->nnodes; i++)
- ul_path_readf_cpuset(sysnode, &desc->nodemaps[i], maxcpus,
- "node%d/cpumap", desc->idx2nodenum[i]);
-done:
- ul_unref_path(sysnode);
-}
+#ifdef LSCPU_OLD_OUTPUT_CODE /* temporarily disabled for the rewrite */
static char *
get_cell_data(struct lscpu_desc *desc, int idx, int col,
scols_unref_table(tb);
}
+#endif /* LSCPU_OLD_OUTPUT_CODE */
+
static void __attribute__((__noreturn__)) usage(void)
{
FILE *out = stdout;
int main(int argc, char *argv[])
{
+#ifdef LSCPU_OLD_OUTPUT_CODE
struct lscpu_modifier _mod = { .mode = OUTPUT_SUMMARY }, *mod = &_mod;
struct lscpu_desc _desc = { .flags = NULL }, *desc = &_desc;
int c, i, all = 0;
ul_unref_path(desc->syscpu);
ul_unref_path(desc->procfs);
+#endif /* LSCPU_OLD_OUTPUT_CODE */
return EXIT_SUCCESS;
}