-
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2020 Karel Zak <kzak@redhat.com>
+ */
#include <sys/utsname.h>
#include <sys/personality.h>
+#include <limits.h>
-#include "lscpu-api.h"
-
-#include "fileutils.h"
-
-UL_DEBUG_DEFINE_MASK(lscpu);
-UL_DEBUG_DEFINE_MASKNAMES(lscpu) = UL_DEBUG_EMPTY_MASKNAMES;
-
-static void lscpu_init_debug(void)
-{
- __UL_INIT_DEBUG_FROM_ENV(lscpu, LSCPU_DEBUG_, 0, LSCPU_DEBUG);
-}
+#if defined(HAVE_LIBRTAS)
+# include <librtas.h>
+#endif
-static void context_init_paths(struct lscpu_cxt *cxt)
-{
- DBG(MISC, ul_debugobj(cxt, "initialize paths"));
- ul_path_init_debug();
-
- /* /sys/devices/system/cpu */
- cxt->syscpu = ul_new_path(_PATH_SYS_CPU);
- if (!cxt->syscpu)
- err(EXIT_FAILURE, _("failed to initialize CPUs sysfs handler"));
- if (cxt->prefix)
- ul_path_set_prefix(cxt->syscpu, cxt->prefix);
-
- /* /proc */
- cxt->procfs = ul_new_path("/proc");
- if (!cxt->procfs)
- err(EXIT_FAILURE, _("failed to initialize procfs handler"));
- if (cxt->prefix)
- ul_path_set_prefix(cxt->procfs, cxt->prefix);
-}
+#include "lscpu.h"
+#include "fileutils.h"
+#include "c_strtod.h"
/* Lookup a pattern and get the value for format "<pattern> : <key>"
*/
return 1;
}
-/* add @set to the @ary, unnecessary set is deallocated. */
-static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set, size_t setsize)
-{
- int i;
-
- if (!ary)
- return -1;
-
- for (i = 0; i < *items; i++) {
- if (CPU_EQUAL_S(setsize, set, ary[i]))
- break;
- }
- if (i == *items) {
- ary[*items] = set;
- ++*items;
- return 0;
- }
- CPU_FREE(set);
- return 1;
-}
-
struct lscpu_cputype *lscpu_new_cputype(void)
{
struct lscpu_cputype *ct;
ct = xcalloc(1, sizeof(struct lscpu_cputype));
ct->refcount = 1;
+ ct->dispatching = -1;
+ ct->freqboost = -1;
DBG(TYPE, ul_debugobj(ct, "alloc"));
return ct;
void lscpu_ref_cputype(struct lscpu_cputype *ct)
{
- if (ct)
+ if (ct) {
ct->refcount++;
+ DBG(TYPE, ul_debugobj(ct, ">>> ref %d", ct->refcount));
+ }
}
void lscpu_unref_cputype(struct lscpu_cputype *ct)
if (!ct)
return;
+ /*DBG(TYPE, ul_debugobj(ct, ">>> unref %d", ct->refcount - 1));*/
+
if (--ct->refcount <= 0) {
- DBG(TYPE, ul_debugobj(ct, " freeing"));
+ DBG(TYPE, ul_debugobj(ct, " freeing %s/%s", ct->vendor, ct->model));
+ lscpu_cputype_free_topology(ct);
free(ct->vendor);
+ free(ct->bios_vendor);
free(ct->machinetype); /* s390 */
free(ct->family);
free(ct->model);
free(ct->modelname);
+ free(ct->bios_modelname);
+ free(ct->bios_family);
free(ct->revision); /* alternative for model (ppc) */
free(ct->stepping);
free(ct->bogomips);
free(ct->flags);
free(ct->mtid); /* maximum thread id (s390) */
free(ct->addrsz); /* address sizes */
- free(ct->coremaps);
- free(ct->socketmaps);
- free(ct->bookmaps);
- free(ct->drawermaps);
+ free(ct->static_mhz);
+ free(ct->dynamic_mhz);
free(ct);
}
}
struct lscpu_cputype *lscpu_add_cputype(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
- size_t i;
-
- /* ignore if already in the context */
- for (i = 0; i < cxt->ncputypes; i++) {
- struct lscpu_cputype *x = cxt->cputypes[i];
-
- if (match(x->vendor, ct->vendor) &&
- match(x->model, ct->model) &&
- match(x->modelname, ct->modelname) &&
- match(x->stepping, ct->stepping)) {
-
- DBG(TYPE, ul_debugobj(x, "reuse"));
- return x;
- }
- }
-
DBG(TYPE, ul_debugobj(ct, "add new"));
- cxt->cputypes = xrealloc(cxt->cputypes, (cxt->ncputypes + 1)
- * sizeof(struct lscpu_cputype *));
+ cxt->cputypes = xreallocarray(cxt->cputypes, cxt->ncputypes + 1,
+ sizeof(struct lscpu_cputype *));
cxt->cputypes[cxt->ncputypes] = ct;
cxt->ncputypes++;
lscpu_ref_cputype(ct);
-
- /* first type -- use it for all CPUs */
- if (cxt->ncputypes == 1)
- lscpu_cpus_apply_type(cxt, ct);
-
return ct;
}
-static void lscpu_merge_cputype(struct lscpu_cputype *a, struct lscpu_cputype *b)
-{
- if (!a->vendor && b->vendor)
- a->vendor = xstrdup(b->vendor);
- if (!a->machinetype && b->machinetype)
- a->machinetype = xstrdup(b->machinetype);
- if (!a->family && b->family)
- a->family = xstrdup(b->family);
- if (!a->model && b->model)
- a->model = xstrdup(b->model);
- if (!a->modelname && b->modelname)
- a->modelname = xstrdup(b->modelname);
- if (!a->revision && b->revision)
- a->revision = xstrdup(b->revision);
- if (!a->stepping && b->stepping)
- a->stepping = xstrdup(b->stepping);
- if (!a->bogomips && b->bogomips)
- a->bogomips = xstrdup(b->bogomips);
- if (!a->flags && b->flags)
- a->flags = xstrdup(b->flags);
- if (!a->mtid && b->mtid)
- a->mtid = xstrdup(b->mtid);
- if (!a->addrsz && b->addrsz)
- a->addrsz = xstrdup(b->addrsz);
-}
-
-/* Read topology for specified type */
-static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
-{
- size_t i, setsize, npos;
- struct path_cxt *sys;
-
- sys = cxt->syscpu; /* /sys/devices/system/cpu/ */
- setsize = CPU_ALLOC_SIZE(cxt->maxcpus); /* CPU set size */
- npos = cxt->ncpuspos; /* possible CPUs */
-
- for (i = 0; i < cxt->ncpus; i++) {
- struct lscpu_cpu *cpu = cxt->cpus[i++];
- cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
- cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
- int num;
-
- if (cpu->type != ct)
- continue;
-
- num = cpu->logical_id;
- if (ul_path_accessf(sys, F_OK,
- "cpu%d/topology/thread_siblings", num) != 0)
- continue;
-
- /* read topology maps */
- ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
- "cpu%d/topology/thread_siblings", num);
- ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
- "cpu%d/topology/core_siblings", num);
- ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
- "cpu%d/topology/book_siblings", num);
- ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
- "cpu%d/topology/drawer_siblings", num);
-
- /* Allocate arrays for topology maps.
- *
- * For each map we make sure that it can have up to ncpuspos
- * entries. This is because we cannot reliably calculate the
- * number of cores, sockets and books on all architectures.
- * E.g. completely virtualized architectures like s390 may
- * have multiple sockets of different sizes.
- */
- if (!ct->coremaps)
- ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
- if (!ct->socketmaps)
- ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
- if (!ct->bookmaps && book_siblings)
- ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
- if (!ct->drawermaps && drawer_siblings)
- ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));
-
- /* add to topology maps */
- add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, setsize);
- add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, setsize);
-
- if (book_siblings)
- add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, setsize);
- if (drawer_siblings)
- add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, setsize);
-
- /* calculate threads */
- if (!ct->nthreads) {
- int ndrawers, nbooks, nsockets, ncores, nthreads;
-
- /* threads within one core */
- nthreads = CPU_COUNT_S(setsize, thread_siblings);
- if (!nthreads)
- nthreads = 1;
-
- /* cores within one socket */
- ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
- if (!ncores)
- ncores = 1;
-
- /* number of sockets within one book. Because of odd /
- * non-present cpu maps and to keep calculation easy we make
- * sure that nsockets and nbooks is at least 1.
- */
- nsockets = ct->ncpus / nthreads / ncores;
- if (!nsockets)
- nsockets = 1;
-
- /* number of books */
- nbooks = cxt->npresents / nthreads / ncores / nsockets;
- if (!nbooks)
- ct->nbooks = 1;
-
- /* number of drawers */
- ndrawers = cxt->npresents / nbooks / nthreads / ncores / nsockets;
- if (!ndrawers)
- ndrawers = 1;
-
- ct->nthreads = ndrawers * nbooks * nsockets * ncores * nthreads;
- }
- }
-
- return 0;
-}
-
-int lscpu_read_topology(struct lscpu_cxt *cxt)
+static void fprintf_cputypes(FILE *f, struct lscpu_cxt *cxt)
{
	size_t i;
-	int rc = 0;
-	for (i = 0; i < cxt->ncputypes; i++)
-		rc += cputype_read_topology(cxt, cxt->cputypes[i]);
-
-	return rc;
+	/* Debug helper: dump all collected CPU types to @f. */
+	for (i = 0; i < cxt->ncputypes; i++) {
+		struct lscpu_cputype *ct = cxt->cputypes[i];
+
+		/* NULL-guard every "%s" argument: passing a null pointer to a
+		 * printf-style "%s" is undefined behavior per the C standard;
+		 * glibc merely happens to print "(null)". Fields left unset by
+		 * the cpuinfo parser are NULL here. */
+		fprintf(f, "\n vendor: %s\n", ct->vendor ? ct->vendor : "");
+		fprintf(f, " machinetype: %s\n", ct->machinetype ? ct->machinetype : "");
+		fprintf(f, " family: %s\n", ct->family ? ct->family : "");
+		fprintf(f, " model: %s\n", ct->model ? ct->model : "");
+		fprintf(f, " modelname: %s\n", ct->modelname ? ct->modelname : "");
+		fprintf(f, " revision: %s\n", ct->revision ? ct->revision : "");
+		fprintf(f, " stepping: %s\n", ct->stepping ? ct->stepping : "");
+		fprintf(f, " mtid: %s\n", ct->mtid ? ct->mtid : "");
+		fprintf(f, " addrsz: %s\n", ct->addrsz ? ct->addrsz : "");
+	}
}
+enum {
+ CPUINFO_LINE_UNKNOWN, /* unknown line */
+ CPUINFO_LINE_CPUTYPE, /* line found in type_patterns[] */
+ CPUINFO_LINE_CPU, /* line found in cpu_patterns[] */
+ CPUINFO_LINE_CACHE /* line found in cache_pattern[] */
+};
/* Describes /proc/cpuinfo fields */
struct cpuinfo_pattern {
int id; /* field ID */
+ int domain; /* CPUINFO_LINE_* */
const char *pattern; /* field name as used in /proc/cpuinfo */
size_t offset; /* offset in lscpu_cputype or lscpu_cpu struct */
};
/* field identifiers (field name may be different on different archs) */
enum {
PAT_ADDRESS_SIZES,
- PAT_BOGOMIPS,
+ PAT_BOGOMIPS, /* global */
+ PAT_BOGOMIPS_CPU, /* per-cpu */
PAT_CPU,
PAT_FAMILY,
PAT_FEATURES,
PAT_TYPE,
PAT_VARIANT,
PAT_VENDOR,
+ PAT_CACHE,
+ PAT_ISA,
};
/*
#define DEF_PAT_CPUTYPE(_str, _id, _member) \
{ \
.id = (_id), \
+ .domain = CPUINFO_LINE_CPUTYPE, \
.pattern = (_str), \
.offset = offsetof(struct lscpu_cputype, _member), \
}
static const struct cpuinfo_pattern type_patterns[] =
{
/* Sort by fields name! */
+ DEF_PAT_CPUTYPE( "ASEs implemented", PAT_FLAGS, flags), /* mips */
+ DEF_PAT_CPUTYPE( "Address Sizes", PAT_ADDRESS_SIZES, addrsz),/* loongarch */
DEF_PAT_CPUTYPE( "BogoMIPS", PAT_BOGOMIPS, bogomips), /* aarch64 */
+ DEF_PAT_CPUTYPE( "CPU Family", PAT_FAMILY, family), /* loongarch */
+ DEF_PAT_CPUTYPE( "CPU Revision", PAT_REVISION, revision), /* loongarch */
DEF_PAT_CPUTYPE( "CPU implementer", PAT_IMPLEMENTER,vendor), /* ARM and aarch64 */
DEF_PAT_CPUTYPE( "CPU part", PAT_PART, model), /* ARM and aarch64 */
DEF_PAT_CPUTYPE( "CPU revision", PAT_REVISION, revision), /* aarch64 */
DEF_PAT_CPUTYPE( "CPU variant", PAT_VARIANT, stepping), /* aarch64 */
DEF_PAT_CPUTYPE( "Features", PAT_FEATURES, flags), /* aarch64 */
+ DEF_PAT_CPUTYPE( "ISA", PAT_ISA, isa), /* loongarch */
+ DEF_PAT_CPUTYPE( "Model Name", PAT_MODEL_NAME, modelname), /* loongarch */
DEF_PAT_CPUTYPE( "address sizes", PAT_ADDRESS_SIZES, addrsz),/* x86 */
- DEF_PAT_CPUTYPE( "bogomips", PAT_BOGOMIPS, bogomips),
DEF_PAT_CPUTYPE( "bogomips per cpu", PAT_BOGOMIPS, bogomips), /* s390 */
- DEF_PAT_CPUTYPE( "cpu family", PAT_FAMILY, family),
DEF_PAT_CPUTYPE( "cpu", PAT_CPU, modelname), /* ppc, sparc */
+ DEF_PAT_CPUTYPE( "cpu family", PAT_FAMILY, family),
+ DEF_PAT_CPUTYPE( "cpu model", PAT_MODEL, model), /* mips */
DEF_PAT_CPUTYPE( "family", PAT_FAMILY, family),
DEF_PAT_CPUTYPE( "features", PAT_FEATURES, flags), /* s390 */
DEF_PAT_CPUTYPE( "flags", PAT_FLAGS, flags), /* x86 */
DEF_PAT_CPUTYPE( "max thread id", PAT_MAX_THREAD_ID, mtid), /* s390 */
- DEF_PAT_CPUTYPE( "model name", PAT_MODEL_NAME, modelname),
DEF_PAT_CPUTYPE( "model", PAT_MODEL, model),
+ DEF_PAT_CPUTYPE( "model name", PAT_MODEL_NAME, modelname),
DEF_PAT_CPUTYPE( "revision", PAT_REVISION, revision),
DEF_PAT_CPUTYPE( "stepping", PAT_STEPPING, stepping),
DEF_PAT_CPUTYPE( "type", PAT_TYPE, flags), /* sparc64 */
#define DEF_PAT_CPU(_str, _id, _member) \
{ \
.id = (_id), \
+ .domain = CPUINFO_LINE_CPU, \
.pattern = (_str), \
.offset = offsetof(struct lscpu_cpu, _member), \
}
static const struct cpuinfo_pattern cpu_patterns[] =
{
/* Sort by fields name! */
- DEF_PAT_CPU( "cpu MHz dynamic", PAT_MHZ_DYNAMIC,dynamic_mhz), /* s390 */
- DEF_PAT_CPU( "cpu MHz static", PAT_MHZ_STATIC, static_mhz), /* s390 */
- DEF_PAT_CPU( "cpu MHz", PAT_MHZ, mhz),
- DEF_PAT_CPU( "cpu number", PAT_PROCESSOR, logical_id), /* s390 */
- DEF_PAT_CPU( "processor", PAT_PROCESSOR, logical_id),
+ DEF_PAT_CPU( "CPU MHz", PAT_MHZ, mhz), /* loongarch */
+ DEF_PAT_CPU( "bogomips", PAT_BOGOMIPS_CPU, bogomips),
+ DEF_PAT_CPU( "cpu MHz", PAT_MHZ, mhz),
+ DEF_PAT_CPU( "cpu MHz dynamic", PAT_MHZ_DYNAMIC, dynamic_mhz), /* s390 */
+ DEF_PAT_CPU( "cpu MHz static", PAT_MHZ_STATIC, static_mhz), /* s390 */
+ DEF_PAT_CPU( "cpu number", PAT_PROCESSOR, logical_id), /* s390 */
+ DEF_PAT_CPU( "processor", PAT_PROCESSOR, logical_id),
+
+};
+
+/*
+ * /proc/cpuinfo to lscpu_cache conversion
+ */
+#define DEF_PAT_CACHE(_str, _id) \
+ { \
+ .id = (_id), \
+ .domain = CPUINFO_LINE_CACHE, \
+ .pattern = (_str) \
+ }
+
+static const struct cpuinfo_pattern cache_patterns[] =
+{
+ /* Sort by fields name! */
+ DEF_PAT_CACHE("cache", PAT_CACHE),
};
#define CPUTYPE_PATTERN_BUFSZ 32
return strcmp(a->pattern, b->pattern);
}
-static int cpuinfo_parse_line( struct lscpu_cputype **ct,
- struct lscpu_cpu **cpu,
- const char *str)
+struct cpuinfo_parser {
+ struct lscpu_cxt *cxt;
+ struct lscpu_cpu *curr_cpu;
+ struct lscpu_cputype *curr_type;
+ unsigned int curr_type_added : 1;
+};
+
+/* Return 1 when the field at @offset in @ct already holds a value that
+ * differs from the newly parsed @value -- i.e. the cpuinfo stream has
+ * switched to another CPU type. Only the identifying fields (vendor,
+ * model, modelname, stepping) are compared; any other offset returns 0.
+ * NULL on either side never counts as "different". */
+static int is_different_cputype(struct lscpu_cputype *ct, size_t offset, const char *value)
{
-	struct cpuinfo_pattern key, *pat;
-	const char *p, *v;
-	char buf[CPUTYPE_PATTERN_BUFSZ] = { 0 }, **data;
-	void *stru = NULL;
+	switch (offset) {
+	case offsetof(struct lscpu_cputype, vendor):
+		return ct->vendor && value && strcmp(ct->vendor, value) != 0;
+	case offsetof(struct lscpu_cputype, model):
+		return ct->model && value && strcmp(ct->model, value) != 0;
+	case offsetof(struct lscpu_cputype, modelname):
+		return ct->modelname && value && strcmp(ct->modelname, value) != 0;
+	case offsetof(struct lscpu_cputype, stepping):
+		return ct->stepping && value && strcmp(ct->stepping, value) != 0;
+	}
+	return 0;
+}
+
+/* Canonicalize @str -- strip a trailing number off the key and return that
+ * number via @keynum. Usable for example for "processor 5" or "cache1"
+ * cpuinfo lines. The string is modified in place; @keynum is left untouched
+ * when no trailing number is present or it does not parse/fit. */
+static char *key_cleanup(char *str, int *keynum)
+{
+	size_t sz = rtrim_whitespace((unsigned char *)str);
+	size_t i;
+
+	if (!sz)
+		return str;
+
+	/* Find the start of the trailing digit run (if any). Cast to
+	 * unsigned char: passing a plain char that may be negative to
+	 * isdigit(3) is undefined behavior (CERT STR37-C). */
+	for (i = sz; i > 0; i--) {
+		if (!isdigit((unsigned char) str[i - 1]))
+			break;
+	}
+
+	if (i < sz) {
+		char *end = NULL, *p = str + i;
+		long n;
+
+		errno = 0;
+		n = strtol(p, &end, 10);
+		/* reject parse errors and values that do not fit into the
+		 * int *keynum (strtol returns long; silent truncation on
+		 * LP64 would corrupt the key number) */
+		if (errno || !end || end == p || n < INT_MIN || n > INT_MAX)
+			return str;
+
+		*keynum = (int) n;
+		str[i] = '\0';
+		rtrim_whitespace((unsigned char *)str);
+	}
+	return str;
+}
+
+static const struct cpuinfo_pattern *cpuinfo_parse_line(char *str, char **value, int *keynum)
+{
+ struct cpuinfo_pattern key = { .id = 0 }, *pat;
+ char *p, *v;
+ char buf[CPUTYPE_PATTERN_BUFSZ] = { 0 };
DBG(GATHER, ul_debug("parse \"%s\"", str));
if (!str || !*str)
- return -EINVAL;
- p = skip_blank(str);
+ return NULL;
+ p = (char *) skip_blank(str);
if (!p || !*p)
- return -EINVAL;
+ return NULL;
v = strchr(p, ':');
if (!v || !*v)
- return -EINVAL;
+ return NULL;
/* prepare name of the field */
xstrncpy(buf, p, sizeof(buf));
buf[v - p] = '\0';
v++;
- rtrim_whitespace((unsigned char *)buf);
+ /* prepare value */
+ v = (char *) skip_space(v);
+ if (!v || !*v)
+ return NULL;
- /* search in cpu-types patterns */
- key.pattern = buf;
- pat = bsearch(&key, type_patterns,
+ key.pattern = key_cleanup(buf, keynum);
+ /* CPU-type */
+ if ((pat = bsearch(&key, type_patterns,
ARRAY_SIZE(type_patterns),
sizeof(struct cpuinfo_pattern),
- cmp_pattern);
- if (pat) {
- /* CPU type */
- if (!*ct)
- *ct = lscpu_new_cputype();
- stru = *ct;
- } else {
- /* search in cpu patterns */
- pat = bsearch(&key, cpu_patterns,
+ cmp_pattern)))
+ goto found;
+
+ /* CPU */
+ if ((pat = bsearch(&key, cpu_patterns,
ARRAY_SIZE(cpu_patterns),
sizeof(struct cpuinfo_pattern),
- cmp_pattern);
- if (pat) {
- if (!*cpu)
- *cpu = lscpu_new_cpu();
- stru = *cpu;
- }
- }
+ cmp_pattern)))
+ goto found;
- if (!stru) {
- DBG(GATHER, ul_debug("'%s' not found", buf));
- return 1;
- }
+ /* CACHE */
+ if ((pat = bsearch(&key, cache_patterns,
+ ARRAY_SIZE(cache_patterns),
+ sizeof(struct cpuinfo_pattern),
+ cmp_pattern)))
+ goto found;
+
+ return NULL;
+found:
+ rtrim_whitespace((unsigned char *) v);
+ *value = v;
+ return pat;
+}
- /* prepare value */
- v = skip_space(v);
- if (!v || !*v)
- return -EINVAL;
+/* Parse extra cache lines contained within /proc/cpuinfo but which are not
+ * part of the cache topology information within the sysfs filesystem. This is
+ * true for all shared caches on e.g. s390. When there are layers of
+ * hypervisors in between it is not known which CPUs share which caches.
+ * Therefore information about shared caches is only available in
+ * /proc/cpuinfo. Format is:
+ *
+ * cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>
+ *
+ * the cache<nr> part is parsed in cpuinfo_parse_line(); this function parses
+ * the part after the ":". Returns 1 when a cache entry was added, 0 when the
+ * line was skipped (private cache or unparsable field).
+ */
+static int cpuinfo_parse_cache(struct lscpu_cxt *cxt, int keynum, char *data)
+{
+	struct lscpu_cache *cache;
+	long long size;
+	char *p, type;
+	int level;
+	unsigned int line_size, associativity;
-	/* copy value to struct */
-	switch (pat->id) {
-	case PAT_PROCESSOR:
-		(*cpu)->logical_id = atoi(v);
-		break;
-	default:
-		/* set value as a string and cleanup */
-		strdup_to_offset(stru, pat->offset, v);
-		data = (char **) ((char *) stru + pat->offset);
-		rtrim_whitespace((unsigned char *) *data);
-		break;
-	}
+	DBG(GATHER, ul_debugobj(cxt, " parse cpuinfo cache '%s'", data));
-	return 0;
+	/* Note: every strstr() result is tested for NULL *before* the key
+	 * length is added -- "NULL + n" is undefined pointer arithmetic and
+	 * the subsequent !p check could never fire. */
+	p = strstr(data, "scope=");
+	/* Skip private caches, also present in sysfs */
+	if (!p || strncmp(p + 6, "Private", 7) == 0)
+		return 0;
+	p = strstr(data, "level=");
+	if (!p || sscanf(p, "level=%d", &level) != 1)
+		return 0;
+	p = strstr(data, "type=");
+	if (!p || !*(p + 5))
+		return 0;
+	p += 5;
+	type = 0;
+	if (strncmp(p, "Data", 4) == 0)
+		type = 'd';
+	else if (strncmp(p, "Instruction", 11) == 0)
+		type = 'i';
+	else if (strncmp(p, "Unified", 7) == 0)
+		type = 'u';
+	p = strstr(data, "size=");
+	if (!p || sscanf(p, "size=%lld", &size) != 1)
+		return 0;
+
+	p = strstr(data, "line_size=");
+	if (!p || sscanf(p, "line_size=%u", &line_size) != 1)
+		return 0;
+
+	p = strstr(data, "associativity=");
+	if (!p || sscanf(p, "associativity=%u", &associativity) != 1)
+		return 0;
+
+	/* append a new entry to the extra-caches array */
+	cxt->necaches++;
+	cxt->ecaches = xreallocarray(cxt->ecaches,
+				cxt->necaches, sizeof(struct lscpu_cache));
+	cache = &cxt->ecaches[cxt->necaches - 1];
+	memset(cache, 0 , sizeof(*cache));
+
+	if (type == 'i' || type == 'd')
+		xasprintf(&cache->name, "L%d%c", level, type);
+	else
+		xasprintf(&cache->name, "L%d", level);
+
+	cache->nth = keynum;
+	cache->level = level;
+	cache->size = size * 1024;		/* cpuinfo reports KiB */
+	cache->ways_of_associativity = associativity;
+	cache->coherency_line_size = line_size;
+	/* Number of sets for s390. For safety, just check divide by zero */
+	cache->number_of_sets = line_size ? (cache->size / line_size): 0;
+	cache->number_of_sets = associativity ? (cache->number_of_sets / associativity) : 0;
+
+	cache->type = type == 'i' ? xstrdup("Instruction") :
+		      type == 'd' ? xstrdup("Data") :
+		      type == 'u' ? xstrdup("Unified") : NULL;
+	return 1;
}
int lscpu_read_cpuinfo(struct lscpu_cxt *cxt)
{
- struct lscpu_cputype *type = NULL;
- struct lscpu_cpu *cpu = NULL;
FILE *fp;
- char buf[BUFSIZ];
+	/* Used to be BUFSIZ, which is small on some platforms (e.g. musl);
+	 * therefore hardcode to 4K */
+ char buf[4096];
+ size_t i;
+ struct lscpu_cputype *ct;
+ struct cpuinfo_parser _pr = { .cxt = cxt }, *pr = &_pr;
+
+ assert(cxt->npossibles); /* lscpu_create_cpus() required */
+ assert(cxt->cpus);
DBG(GATHER, ul_debugobj(cxt, "reading cpuinfo"));
err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");
do {
- const char *p = NULL;
+ int keynum = -1;
+ char *p = NULL, *value = NULL;
+ const struct cpuinfo_pattern *pattern;
if (fgets(buf, sizeof(buf), fp) != NULL)
- p = skip_space(buf);
+ p = (char *) skip_space(buf);
if (p == NULL || (*buf && !*p)) {
- if (cpu)
- lscpu_add_cpu(cxt, cpu, type);
- else if (type) {
- /* Generic non-cpu data. For some architectures
- * cpuinfo contains description block (at the
- * beginning of the file (IBM s390) or at the
- * end of the file (IBM POWER). The block is
- * global for all CPUs.
- */
- if (cxt->ncputypes == 1) {
- /* The type already exist, merge it. For example on POWER
- * CPU list contains "cpu:" line with architecture and
- * global information at the end of the file */
- struct lscpu_cputype *dflt = lscpu_cputype_get_default(cxt);
- if (dflt)
- lscpu_merge_cputype(dflt, type);
- } else
- lscpu_add_cputype(cxt, type);
- }
-
- lscpu_unref_cpu(cpu);
- lscpu_unref_cputype(type);
- cpu = NULL, type = NULL;
-
+ /* Blank line separates information */
if (p == NULL)
break; /* fgets() returns nothing; EOF */
- } else {
- rtrim_whitespace((unsigned char *) buf);
- cpuinfo_parse_line(&type, &cpu, p);
+ continue;
+ }
+
+ rtrim_whitespace((unsigned char *) buf);
+
+ /* parse */
+ pattern = cpuinfo_parse_line(p, &value, &keynum);
+ if (!pattern) {
+ DBG(GATHER, ul_debug("'%s' not found", buf));
+ continue;
+ }
+
+ /* set data */
+ switch (pattern->domain) {
+ case CPUINFO_LINE_CPU:
+ if (pattern->id == PAT_PROCESSOR) {
+ /* switch CPU */
+ int id = 0;
+
+ if (keynum >= 0)
+ id = keynum;
+ else {
+ uint32_t n;
+ if (ul_strtou32(value, &n, 10) == 0)
+ id = n;
+ }
+
+ if (pr->curr_cpu && pr->curr_type)
+ lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);
+
+ lscpu_unref_cpu(pr->curr_cpu);
+ pr->curr_cpu = lscpu_get_cpu(cxt, id);
+
+ if (!pr->curr_cpu)
+ DBG(GATHER, ul_debug("*** cpu ID '%d' undefined", id));
+ else
+ DBG(GATHER, ul_debug(" switch to CPU %d", id));
+ lscpu_ref_cpu(pr->curr_cpu);
+ break;
+ }
+ if (!pr->curr_cpu)
+ DBG(GATHER, ul_debug("*** cpu data before cpu ID"));
+ else
+ strdup_to_offset(pr->curr_cpu, pattern->offset, value);
+
+ if (pattern->id == PAT_MHZ_DYNAMIC && pr->curr_type && !pr->curr_type->dynamic_mhz)
+ pr->curr_type->dynamic_mhz = xstrdup(value);
+ if (pattern->id == PAT_MHZ_STATIC && pr->curr_type && !pr->curr_type->static_mhz)
+ pr->curr_type->static_mhz = xstrdup(value);
+ if (pattern->id == PAT_BOGOMIPS_CPU && pr->curr_type && !pr->curr_type->bogomips)
+ pr->curr_type->bogomips = xstrdup(value);
+ if (pattern->id == PAT_MHZ && pr->curr_cpu && value) {
+ errno = 0;
+ pr->curr_cpu->mhz_cur_freq = (float) c_strtod(value, NULL);
+ if (errno)
+ pr->curr_cpu->mhz_cur_freq = 0;
+ }
+ break;
+ case CPUINFO_LINE_CPUTYPE:
+ if (pr->curr_type && is_different_cputype(pr->curr_type, pattern->offset, value)) {
+ lscpu_unref_cputype(pr->curr_type);
+ pr->curr_type = NULL;
+ }
+ if (!pr->curr_type) {
+ pr->curr_type = lscpu_new_cputype();
+ lscpu_add_cputype(cxt, pr->curr_type);
+ }
+
+ strdup_to_offset(pr->curr_type, pattern->offset, value);
+ break;
+ case CPUINFO_LINE_CACHE:
+ if (pattern->id != PAT_CACHE)
+ break;
+ cpuinfo_parse_cache(cxt, keynum, value);
+ break;
}
} while (1);
- lscpu_unref_cpu(cpu);
- lscpu_unref_cputype(type);
+ DBG(GATHER, fprintf_cputypes(stderr, cxt));
+
+ if (pr->curr_cpu && !pr->curr_cpu->type)
+ lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);
+
+ lscpu_unref_cputype(pr->curr_type);
+ lscpu_unref_cpu(pr->curr_cpu);
+
fclose(fp);
+ lscpu_sort_caches(cxt->ecaches, cxt->necaches);
+
+ /* Set the default type to CPUs which are missing (or not parsed)
+ * in cpuinfo */
+ ct = lscpu_cputype_get_default(cxt);
+ for (i = 0; ct && i < cxt->npossibles; i++) {
+ struct lscpu_cpu *cpu = cxt->cpus[i];
+
+ if (cpu && !cpu->type)
+ lscpu_cpu_set_type(cpu, ct);
+ }
+
return 0;
}
snprintf(buf, sizeof(buf), " %s ", ct->flags);
if (strstr(buf, " lm "))
- ar->bit32 = 1, ar->bit64 = 1; /* x86_64 */
+ ar->bit32 = ar->bit64 = 1; /* x86_64 */
if (strstr(buf, " zarch "))
- ar->bit32 = 1, ar->bit64 = 1; /* s390x */
+ ar->bit32 = ar->bit64 = 1; /* s390x */
if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
- ar->bit32 = 1, ar->bit64 = 1; /* sparc64 */
+ ar->bit32 = ar->bit64 = 1; /* sparc64 */
+ }
+
+ if (ct && ct->isa) {
+ char buf[BUFSIZ];
+
+ snprintf(buf, sizeof(buf), " %s ", ct->isa);
+ if (strstr(buf, " loongarch32 "))
+ ar->bit32 = 1;
+ if (strstr(buf, " loongarch64 "))
+ ar->bit64 = 1;
}
if (ar->name && !cxt->noalive) {
int lscpu_read_cpulists(struct lscpu_cxt *cxt)
{
- size_t maxn;
- size_t setsize;
cpu_set_t *cpuset = NULL;
assert(cxt);
* real /sys, let's use any crazy number... */
cxt->maxcpus = 2048;
- maxn = cxt->maxcpus;
- setsize = CPU_ALLOC_SIZE(maxn);
-
- if (ul_path_readf_cpulist(cxt->syscpu, &cpuset, maxn, "possible") == 0) {
- size_t num, idx;
+ cxt->setsize = CPU_ALLOC_SIZE(cxt->maxcpus);
- cxt->ncpuspos = CPU_COUNT_S(setsize, cpuset);
- cxt->idx2cpunum = xcalloc(cxt->ncpuspos, sizeof(int));
-
- for (num = 0, idx = 0; num < maxn; num++) {
- if (CPU_ISSET_S(num, setsize, cpuset))
- cxt->idx2cpunum[idx++] = num;
- }
+ /* create CPUs from possible mask */
+ if (ul_path_readf_cpulist(cxt->syscpu, &cpuset, cxt->maxcpus, "possible") == 0) {
+ lscpu_create_cpus(cxt, cpuset, cxt->setsize);
cpuset_free(cpuset);
cpuset = NULL;
} else
/* get mask for present CPUs */
- if (ul_path_readf_cpulist(cxt->syscpu, &cxt->present, maxn, "present") == 0)
- cxt->npresents = CPU_COUNT_S(setsize, cxt->present);
+ if (ul_path_readf_cpulist(cxt->syscpu, &cxt->present, cxt->maxcpus, "present") == 0)
+ cxt->npresents = CPU_COUNT_S(cxt->setsize, cxt->present);
/* get mask for online CPUs */
- if (ul_path_readf_cpulist(cxt->syscpu, &cxt->online, maxn, "online") == 0)
- cxt->nonlines = CPU_COUNT_S(setsize, cxt->online);
+ if (ul_path_readf_cpulist(cxt->syscpu, &cxt->online, cxt->maxcpus, "online") == 0)
+ cxt->nonlines = CPU_COUNT_S(cxt->setsize, cxt->online);
return 0;
}
char buf[BUFSIZ];
struct lscpu_cputype *ct;
+ DBG(GATHER, ul_debugobj(cxt, "reading extra arch info"));
+
assert(cxt);
ct = lscpu_cputype_get_default(cxt);
if (!ct)
}
#if defined(HAVE_LIBRTAS)
- /* Get PowerPC speficic info */
+ /* Get PowerPC specific info */
if (!cxt->noalive) {
int rc, len, ntypes;
goto nortas;
ntypes = strbe16toh(buf, 2);
- assert(ntypes <= 1);
if (!ntypes)
goto nortas;
ct->physsockets = strbe16toh(buf, 4);
ct->physchips = strbe16toh(buf, 6);
ct->physcoresperchip = strbe16toh(buf, 8);
-nortas:
}
+nortas:
#endif
return 0;
}
assert(cxt);
+ DBG(GATHER, ul_debugobj(cxt, "reading vulnerabilities"));
+
dir = ul_path_opendir(cxt->syscpu, "vulnerabilities");
if (!dir)
return 0;
cxt->nvuls = n = 0;
while (xreaddir(dir))
n++;
- if (!n)
+ if (!n) {
+ closedir(dir);
return 0;
+ }
rewinddir(dir);
cxt->vuls = xcalloc(n, sizeof(struct lscpu_vulnerability));
assert(!cxt->nnodes);
+
sys = ul_new_path(_PATH_SYS_NODE);
if (!sys)
err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);
cxt->idx2nodenum = xmalloc(cxt->nnodes * sizeof(int));
rewinddir(dir);
- for (i = 0; (d = readdir(dir)) && i < cxt->nnodes; i++) {
+ for (i = 0; (d = readdir(dir)) && i < cxt->nnodes;) {
if (is_node_dirent(d))
- cxt->idx2nodenum[i] = strtol_or_err(((d->d_name) + 4),
+ cxt->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
_("Failed to extract the node number"));
}
closedir(dir);
ul_path_readf_cpuset(sys, &cxt->nodemaps[i], cxt->maxcpus,
"node%d/cpumap", cxt->idx2nodenum[i]);
done:
+ DBG(GATHER, ul_debugobj(cxt, "read %zu numas", cxt->nnodes));
+
ul_unref_path(sys);
return 0;
}
-
-#ifdef TEST_PROGRAM_CPUTYPE
-/* TODO: move to lscpu.c */
-struct lscpu_cxt *lscpu_new_context(void)
-{
- return xcalloc(1, sizeof(struct lscpu_cxt));
-}
-
-void lscpu_free_context(struct lscpu_cxt *cxt)
-{
- size_t i;
-
- if (!cxt)
- return;
-
- DBG(MISC, ul_debugobj(cxt, "freeing context"));
-
- DBG(MISC, ul_debugobj(cxt, " de-initialize paths"));
- ul_unref_path(cxt->syscpu);
- ul_unref_path(cxt->procfs);
-
- DBG(MISC, ul_debugobj(cxt, " freeing cpus"));
- for (i = 0; i < cxt->ncpus; i++)
- lscpu_unref_cpu(cxt->cpus[i]);
-
- DBG(MISC, ul_debugobj(cxt, " freeing types"));
- for (i = 0; i < cxt->ncputypes; i++)
- lscpu_unref_cputype(cxt->cputypes[i]);
-
- free(cxt->idx2cpunum);
- free(cxt->present);
- free(cxt->online);
- free(cxt->cputypes);
- free(cxt->cpus);
-
- for (i = 0; i < cxt->nvuls; i++) {
- free(cxt->vuls[i].name);
- free(cxt->vuls[i].text);
- }
- free(cxt->vuls);
-
- for (i = 0; i < cxt->nnodes; i++)
- free(cxt->nodemaps[i]);
-
- free(cxt->nodemaps);
- free(cxt->idx2nodenum);
-
- lscpu_free_virtualization(cxt->virt);
- lscpu_free_architecture(cxt->arch);
-
- free(cxt);
-}
-
-int main(int argc, char **argv)
-{
- struct lscpu_cxt *cxt;
-
- cxt = lscpu_new_context();
-
- if (argc == 3 && strcmp(argv[1], "--prefix") == 0) {
- cxt->prefix = argv[2];
- cxt->noalive = 1;
- }
-
- lscpu_init_debug();
- context_init_paths(cxt);
-
- lscpu_read_cpuinfo(cxt);
- cxt->arch = lscpu_read_architecture(cxt);
-
- lscpu_read_cpulists(cxt);
- lscpu_read_archext(cxt);
- lscpu_read_vulnerabilities(cxt);
- lscpu_read_numas(cxt);
- lscpu_read_topology(cxt);
- lscpu_read_topolgy_ids(cxt);
-
- lscpu_decode_arm(cxt);
-
- cxt->virt = lscpu_read_virtualization(cxt);
-
- lscpu_free_context(cxt);
- return EXIT_SUCCESS;
-}
-#endif /* TEST_PROGRAM_CPUTYPES */