git.ipfire.org Git - thirdparty/util-linux.git/commitdiff
lscpu: (topology) read caches from /sys
author Karel Zak <kzak@redhat.com>
Tue, 18 Aug 2020 10:48:41 +0000 (12:48 +0200)
committer Karel Zak <kzak@redhat.com>
Fri, 13 Nov 2020 08:19:02 +0000 (09:19 +0100)
Signed-off-by: Karel Zak <kzak@redhat.com>
sys-utils/lscpu-api.h
sys-utils/lscpu-cputype.c
sys-utils/lscpu-topology.c
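
The new read_caches() added to sys-utils/lscpu-topology.c below walks the per-CPU cache directories that the kernel exports under /sys/devices/system/cpu/cpu<N>/cache/index<M>/. For reference, a minimal standalone sketch of reading that layout for CPU 0, written against the documented sysfs attribute names and without the ul_path_* helpers the patch itself uses, could look like this (not part of the patch):

/*
 * Hypothetical sketch: enumerate the cache hierarchy of CPU 0 straight
 * from sysfs.  The attribute names (level, type, size) are the same ones
 * read_caches() below relies on.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void read_attr(const char *fmt, int idx, char *buf, int sz)
{
	char path[PATH_MAX];
	FILE *f;

	buf[0] = '\0';
	snprintf(path, sizeof(path), fmt, idx);
	f = fopen(path, "r");
	if (!f)
		return;				/* attribute missing: leave buf empty */
	if (!fgets(buf, sz, f))
		buf[0] = '\0';
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';		/* strip trailing newline */
}

int main(void)
{
	char dir[PATH_MAX], level[16], type[32], size[32];
	int i;

	for (i = 0; ; i++) {
		snprintf(dir, sizeof(dir),
			 "/sys/devices/system/cpu/cpu0/cache/index%d", i);
		if (access(dir, F_OK) != 0)
			break;			/* no more index<M> directories */

		read_attr("/sys/devices/system/cpu/cpu0/cache/index%d/level",
			  i, level, sizeof(level));
		read_attr("/sys/devices/system/cpu/cpu0/cache/index%d/type",
			  i, type, sizeof(type));
		read_attr("/sys/devices/system/cpu/cpu0/cache/index%d/size",
			  i, size, sizeof(size));

		printf("index%d: L%s %s, %s\n", i, level, type, size);
	}
	return 0;
}

The patch constructs the cache name the same way, as "L<level>" plus a 'd' or 'i' suffix for Data and Instruction caches.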

index 7c62d05a35f52bf3afcb983ed5cd1923167e7334..1c33f552ffa524ab3884d2a2b876e8fdbda9528f 100644 (file)
@@ -32,6 +32,25 @@ UL_DEBUG_DECLARE_MASK(lscpu);
 #define _PATH_SYS_CPU          _PATH_SYS_SYSTEM "/cpu"
 #define _PATH_SYS_NODE         _PATH_SYS_SYSTEM "/node"
 
+struct lscpu_cache {
+       int             nth;            /* cache<number> from cpuinfo */
+       char            *name;
+       char            *type;
+       char            *allocation_policy;
+       char            *write_policy;
+
+       int             level;
+       uint64_t        size;
+
+       unsigned int    ways_of_associativity;
+       unsigned int    physical_line_partition;
+       unsigned int    number_of_sets;
+       unsigned int    coherency_line_size;
+
+       int             nsharedmaps;
+       cpu_set_t       **sharedmaps;
+};
+
 struct lscpu_cputype {
        int     refcount;
 
@@ -59,6 +78,9 @@ struct lscpu_cputype {
        int     nbooks_per_drawer;
        int     ndrawers_per_system;
 
+       struct lscpu_cache *caches;
+       size_t ncaches;
+
        /* siblings maps */
        int             ncores;
        cpu_set_t       **coremaps;
@@ -155,25 +177,6 @@ struct lscpu_virt {
 
 };
 
-struct lscpu_cache {
-       int             nth;            /* cache<number> from cpuinfo */
-       char            *name;
-       char            *type;
-       char            *allocation_policy;
-       char            *write_policy;
-
-       int             level;
-       uint64_t        size;
-
-       unsigned int    ways_of_associativity;
-       unsigned int    physical_line_partition;
-       unsigned int    number_of_sets;
-       unsigned int    coherency_line_size;
-
-       int             nsharedmaps;
-       cpu_set_t       **sharedmaps;
-};
-
 struct lscpu_cxt {
        int maxcpus;            /* size in bits of kernel cpu mask */
        const char *prefix;     /* path to /sys and /proc snapshot or NULL */
@@ -199,8 +202,6 @@ struct lscpu_cxt {
        struct lscpu_vulnerability *vuls;       /* array of CPU vulnerabilities */
        size_t  nvuls;                          /* number of CPU vulnerabilities */
 
-       struct lscpu_cache *caches;
-       size_t ncaches;
 
        struct lscpu_cache *ecaches;
        size_t necaches;                /* extra caches (s390) from /proc/cpuinfo */
@@ -224,6 +225,9 @@ int lscpu_read_archext(struct lscpu_cxt *cxt);
 int lscpu_read_vulnerabilities(struct lscpu_cxt *cxt);
 int lscpu_read_numas(struct lscpu_cxt *cxt);
 
+void lscpu_free_caches(struct lscpu_cache *caches, size_t n);
+void lscpu_sort_caches(struct lscpu_cache *caches, size_t n);
+
 int lscpu_read_topology(struct lscpu_cxt *cxt);
 void lscpu_cputype_free_topology(struct lscpu_cputype *ct);
 
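With the header change above, the cache array now hangs off struct lscpu_cputype rather than struct lscpu_cxt. A hedged sketch of walking the per-type array once lscpu_read_topology() has populated it (dump_caches() is hypothetical and assumes lscpu-api.h is included; the field names match the struct lscpu_cache declaration above):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper, not part of the patch. */
static void dump_caches(const struct lscpu_cputype *ct)
{
	size_t i;

	for (i = 0; i < ct->ncaches; i++) {
		const struct lscpu_cache *ca = &ct->caches[i];

		printf("%-4s level=%d size=%ju ways=%u line=%u\n",
		       ca->name, ca->level, (uintmax_t) ca->size,
		       ca->ways_of_associativity,
		       ca->coherency_line_size);
	}
}
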
index 189e0c1af3751719bbf399b61d235bb68eb9d08e..3f745f4f254b52afdbe524e8c3fbf936c89c4248 100644 (file)
@@ -279,14 +279,6 @@ static int cmp_pattern(const void *a0, const void *b0)
        return strcmp(a->pattern, b->pattern);
 }
 
-static int cmp_cache(const void *a0, const void *b0)
-{
-       const struct lscpu_cache
-               *a = (const struct lscpu_cache *) a0,
-               *b = (const struct lscpu_cache *) b0;
-       return strcmp(a->name, b->name);
-}
-
 struct cpuinfo_parser {
        struct lscpu_cxt        *cxt;
        struct lscpu_cpu        *curr_cpu;
@@ -543,10 +535,7 @@ int lscpu_read_cpuinfo(struct lscpu_cxt *cxt)
        lscpu_unref_cpu(pr->curr_cpu);
 
        fclose(fp);
-
-       if (cxt->ecaches)
-               qsort(cxt->ecaches, cxt->necaches,
-                               sizeof(struct lscpu_cache), cmp_cache);
+       lscpu_sort_caches(cxt->ecaches, cxt->necaches);
 
        return 0;
 }
@@ -880,27 +869,6 @@ struct lscpu_cxt *lscpu_new_context(void)
        return xcalloc(1, sizeof(struct lscpu_cxt));
 }
 
-static void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
-{
-       size_t i;
-
-       if (!caches)
-               return;
-
-       for (i = 0; i < n; i++) {
-               struct lscpu_cache *c = &caches[i];
-
-               DBG(MISC, ul_debug(" freeing #%zu cache", i));
-
-               free(c->name);
-               free(c->type);
-               free(c->allocation_policy);
-               free(c->write_policy);
-               free(c->sharedmaps);
-       }
-       free(caches);
-}
-
 void lscpu_free_context(struct lscpu_cxt *cxt)
 {
        size_t i;
@@ -945,7 +913,6 @@ void lscpu_free_context(struct lscpu_cxt *cxt)
        lscpu_free_virtualization(cxt->virt);
        lscpu_free_architecture(cxt->arch);
        lscpu_free_caches(cxt->ecaches, cxt->necaches);
-       lscpu_free_caches(cxt->caches, cxt->ncaches);
 
        free(cxt);
 }
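
The lscpu_sort_caches() call that replaces the inline qsort() above orders caches purely by their generated names, so plain strcmp() yields L1d, L1i, L2, L3; for the s390 ecaches this matches what the removed inline qsort() already did. A tiny standalone illustration of that ordering (cmp_name() here is hypothetical and simply mirrors cmp_cache() from lscpu-topology.c):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_name(const void *a, const void *b)
{
	return strcmp(*(const char * const *) a, *(const char * const *) b);
}

int main(void)
{
	const char *names[] = { "L2", "L1i", "L3", "L1d" };
	size_t i, n = sizeof(names) / sizeof(names[0]);

	qsort(names, n, sizeof(names[0]), cmp_name);

	for (i = 0; i < n; i++)
		printf("%s ", names[i]);	/* prints: L1d L1i L2 L3 */
	printf("\n");
	return 0;
}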
index 93b5fd60839f2ee38b51806993fa8b63ed787405..286a819daef9ece70aa8424c630848442b5e7832 100644 (file)
@@ -49,6 +49,44 @@ void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
        free_cpuset_array(ct->socketmaps, ct->nsockets);
        free_cpuset_array(ct->bookmaps, ct->nbooks);
        free_cpuset_array(ct->drawermaps, ct->ndrawers);
+
+       lscpu_free_caches(ct->caches, ct->ncaches);
+}
+
+void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
+{
+       size_t i;
+
+       if (!caches)
+               return;
+
+       for (i = 0; i < n; i++) {
+               struct lscpu_cache *c = &caches[i];
+
+               DBG(MISC, ul_debug(" freeing #%zu cache [%s]", i, c->name));
+
+               free(c->name);
+               free(c->type);
+               free(c->allocation_policy);
+               free(c->write_policy);
+
+               free_cpuset_array(c->sharedmaps, c->nsharedmaps);
+       }
+       free(caches);
+}
+
+static int cmp_cache(const void *a0, const void *b0)
+{
+       const struct lscpu_cache
+               *a = (const struct lscpu_cache *) a0,
+               *b = (const struct lscpu_cache *) b0;
+       return strcmp(a->name, b->name);
+}
+
+void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
+{
+       if (caches && n)
+               qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
 }
 
 
@@ -184,6 +222,113 @@ static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct
        return 0;
 }
 
+static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cputype *ct, struct lscpu_cpu *cpu)
+{
+       char buf[256];
+       struct path_cxt *sys = cxt->syscpu;
+       int num = cpu->logical_id;
+       size_t i, setsize;
+
+       if (!ct->ncaches) {
+               while (ul_path_accessf(sys, F_OK,
+                                       "cpu%d/cache/index%zu",
+                                       num, ct->ncaches) == 0)
+                       ct->ncaches++;
+
+               if (!ct->ncaches)
+                       return 0;
+               ct->caches = xcalloc(ct->ncaches, sizeof(*ct->caches));
+       }
+
+       setsize = CPU_ALLOC_SIZE(cxt->maxcpus);
+
+       for (i = 0; i < ct->ncaches; i++) {
+               struct lscpu_cache *ca = &ct->caches[i];
+               cpu_set_t *map;
+
+               if (ul_path_accessf(sys, F_OK, "cpu%d/cache/index%zu", num, i) != 0)
+                       continue;
+
+               if (!ca->name) {
+                       int type = 0;
+
+                       /* cache type */
+                       if (ul_path_readf_string(sys, &ca->type,
+                                       "cpu%d/cache/index%zu/type", num, i) > 0) {
+                               if (!strcmp(ca->type, "Data"))
+                                       type = 'd';
+                               else if (!strcmp(ca->type, "Instruction"))
+                                       type = 'i';
+                       }
+
+                       /* cache level */
+                       ul_path_readf_s32(sys, &ca->level,
+                                       "cpu%d/cache/index%zu/level", num, i);
+                       if (type)
+                               snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
+                       else
+                               snprintf(buf, sizeof(buf), "L%d", ca->level);
+
+                       ca->name = xstrdup(buf);
+
+                       ul_path_readf_u32(sys, &ca->ways_of_associativity,
+                                       "cpu%d/cache/index%zu/ways_of_associativity", num, i);
+                       ul_path_readf_u32(sys, &ca->physical_line_partition,
+                                       "cpu%d/cache/index%zu/physical_line_partition", num, i);
+                       ul_path_readf_u32(sys, &ca->number_of_sets,
+                                       "cpu%d/cache/index%zu/number_of_sets", num, i);
+                       ul_path_readf_u32(sys, &ca->coherency_line_size,
+                                       "cpu%d/cache/index%zu/coherency_line_size", num, i);
+
+                       ul_path_readf_string(sys, &ca->allocation_policy,
+                                       "cpu%d/cache/index%zu/allocation_policy", num, i);
+                       ul_path_readf_string(sys, &ca->write_policy,
+                                       "cpu%d/cache/index%zu/write_policy", num, i);
+
+                       /* cache size */
+                       if (ul_path_readf_buffer(sys, buf, sizeof(buf),
+                                       "cpu%d/cache/index%zu/size", num, i) > 0)
+                               parse_size(buf, &ca->size, NULL);
+                       else
+                               ca->size = 0;
+               }
+
+               /* information about how CPUs share different caches */
+               ul_path_readf_cpuset(sys, &map, cxt->maxcpus,
+                                 "cpu%d/cache/index%zu/shared_cpu_map", num, i);
+
+               if (!ca->sharedmaps)
+                       ca->sharedmaps = xcalloc(cxt->npossibles, sizeof(cpu_set_t *));
+
+               add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map, setsize);
+       }
+
+       return 0;
+}
+
+/* Read cache for specified type */
+static int cputype_read_caches(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
+{
+       size_t i;
+       int rc = 0;
+
+       DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s caches",
+                               ct->vendor ?: "", ct->model ?: "", ct->modelname ?:""));
+
+       for (i = 0; i < cxt->npossibles; i++) {
+               struct lscpu_cpu *cpu = cxt->cpus[i];
+
+               if (!cpu || cpu->type != ct)
+                       continue;
+               rc = read_caches(cxt, ct, cpu);
+               if (rc)
+                       break;
+       }
+
+       lscpu_sort_caches(ct->caches, ct->ncaches);
+       return rc;
+}
+
 static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
 {
        struct path_cxt *sys = cxt->syscpu;
@@ -275,8 +420,10 @@ int lscpu_read_topology(struct lscpu_cxt *cxt)
        size_t i;
        int rc = 0;
 
-       for (i = 0; i < cxt->ncputypes; i++)
+       for (i = 0; i < cxt->ncputypes; i++) {
                rc += cputype_read_topology(cxt, cxt->cputypes[i]);
+               rc += cputype_read_caches(cxt, cxt->cputypes[i]);
+       }
 
        for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];
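
read_caches() sizes its shared-CPU maps with CPU_ALLOC_SIZE(cxt->maxcpus) and keeps one dynamically allocated cpu_set_t per distinct shared_cpu_map. A hedged sketch of the glibc CPU_*_S interfaces that this bookkeeping relies on (maxcpus is an assumed value here, and this uses plain sched.h rather than the lscpu cpuset helpers):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int maxcpus = 1024;			/* assumed kernel cpu-mask size */
	size_t setsize = CPU_ALLOC_SIZE(maxcpus);
	cpu_set_t *a = CPU_ALLOC(maxcpus), *b = CPU_ALLOC(maxcpus);

	CPU_ZERO_S(setsize, a);
	CPU_ZERO_S(setsize, b);
	CPU_SET_S(0, setsize, a);		/* both maps contain only CPU 0 */
	CPU_SET_S(0, setsize, b);

	/* Comparing maps with an explicit setsize is how duplicate
	 * shared_cpu_map entries can be recognized before a map is added
	 * to a sharedmaps array. */
	printf("maps %s\n", CPU_EQUAL_S(setsize, a, b) ? "equal" : "differ");

	CPU_FREE(a);
	CPU_FREE(b);
	return 0;
}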