/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008-2023 Karel Zak <kzak@redhat.com>
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "lscpu.h"

/* Add @set to @ary; if an equal set is already tracked, the redundant
 * @set is deallocated.  Returns 0 when added, 1 when freed as a
 * duplicate, and -EINVAL on a NULL array. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items, cpu_set_t *set, size_t setsize)
{
	size_t n;

	if (!ary)
		return -EINVAL;

	/* scan for a duplicate of @set */
	for (n = 0; n < *items; n++) {
		if (CPU_EQUAL_S(setsize, set, ary[n]))
			break;
	}

	if (n < *items) {
		/* already present -- drop the redundant copy */
		CPU_FREE(set);
		return 1;
	}

	/* new set -- the array takes ownership */
	ary[(*items)++] = set;
	return 0;
}
43
/* Free @items cpusets stored in @ary and the array itself; NULL @ary is a no-op. */
static void free_cpuset_array(cpu_set_t **ary, int items)
{
	int n = 0;

	if (!ary)
		return;

	while (n < items)
		free(ary[n++]);
	free(ary);
}
54
55void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
56{
57 if (!ct)
58 return;
59 free_cpuset_array(ct->coremaps, ct->ncores);
60 free_cpuset_array(ct->socketmaps, ct->nsockets);
61 free_cpuset_array(ct->bookmaps, ct->nbooks);
62 free_cpuset_array(ct->drawermaps, ct->ndrawers);
eadaf6d7
KZ
63}
64
65void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
66{
67 size_t i;
68
69 if (!caches)
70 return;
71
72 for (i = 0; i < n; i++) {
73 struct lscpu_cache *c = &caches[i];
74
6fbb5328
KZ
75 DBG(MISC, ul_debug(" freeing cache #%zu %s::%d",
76 i, c->name, c->id));
eadaf6d7
KZ
77
78 free(c->name);
79 free(c->type);
80 free(c->allocation_policy);
81 free(c->write_policy);
63c5e7f8 82 free(c->sharedmap);
eadaf6d7
KZ
83 }
84 free(caches);
85}
86
87static int cmp_cache(const void *a0, const void *b0)
88{
89 const struct lscpu_cache
90 *a = (const struct lscpu_cache *) a0,
91 *b = (const struct lscpu_cache *) b0;
92 return strcmp(a->name, b->name);
93}
94
95void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
96{
97 if (caches && n)
98 qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
f669523b
KZ
99}
100
101
102/* Read topology for specified type */
103static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
104{
eaf32c65 105 size_t i, npos;
f669523b 106 struct path_cxt *sys;
6baa4df7
KZ
107 int nthreads = 0, sw_topo = 0;
108 FILE *fd;
f669523b
KZ
109
110 sys = cxt->syscpu; /* /sys/devices/system/cpu/ */
801b125f 111 npos = cxt->npossibles; /* possible CPUs */
f669523b
KZ
112
113 DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
114 ct->vendor ?: "", ct->model ?: "", ct->modelname ?:""));
115
19ddc05e 116 for (i = 0; i < cxt->npossibles; i++) {
f669523b
KZ
117 struct lscpu_cpu *cpu = cxt->cpus[i];
118 cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
119 cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
01cb80ba 120 int num, n = 0;
f669523b 121
801b125f 122 if (!cpu || cpu->type != ct)
f669523b
KZ
123 continue;
124
125 num = cpu->logical_id;
126 if (ul_path_accessf(sys, F_OK,
127 "cpu%d/topology/thread_siblings", num) != 0)
128 continue;
129
f669523b
KZ
130 /* read topology maps */
131 ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
132 "cpu%d/topology/thread_siblings", num);
133 ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
134 "cpu%d/topology/core_siblings", num);
135 ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
136 "cpu%d/topology/book_siblings", num);
137 ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
138 "cpu%d/topology/drawer_siblings", num);
139
01cb80ba
KZ
140 if (thread_siblings)
141 n = CPU_COUNT_S(cxt->setsize, thread_siblings);
f669523b
KZ
142 if (!n)
143 n = 1;
144 if (n > nthreads)
145 nthreads = n;
146
147 /* Allocate arrays for topology maps.
148 *
149 * For each map we make sure that it can have up to ncpuspos
150 * entries. This is because we cannot reliably calculate the
151 * number of cores, sockets and books on all architectures.
152 * E.g. completely virtualized architectures like s390 may
153 * have multiple sockets of different sizes.
154 */
01cb80ba 155 if (!ct->coremaps && thread_siblings)
f669523b 156 ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
01cb80ba 157 if (!ct->socketmaps && core_siblings)
f669523b
KZ
158 ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
159 if (!ct->bookmaps && book_siblings)
160 ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
161 if (!ct->drawermaps && drawer_siblings)
162 ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));
163
164 /* add to topology maps */
01cb80ba
KZ
165 if (thread_siblings)
166 add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, cxt->setsize);
167 if (core_siblings)
168 add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, cxt->setsize);
f669523b 169 if (book_siblings)
eaf32c65 170 add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, cxt->setsize);
f669523b 171 if (drawer_siblings)
eaf32c65 172 add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, cxt->setsize);
f669523b
KZ
173
174 }
175
6baa4df7
KZ
176 /* s390 detects its cpu topology via /proc/sysinfo, if present.
177 * Using simply the cpu topology masks in sysfs will not give
178 * usable results since everything is virtualized. E.g.
179 * virtual core 0 may have only 1 cpu, but virtual core 2 may
180 * five cpus.
181 * If the cpu topology is not exported (e.g. 2nd level guest)
182 * fall back to old calculation scheme.
183 */
184 if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
185 int t0, t1;
186 char buf[BUFSIZ];
187
188 DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));
189
190 while (fgets(buf, sizeof(buf), fd) != NULL) {
db919abc 191 if (sscanf(buf, "CPU Topology SW: %d %d %zu %zu %zu %zu",
6baa4df7
KZ
192 &t0, &t1,
193 &ct->ndrawers_per_system,
194 &ct->nbooks_per_drawer,
195 &ct->nsockets_per_book,
196 &ct->ncores_per_socket) == 6) {
197 sw_topo = 1;
198 DBG(TYPE, ul_debugobj(ct, " using SW topology"));
199 break;
200 }
201 }
202 if (fd)
203 fclose(fd);
204 }
205
24c17c62
KZ
206 ct->nthreads_per_core = nthreads;
207 if (ct->mtid) {
208 uint64_t x;
209 if (ul_strtou64(ct->mtid, &x, 10) == 0 && x <= ULONG_MAX)
210 ct->nthreads_per_core = (size_t) x + 1;
211 }
6baa4df7
KZ
212
213 if (!sw_topo) {
19ddc05e
KZ
214 ct->ncores_per_socket = ct->nsockets ? ct->ncores / ct->nsockets : 0;
215 ct->nsockets_per_book = ct->nbooks ? ct->nsockets / ct->nbooks : 0;
216 ct->nbooks_per_drawer = ct->ndrawers ? ct->nbooks / ct->ndrawers : 0;
217 ct->ndrawers_per_system = ct->ndrawers;
6baa4df7 218 }
f669523b 219
db919abc
KZ
220 DBG(TYPE, ul_debugobj(ct, " nthreads: %zu (per core)", ct->nthreads_per_core));
221 DBG(TYPE, ul_debugobj(ct, " ncores: %zu (%zu per socket)", ct->ncores, ct->ncores_per_socket));
222 DBG(TYPE, ul_debugobj(ct, " nsockets: %zu (%zu per books)", ct->nsockets, ct->nsockets_per_book));
223 DBG(TYPE, ul_debugobj(ct, " nbooks: %zu (%zu per drawer)", ct->nbooks, ct->nbooks_per_drawer));
224 DBG(TYPE, ul_debugobj(ct, " ndrawers: %zu (%zu per system)", ct->ndrawers, ct->ndrawers_per_system));
f669523b
KZ
225
226 return 0;
227}
228
05abf594 229/* count size of all instancess of the "name" */
9dadd3e6 230size_t lscpu_get_cache_full_size(struct lscpu_cxt *cxt, const char *name, int *instances)
05abf594
KZ
231{
232 size_t i, sz = 0;
233
9dadd3e6
KZ
234 if (instances)
235 *instances = 0;
236
05abf594 237 for (i = 0; i < cxt->ncaches; i++) {
9dadd3e6 238 if (strcmp(cxt->caches[i].name, name) == 0) {
05abf594 239 sz += cxt->caches[i].size;
9dadd3e6
KZ
240 if (instances)
241 (*instances)++;
242 }
05abf594
KZ
243 }
244
245 return sz;
246}
247
63c5e7f8
KZ
248struct lscpu_cache *lscpu_cpu_get_cache(struct lscpu_cxt *cxt,
249 struct lscpu_cpu *cpu, const char *name)
250{
251 size_t i;
252
253 for (i = 0; i < cxt->ncaches; i++) {
254 struct lscpu_cache *ca = &cxt->caches[i];
255
256 if (strcmp(ca->name, name) == 0 &&
257 CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
258 return ca;
259 }
260
261 return NULL;
262}
263
6fbb5328
KZ
264/*
265 * The cache is identifued by type+level+id.
266 */
267static struct lscpu_cache *get_cache(struct lscpu_cxt *cxt,
268 const char *type, int level, int id)
269{
270 size_t i;
271
272 for (i = 0; i < cxt->ncaches; i++) {
273 struct lscpu_cache *ca = &cxt->caches[i];
274 if (ca->id == id &&
275 ca->level == level &&
276 strcmp(ca->type, type) == 0)
277 return ca;
278 }
279 return NULL;
280}
281
282static struct lscpu_cache *add_cache(struct lscpu_cxt *cxt,
283 const char *type, int level, int id)
284{
285 struct lscpu_cache *ca;
286
287 cxt->ncaches++;
64d6d400
TW
288 cxt->caches = xreallocarray(cxt->caches,
289 cxt->ncaches, sizeof(*cxt->caches));
6fbb5328
KZ
290
291 ca = &cxt->caches[cxt->ncaches - 1];
292 memset(ca, 0 , sizeof(*ca));
293
294 ca->id = id;
295 ca->level = level;
296 ca->type = xstrdup(type);
297
298 DBG(GATHER, ul_debugobj(cxt, "add cache %s%d::%d", type, level, id));
6fbb5328
KZ
299 return ca;
300}
301
ada1a387
KZ
302static int mk_cache_id(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu, char *type, int level)
303{
304 size_t i;
305 int idx = 0;
306
307 for (i = 0; i < cxt->ncaches; i++) {
308 struct lscpu_cache *ca = &cxt->caches[i];
309
310 if (ca->level != level || strcmp(ca->type, type) != 0)
311 continue;
312
57fee04c
KZ
313 if (ca->sharedmap &&
314 CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
ada1a387
KZ
315 return idx;
316 idx++;
317 }
318
319 return idx;
320}
321
57fee04c
KZ
322static int read_sparc_onecache(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu,
323 int level, char *typestr, int type)
324{
325 struct lscpu_cache *ca;
326 struct path_cxt *sys = cxt->syscpu;
327 int num = cpu->logical_id;
328 uint32_t size;
329 int rc, id;
330 char buf[32];
331
332 if (type)
333 snprintf(buf, sizeof(buf), "l%d_%c", level, type);
334 else
335 snprintf(buf, sizeof(buf), "l%d_", level);
336
337 rc = ul_path_readf_u32(sys, &size,
338 "cpu%d/%scache_size", num, buf);
339 if (rc != 0)
340 return rc;
341
342 DBG(CPU, ul_debugobj(cpu, "#%d reading sparc %s cache", num, buf));
343
344 id = mk_cache_id(cxt, cpu, typestr, level);
345
346 ca = get_cache(cxt, typestr, level, id);
347 if (!ca)
348 ca = add_cache(cxt, typestr, level, id);
349
350 if (!ca->name) {
351 ul_path_readf_u32(sys, &ca->coherency_line_size,
352 "cpu%d/%scache_line_size", num, buf);
353 assert(ca->type);
354
355 if (type)
356 snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
357 else
358 snprintf(buf, sizeof(buf), "L%d", ca->level);
359 ca->name = xstrdup(buf);
360 ca->size = size;
361 }
362 /* There is no sharedmap of the cache in /sys, we assume that caches are
363 * not shared. Send a patch if your /sys provides another information.
364 */
365 if (!ca->sharedmap) {
366 size_t setsize = 0;
367
368 ca->sharedmap = cpuset_alloc(cxt->maxcpus, &setsize, NULL);
369 CPU_ZERO_S(setsize, ca->sharedmap);
370 CPU_SET_S(num, setsize, ca->sharedmap);
371 }
372
373 return 0;
374}
375
/* Read the standard SPARC cache hierarchy: split L1 instruction/data
 * caches plus unified L2 and L3 caches. */
static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
	read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
	read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
	/* was a duplicated level-2 read; the L3 cache (l3_cache_size)
	 * was never collected */
	read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

	return 0;
}
385
6fbb5328 386static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
eadaf6d7
KZ
387{
388 char buf[256];
389 struct path_cxt *sys = cxt->syscpu;
390 int num = cpu->logical_id;
ada1a387 391 size_t i, ncaches = 0;
eadaf6d7 392
6fbb5328
KZ
393 while (ul_path_accessf(sys, F_OK,
394 "cpu%d/cache/index%zu",
395 num, ncaches) == 0)
396 ncaches++;
eadaf6d7 397
57fee04c
KZ
398 if (ncaches == 0 && ul_path_accessf(sys, F_OK,
399 "cpu%d/l1_icache_size", num) == 0)
400 return read_sparc_caches(cxt, cpu);
401
ada1a387
KZ
402 DBG(CPU, ul_debugobj(cpu, "#%d reading %zd caches", num, ncaches));
403
6fbb5328
KZ
404 for (i = 0; i < ncaches; i++) {
405 struct lscpu_cache *ca;
6fbb5328 406 int id, level;
eadaf6d7 407
6fbb5328 408 if (ul_path_readf_s32(sys, &id, "cpu%d/cache/index%zu/id", num, i) != 0)
ada1a387 409 id = -1;
6fbb5328
KZ
410 if (ul_path_readf_s32(sys, &level, "cpu%d/cache/index%zu/level", num, i) != 0)
411 continue;
412 if (ul_path_readf_buffer(sys, buf, sizeof(buf),
413 "cpu%d/cache/index%zu/type", num, i) <= 0)
eadaf6d7
KZ
414 continue;
415
ada1a387
KZ
416 if (id == -1)
417 id = mk_cache_id(cxt, cpu, buf, level);
418
6fbb5328
KZ
419 ca = get_cache(cxt, buf, level, id);
420 if (!ca)
421 ca = add_cache(cxt, buf, level, id);
422
eadaf6d7
KZ
423 if (!ca->name) {
424 int type = 0;
425
6fbb5328
KZ
426 assert(ca->type);
427
428 if (!strcmp(ca->type, "Data"))
429 type = 'd';
430 else if (!strcmp(ca->type, "Instruction"))
431 type = 'i';
eadaf6d7 432
eadaf6d7
KZ
433 if (type)
434 snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
435 else
436 snprintf(buf, sizeof(buf), "L%d", ca->level);
437
438 ca->name = xstrdup(buf);
439
440 ul_path_readf_u32(sys, &ca->ways_of_associativity,
441 "cpu%d/cache/index%zu/ways_of_associativity", num, i);
442 ul_path_readf_u32(sys, &ca->physical_line_partition,
443 "cpu%d/cache/index%zu/physical_line_partition", num, i);
444 ul_path_readf_u32(sys, &ca->number_of_sets,
445 "cpu%d/cache/index%zu/number_of_sets", num, i);
446 ul_path_readf_u32(sys, &ca->coherency_line_size,
447 "cpu%d/cache/index%zu/coherency_line_size", num, i);
448
449 ul_path_readf_string(sys, &ca->allocation_policy,
450 "cpu%d/cache/index%zu/allocation_policy", num, i);
451 ul_path_readf_string(sys, &ca->write_policy,
452 "cpu%d/cache/index%zu/write_policy", num, i);
453
454 /* cache size */
455 if (ul_path_readf_buffer(sys, buf, sizeof(buf),
456 "cpu%d/cache/index%zu/size", num, i) > 0)
457 parse_size(buf, &ca->size, NULL);
458 else
459 ca->size = 0;
460 }
461
63c5e7f8
KZ
462 if (!ca->sharedmap)
463 /* information about how CPUs share different caches */
464 ul_path_readf_cpuset(sys, &ca->sharedmap, cxt->maxcpus,
465 "cpu%d/cache/index%zu/shared_cpu_map", num, i);
eadaf6d7
KZ
466 }
467
468 return 0;
469}
470
2075eb60 471static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
f669523b 472{
2075eb60
KZ
473 struct path_cxt *sys = cxt->syscpu;
474 int num = cpu->logical_id;
f669523b 475
2075eb60
KZ
476 if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
477 return 0;
f669523b 478
2075eb60
KZ
479 DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));
480
481 if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
482 cpu->coreid = -1;
483 if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
484 cpu->socketid = -1;
485 if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
486 cpu->bookid = -1;
487 if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
488 cpu->drawerid = -1;
489
490 return 0;
f669523b
KZ
491}
492
2075eb60 493static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
f669523b
KZ
494{
495 struct path_cxt *sys = cxt->syscpu;
2075eb60
KZ
496 int num = cpu->logical_id;
497 char mode[64];
f669523b 498
2075eb60
KZ
499 if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
500 return 0;
801b125f 501
2075eb60 502 ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);
ee0fabda 503
ada1a387 504 DBG(CPU, ul_debugobj(cpu, "#%d reading polar=%s", num, mode));
f669523b 505
2075eb60
KZ
506 if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
507 cpu->polarization = POLAR_VLOW;
508 else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
509 cpu->polarization = POLAR_VMEDIUM;
510 else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
511 cpu->polarization = POLAR_VHIGH;
512 else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
513 cpu->polarization = POLAR_HORIZONTAL;
514 else
515 cpu->polarization = POLAR_UNKNOWN;
f669523b 516
63c5e7f8
KZ
517 if (cpu->type)
518 cpu->type->has_polarization = 1;
f669523b
KZ
519 return 0;
520}
fd35a1da 521
6d1a2705
KZ
522static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
523{
524 struct path_cxt *sys = cxt->syscpu;
525 int num = cpu->logical_id;
526
527 if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
528 return 0;
529
ada1a387
KZ
530 DBG(CPU, ul_debugobj(cpu, "#%d reading address", num));
531
6d1a2705 532 ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
63c5e7f8
KZ
533 if (cpu->type)
534 cpu->type->has_addresses = 1;
6d1a2705
KZ
535 return 0;
536}
537
8c587fad
KZ
538static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
539{
540 struct path_cxt *sys = cxt->syscpu;
541 int num = cpu->logical_id;
542
543 if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
544 return 0;
545
ada1a387
KZ
546 DBG(CPU, ul_debugobj(cpu, "#%d reading configure", num));
547
8c587fad 548 ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
63c5e7f8
KZ
549 if (cpu->type)
550 cpu->type->has_configured = 1;
8c587fad
KZ
551 return 0;
552}
553
9d08a19d
KZ
554static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
555{
556 struct path_cxt *sys = cxt->syscpu;
557 int num = cpu->logical_id;
558 int mhz;
559
ada1a387
KZ
560 DBG(CPU, ul_debugobj(cpu, "#%d reading mhz", num));
561
9d08a19d
KZ
562 if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
563 cpu->mhz_max_freq = (float) mhz / 1000;
564 if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
565 cpu->mhz_min_freq = (float) mhz / 1000;
6d5699e6 566
f2d08d4d
KZ
567 /* The default current-frequency value comes is from /proc/cpuinfo (if
568 * available). This /proc value is usually based on MSR registers
569 * (APERF/APERF) and it changes pretty often. It seems better to read
570 * frequency from cpufreq subsystem that provides the current frequency
571 * for the current policy. There is also cpuinfo_cur_freq in sysfs, but
572 * it's not always available.
573 */
574 if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/scaling_cur_freq", num) == 0)
575 cpu->mhz_cur_freq = (float) mhz / 1000;
576
6d5699e6
KZ
577 if (cpu->type && (cpu->mhz_min_freq || cpu->mhz_max_freq))
578 cpu->type->has_freq = 1;
579
9d08a19d
KZ
580 return 0;
581}
582
6d5699e6
KZ
583float lsblk_cputype_get_maxmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
584{
585 size_t i;
586 float res = 0.0;
587
588 for (i = 0; i < cxt->npossibles; i++) {
589 struct lscpu_cpu *cpu = cxt->cpus[i];
590
591 if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
592 continue;
593 res = max(res, cpu->mhz_max_freq);
594 }
595 return res;
596}
597
598float lsblk_cputype_get_minmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
599{
600 size_t i;
601 float res = -1.0;
602
603 for (i = 0; i < cxt->npossibles; i++) {
604 struct lscpu_cpu *cpu = cxt->cpus[i];
605
606 if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
607 continue;
608 if (res < 0.0 || cpu->mhz_min_freq < res)
609 res = cpu->mhz_min_freq;
610 }
611 return res;
612}
613
9b9e4f5d
KZ
614/* returns scaling (use) of CPUs freq. in percent */
615float lsblk_cputype_get_scalmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
616{
617 size_t i;
618 float fmax = 0, fcur = 0;
619
620 for (i = 0; i < cxt->npossibles; i++) {
621 struct lscpu_cpu *cpu = cxt->cpus[i];
622
623 if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
624 continue;
625 if (cpu->mhz_max_freq <= 0.0 || cpu->mhz_cur_freq <= 0.0)
626 continue;
627 fmax += cpu->mhz_max_freq;
628 fcur += cpu->mhz_cur_freq;
629 }
630 if (fcur <= 0.0)
631 return 0.0;
632 return fcur / fmax * 100;
633}
634
2075eb60 635int lscpu_read_topology(struct lscpu_cxt *cxt)
fd35a1da 636{
fd35a1da 637 size_t i;
2075eb60
KZ
638 int rc = 0;
639
ada1a387 640
6fbb5328 641 for (i = 0; i < cxt->ncputypes; i++)
2075eb60 642 rc += cputype_read_topology(cxt, cxt->cputypes[i]);
fd35a1da 643
2075eb60 644 for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
fd35a1da 645 struct lscpu_cpu *cpu = cxt->cpus[i];
fd35a1da 646
2075eb60 647 if (!cpu || !cpu->type)
fd35a1da 648 continue;
801b125f 649
ada1a387
KZ
650 DBG(CPU, ul_debugobj(cpu, "#%d reading topology", cpu->logical_id));
651
2075eb60 652 rc = read_ids(cxt, cpu);
6d1a2705
KZ
653 if (!rc)
654 rc = read_polarization(cxt, cpu);
655 if (!rc)
656 rc = read_address(cxt, cpu);
8c587fad
KZ
657 if (!rc)
658 rc = read_configure(cxt, cpu);
9d08a19d
KZ
659 if (!rc)
660 rc = read_mhz(cxt, cpu);
6fbb5328
KZ
661 if (!rc)
662 rc = read_caches(cxt, cpu);
2075eb60 663 }
fd35a1da 664
6fbb5328 665 lscpu_sort_caches(cxt->caches, cxt->ncaches);
9dadd3e6
KZ
666 DBG(GATHER, ul_debugobj(cxt, " L1d: %zu", lscpu_get_cache_full_size(cxt, "L1d", NULL)));
667 DBG(GATHER, ul_debugobj(cxt, " L1i: %zu", lscpu_get_cache_full_size(cxt, "L1i", NULL)));
668 DBG(GATHER, ul_debugobj(cxt, " L2: %zu", lscpu_get_cache_full_size(cxt, "L2", NULL)));
669 DBG(GATHER, ul_debugobj(cxt, " L3: %zu", lscpu_get_cache_full_size(cxt, "L3", NULL)));
6fbb5328 670
2075eb60
KZ
671 return rc;
672}
fd35a1da 673
fd35a1da 674