sys-utils/lscpu-topology.c
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008-2023 Karel Zak <kzak@redhat.com>
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include "lscpu.h"
/* Add @set to @ary; if an equal set is already present, the redundant @set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items, cpu_set_t *set, size_t setsize)
{
        size_t i;

        if (!ary)
                return -EINVAL;

        for (i = 0; i < *items; i++) {
                if (CPU_EQUAL_S(setsize, set, ary[i]))
                        break;
        }
        if (i == *items) {
                ary[*items] = set;
                ++*items;
                return 0;
        }
        CPU_FREE(set);
        return 1;
}

static void free_cpuset_array(cpu_set_t **ary, int items)
{
        int i;

        if (!ary)
                return;
        for (i = 0; i < items; i++)
                free(ary[i]);
        free(ary);
}

void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
{
        if (!ct)
                return;
        free_cpuset_array(ct->coremaps, ct->ncores);
        free_cpuset_array(ct->socketmaps, ct->nsockets);
        free_cpuset_array(ct->bookmaps, ct->nbooks);
        free_cpuset_array(ct->drawermaps, ct->ndrawers);
}

void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
{
        size_t i;

        if (!caches)
                return;

        for (i = 0; i < n; i++) {
                struct lscpu_cache *c = &caches[i];

                DBG(MISC, ul_debug(" freeing cache #%zu %s::%d",
                                i, c->name, c->id));

                free(c->name);
                free(c->type);
                free(c->allocation_policy);
                free(c->write_policy);
                free(c->sharedmap);
        }
        free(caches);
}

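/* qsort(3) comparator: order caches by name (e.g. "L1d", "L1i", "L2"). */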
static int cmp_cache(const void *a0, const void *b0)
{
        const struct lscpu_cache
                *a = (const struct lscpu_cache *) a0,
                *b = (const struct lscpu_cache *) b0;
        return strcmp(a->name, b->name);
}

void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
{
        if (caches && n)
                qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
}


/* Read topology for the specified CPU type */
static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i, npos;
        struct path_cxt *sys;
        int nthreads = 0, sw_topo = 0;
        FILE *fd;

        sys = cxt->syscpu;              /* /sys/devices/system/cpu/ */
        npos = cxt->npossibles;         /* possible CPUs */

        DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
                        ct->vendor ?: "", ct->model ?: "", ct->modelname ?: ""));

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];
                cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
                cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
                int num, n = 0;

                if (!cpu || cpu->type != ct)
                        continue;

                num = cpu->logical_id;
                if (ul_path_accessf(sys, F_OK,
                                "cpu%d/topology/thread_siblings", num) != 0)
                        continue;

                /* read topology maps */
                ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
                                "cpu%d/topology/thread_siblings", num);
                ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
                                "cpu%d/topology/core_siblings", num);
                ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
                                "cpu%d/topology/book_siblings", num);
                ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
                                "cpu%d/topology/drawer_siblings", num);

                if (thread_siblings)
                        n = CPU_COUNT_S(cxt->setsize, thread_siblings);
                if (!n)
                        n = 1;
                if (n > nthreads)
                        nthreads = n;

                /* Allocate arrays for topology maps.
                 *
                 * For each map we make sure that it can have up to ncpuspos
                 * entries. This is because we cannot reliably calculate the
                 * number of cores, sockets and books on all architectures.
                 * E.g. completely virtualized architectures like s390 may
                 * have multiple sockets of different sizes.
                 */
                if (!ct->coremaps && thread_siblings)
                        ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
                if (!ct->socketmaps && core_siblings)
                        ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
                if (!ct->bookmaps && book_siblings)
                        ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
                if (!ct->drawermaps && drawer_siblings)
                        ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));

                /* add to topology maps */
                if (thread_siblings)
                        add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, cxt->setsize);
                if (core_siblings)
                        add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, cxt->setsize);
                if (book_siblings)
                        add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, cxt->setsize);
                if (drawer_siblings)
                        add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, cxt->setsize);

        }

        /* s390 detects its cpu topology via /proc/sysinfo, if present.
         * Simply using the cpu topology masks in sysfs will not give
         * usable results since everything is virtualized. E.g.
         * virtual core 0 may have only 1 cpu, but virtual core 2 may
         * have five cpus.
         * If the cpu topology is not exported (e.g. 2nd level guest)
         * fall back to the old calculation scheme.
         */
        if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
                int t0, t1;
                char buf[BUFSIZ];

                DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));

                while (fgets(buf, sizeof(buf), fd) != NULL) {
                        if (sscanf(buf, "CPU Topology SW: %d %d %zu %zu %zu %zu",
                                        &t0, &t1,
                                        &ct->ndrawers_per_system,
                                        &ct->nbooks_per_drawer,
                                        &ct->nsockets_per_book,
                                        &ct->ncores_per_socket) == 6) {
                                sw_topo = 1;
                                DBG(TYPE, ul_debugobj(ct, " using SW topology"));
                                break;
                        }
                }
                if (fd)
                        fclose(fd);
        }

        ct->nthreads_per_core = nthreads;
        if (ct->mtid) {
                uint64_t x;
                if (ul_strtou64(ct->mtid, &x, 10) == 0 && x <= ULONG_MAX)
                        ct->nthreads_per_core = (size_t) x + 1;
        }

        if (!sw_topo) {
                ct->ncores_per_socket = ct->nsockets ? ct->ncores / ct->nsockets : 0;
                ct->nsockets_per_book = ct->nbooks ? ct->nsockets / ct->nbooks : 0;
                ct->nbooks_per_drawer = ct->ndrawers ? ct->nbooks / ct->ndrawers : 0;
                ct->ndrawers_per_system = ct->ndrawers;
        }

        DBG(TYPE, ul_debugobj(ct, " nthreads: %zu (per core)", ct->nthreads_per_core));
        DBG(TYPE, ul_debugobj(ct, " ncores: %zu (%zu per socket)", ct->ncores, ct->ncores_per_socket));
        DBG(TYPE, ul_debugobj(ct, " nsockets: %zu (%zu per book)", ct->nsockets, ct->nsockets_per_book));
        DBG(TYPE, ul_debugobj(ct, " nbooks: %zu (%zu per drawer)", ct->nbooks, ct->nbooks_per_drawer));
        DBG(TYPE, ul_debugobj(ct, " ndrawers: %zu (%zu per system)", ct->ndrawers, ct->ndrawers_per_system));

        return 0;
}

/* count the total size of all instances of the "name" cache */
size_t lscpu_get_cache_full_size(struct lscpu_cxt *cxt, const char *name, int *instances)
{
        size_t i, sz = 0;

        if (instances)
                *instances = 0;

        for (i = 0; i < cxt->ncaches; i++) {
                if (strcmp(cxt->caches[i].name, name) == 0) {
                        sz += cxt->caches[i].size;
                        if (instances)
                                (*instances)++;
                }
        }

        return sz;
}

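/*
 * Return the cache instance with the given name (e.g. "L1d") whose
 * shared CPU map contains @cpu, or NULL if there is none.
 */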
struct lscpu_cache *lscpu_cpu_get_cache(struct lscpu_cxt *cxt,
                                struct lscpu_cpu *cpu, const char *name)
{
        size_t i;

        for (i = 0; i < cxt->ncaches; i++) {
                struct lscpu_cache *ca = &cxt->caches[i];

                if (ca->sharedmap &&
                    strcmp(ca->name, name) == 0 &&
                    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
                        return ca;
        }

        return NULL;
}

/*
 * The cache is identified by type+level+id.
 */
static struct lscpu_cache *get_cache(struct lscpu_cxt *cxt,
                                const char *type, int level, int id)
{
        size_t i;

        for (i = 0; i < cxt->ncaches; i++) {
                struct lscpu_cache *ca = &cxt->caches[i];
                if (ca->id == id &&
                    ca->level == level &&
                    strcmp(ca->type, type) == 0)
                        return ca;
        }
        return NULL;
}

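/* Append a new, zero-initialized cache descriptor for type+level+id to cxt->caches. */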
static struct lscpu_cache *add_cache(struct lscpu_cxt *cxt,
                                const char *type, int level, int id)
{
        struct lscpu_cache *ca;

        cxt->ncaches++;
        cxt->caches = xreallocarray(cxt->caches,
                                cxt->ncaches, sizeof(*cxt->caches));

        ca = &cxt->caches[cxt->ncaches - 1];
        memset(ca, 0, sizeof(*ca));

        ca->id = id;
        ca->level = level;
        ca->type = xstrdup(type);

        DBG(GATHER, ul_debugobj(cxt, "add cache %s%d::%d", type, level, id));
        return ca;
}

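/*
 * Derive a cache ID when /sys does not provide one: walk the already
 * known caches of the same type and level; if this CPU is present in one
 * of their shared CPU maps, reuse that cache's index, otherwise return
 * the next free index so the cache gets a new ID.
 */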
static int mk_cache_id(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu, char *type, int level)
{
        size_t i;
        int idx = 0;

        for (i = 0; i < cxt->ncaches; i++) {
                struct lscpu_cache *ca = &cxt->caches[i];

                if (ca->level != level || strcmp(ca->type, type) != 0)
                        continue;

                if (ca->sharedmap &&
                    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
                        return idx;
                idx++;
        }

        return idx;
}

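/*
 * Read one SPARC cache from the legacy sysfs attributes, e.g.
 * cpu<N>/l1_icache_size and cpu<N>/l1_icache_line_size; @type is the
 * 'i'/'d' infix (0 for unified caches).
 */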
static int read_sparc_onecache(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu,
                                int level, char *typestr, int type)
{
        struct lscpu_cache *ca;
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        uint32_t size;
        int rc, id;
        char buf[32];

        if (type)
                snprintf(buf, sizeof(buf), "l%d_%c", level, type);
        else
                snprintf(buf, sizeof(buf), "l%d_", level);

        rc = ul_path_readf_u32(sys, &size,
                        "cpu%d/%scache_size", num, buf);
        if (rc != 0)
                return rc;

        DBG(CPU, ul_debugobj(cpu, "#%d reading sparc %s cache", num, buf));

        id = mk_cache_id(cxt, cpu, typestr, level);

        ca = get_cache(cxt, typestr, level, id);
        if (!ca)
                ca = add_cache(cxt, typestr, level, id);

        if (!ca->name) {
                ul_path_readf_u32(sys, &ca->coherency_line_size,
                                "cpu%d/%scache_line_size", num, buf);
                assert(ca->type);

                if (type)
                        snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
                else
                        snprintf(buf, sizeof(buf), "L%d", ca->level);
                ca->name = xstrdup(buf);
                ca->size = size;
        }
        /* There is no sharedmap for the cache in /sys, so we assume that the
         * caches are not shared. Send a patch if your /sys provides this
         * information.
         */
        if (!ca->sharedmap) {
                size_t setsize = 0;

                ca->sharedmap = cpuset_alloc(cxt->maxcpus, &setsize, NULL);
                CPU_ZERO_S(setsize, ca->sharedmap);
                CPU_SET_S(num, setsize, ca->sharedmap);
        }

        return 0;
}

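/* Legacy SPARC layout: separate L1 instruction and data caches plus unified higher-level caches. */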
static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
        read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
        read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
        read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

        return 0;
}

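/*
 * Read cache details for one CPU from cpu<N>/cache/index<M>/ in sysfs;
 * fall back to the SPARC layout when no index<M> directories exist.
 */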
static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        char buf[256];
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        size_t i, ncaches = 0;

        while (ul_path_accessf(sys, F_OK,
                                "cpu%d/cache/index%zu",
                                num, ncaches) == 0)
                ncaches++;

        if (ncaches == 0 && ul_path_accessf(sys, F_OK,
                                "cpu%d/l1_icache_size", num) == 0)
                return read_sparc_caches(cxt, cpu);

        DBG(CPU, ul_debugobj(cpu, "#%d reading %zd caches", num, ncaches));

        for (i = 0; i < ncaches; i++) {
                struct lscpu_cache *ca;
                int id, level;

                if (ul_path_readf_s32(sys, &id, "cpu%d/cache/index%zu/id", num, i) != 0)
                        id = -1;
                if (ul_path_readf_s32(sys, &level, "cpu%d/cache/index%zu/level", num, i) != 0)
                        continue;
                if (ul_path_readf_buffer(sys, buf, sizeof(buf),
                                "cpu%d/cache/index%zu/type", num, i) <= 0)
                        continue;

                if (id == -1)
                        id = mk_cache_id(cxt, cpu, buf, level);

                ca = get_cache(cxt, buf, level, id);
                if (!ca)
                        ca = add_cache(cxt, buf, level, id);

                if (!ca->name) {
                        int type = 0;

                        assert(ca->type);

                        if (!strcmp(ca->type, "Data"))
                                type = 'd';
                        else if (!strcmp(ca->type, "Instruction"))
                                type = 'i';

                        if (type)
                                snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
                        else
                                snprintf(buf, sizeof(buf), "L%d", ca->level);

                        ca->name = xstrdup(buf);

                        ul_path_readf_u32(sys, &ca->ways_of_associativity,
                                        "cpu%d/cache/index%zu/ways_of_associativity", num, i);
                        ul_path_readf_u32(sys, &ca->physical_line_partition,
                                        "cpu%d/cache/index%zu/physical_line_partition", num, i);
                        ul_path_readf_u32(sys, &ca->number_of_sets,
                                        "cpu%d/cache/index%zu/number_of_sets", num, i);
                        ul_path_readf_u32(sys, &ca->coherency_line_size,
                                        "cpu%d/cache/index%zu/coherency_line_size", num, i);

                        ul_path_readf_string(sys, &ca->allocation_policy,
                                        "cpu%d/cache/index%zu/allocation_policy", num, i);
                        ul_path_readf_string(sys, &ca->write_policy,
                                        "cpu%d/cache/index%zu/write_policy", num, i);

                        /* cache size */
                        if (ul_path_readf_buffer(sys, buf, sizeof(buf),
                                        "cpu%d/cache/index%zu/size", num, i) > 0)
                                parse_size(buf, &ca->size, NULL);
                        else
                                ca->size = 0;
                }

                if (!ca->sharedmap)
                        /* information about how CPUs share different caches */
                        ul_path_readf_cpuset(sys, &ca->sharedmap, cxt->maxcpus,
                                        "cpu%d/cache/index%zu/shared_cpu_map", num, i);
        }

        return 0;
}

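/* Read core/socket/book/drawer IDs for one CPU from cpu<N>/topology/. */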
static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;

        if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
                return 0;

        DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));

        if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
                cpu->coreid = -1;
        if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
                cpu->socketid = -1;
        if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
                cpu->bookid = -1;
        if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
                cpu->drawerid = -1;

        return 0;
}

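/* Read the CPU polarization (horizontal or vertical dispatching, exported on s390). */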
static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        char mode[64];

        if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
                return 0;

        ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);

        DBG(CPU, ul_debugobj(cpu, "#%d reading polar=%s", num, mode));

        if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
                cpu->polarization = POLAR_VLOW;
        else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
                cpu->polarization = POLAR_VMEDIUM;
        else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
                cpu->polarization = POLAR_VHIGH;
        else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
                cpu->polarization = POLAR_HORIZONTAL;
        else
                cpu->polarization = POLAR_UNKNOWN;

        if (cpu->type)
                cpu->type->has_polarization = 1;
        return 0;
}

static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;

        if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
                return 0;

        DBG(CPU, ul_debugobj(cpu, "#%d reading address", num));

        ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
        if (cpu->type)
                cpu->type->has_addresses = 1;
        return 0;
}

static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;

        if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
                return 0;

        DBG(CPU, ul_debugobj(cpu, "#%d reading configure", num));

        ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
        if (cpu->type)
                cpu->type->has_configured = 1;
        return 0;
}

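/* Read min/max/current CPU frequencies from the cpufreq subsystem (sysfs reports kHz, stored here as MHz). */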
static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        int mhz;

        DBG(CPU, ul_debugobj(cpu, "#%d reading mhz", num));

        if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
                cpu->mhz_max_freq = (float) mhz / 1000;
        if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
                cpu->mhz_min_freq = (float) mhz / 1000;

        /* The default current-frequency value comes from /proc/cpuinfo (if
         * available). This /proc value is usually based on MSR registers
         * (APERF/MPERF) and it changes pretty often. It seems better to read
         * the frequency from the cpufreq subsystem, which provides the current
         * frequency for the current policy. There is also cpuinfo_cur_freq in
         * sysfs, but it's not always available.
         */
        if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/scaling_cur_freq", num) == 0)
                cpu->mhz_cur_freq = (float) mhz / 1000;

        if (cpu->type && (cpu->mhz_min_freq || cpu->mhz_max_freq))
                cpu->type->has_freq = 1;

        return 0;
}

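/* Helpers aggregating cpufreq values over all present CPUs of the given type. */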
float lsblk_cputype_get_maxmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i;
        float res = 0.0;

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
                        continue;
                res = max(res, cpu->mhz_max_freq);
        }
        return res;
}

float lsblk_cputype_get_minmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i;
        float res = -1.0;

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
                        continue;
                if (res < 0.0 || cpu->mhz_min_freq < res)
                        res = cpu->mhz_min_freq;
        }
        return res;
}

/* returns scaling (utilization) of the CPUs' frequency in percent */
float lsblk_cputype_get_scalmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i;
        float fmax = 0, fcur = 0;

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
                        continue;
                if (cpu->mhz_max_freq <= 0.0 || cpu->mhz_cur_freq <= 0.0)
                        continue;
                fmax += cpu->mhz_max_freq;
                fcur += cpu->mhz_cur_freq;
        }
        if (fcur <= 0.0)
                return 0.0;
        return fcur / fmax * 100;
}

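/*
 * Top-level entry point: read per-type topology counts, then per-CPU IDs,
 * polarization, address, configuration state, frequencies and caches.
 */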
int lscpu_read_topology(struct lscpu_cxt *cxt)
{
        size_t i;
        int rc = 0;

        for (i = 0; i < cxt->ncputypes; i++)
                rc += cputype_read_topology(cxt, cxt->cputypes[i]);

        for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || !cpu->type)
                        continue;

                DBG(CPU, ul_debugobj(cpu, "#%d reading topology", cpu->logical_id));

                rc = read_ids(cxt, cpu);
                if (!rc)
                        rc = read_polarization(cxt, cpu);
                if (!rc)
                        rc = read_address(cxt, cpu);
                if (!rc)
                        rc = read_configure(cxt, cpu);
                if (!rc)
                        rc = read_mhz(cxt, cpu);
                if (!rc)
                        rc = read_caches(cxt, cpu);
        }

        lscpu_sort_caches(cxt->caches, cxt->ncaches);
        DBG(GATHER, ul_debugobj(cxt, " L1d: %zu", lscpu_get_cache_full_size(cxt, "L1d", NULL)));
        DBG(GATHER, ul_debugobj(cxt, " L1i: %zu", lscpu_get_cache_full_size(cxt, "L1i", NULL)));
        DBG(GATHER, ul_debugobj(cxt, " L2: %zu", lscpu_get_cache_full_size(cxt, "L2", NULL)));
        DBG(GATHER, ul_debugobj(cxt, " L3: %zu", lscpu_get_cache_full_size(cxt, "L3", NULL)));

        return rc;
}