#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include "lscpu.h"

/* Add @set to @ary; if an equal set is already present, the redundant @set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items, cpu_set_t *set, size_t setsize)
{
	size_t i;

	if (!ary)
		return -EINVAL;

	for (i = 0; i < *items; i++) {
		if (CPU_EQUAL_S(setsize, set, ary[i]))
			break;
	}
	if (i == *items) {
		ary[*items] = set;
		++*items;
		return 0;
	}
	CPU_FREE(set);
	return 1;
}

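/* Free an array of CPU sets (and the array itself) built by add_cpuset_to_array(). */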
static void free_cpuset_array(cpu_set_t **ary, int items)
{
	int i;

	if (!ary)
		return;
	for (i = 0; i < items; i++)
		free(ary[i]);
	free(ary);
}

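/* Release all topology maps (core, socket, book, drawer) owned by the CPU type. */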
void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
{
	if (!ct)
		return;
	free_cpuset_array(ct->coremaps, ct->ncores);
	free_cpuset_array(ct->socketmaps, ct->nsockets);
	free_cpuset_array(ct->bookmaps, ct->nbooks);
	free_cpuset_array(ct->drawermaps, ct->ndrawers);
}

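/* Free @n cache descriptors, including their strings and shared-CPU maps. */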
void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
{
	size_t i;

	if (!caches)
		return;

	for (i = 0; i < n; i++) {
		struct lscpu_cache *c = &caches[i];

		DBG(MISC, ul_debug(" freeing cache #%zu %s::%d",
					i, c->name, c->id));

		free(c->name);
		free(c->type);
		free(c->allocation_policy);
		free(c->write_policy);
		free(c->sharedmap);
	}
	free(caches);
}

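/* qsort() comparison helper: order caches by name */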
static int cmp_cache(const void *a0, const void *b0)
{
	const struct lscpu_cache
		*a = (const struct lscpu_cache *) a0,
		*b = (const struct lscpu_cache *) b0;
	return strcmp(a->name, b->name);
}

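/* Sort caches by name (e.g. L1d, L1i, L2, L3). */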
void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
{
	if (caches && n)
		qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
}

/* Read topology for specified type */
static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i, npos;
	struct path_cxt *sys;
	int nthreads = 0, sw_topo = 0;
	FILE *fd;

	sys = cxt->syscpu;			/* /sys/devices/system/cpu/ */
	npos = cxt->npossibles;			/* possible CPUs */

	DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
				ct->vendor ?: "", ct->model ?: "", ct->modelname ?: ""));

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];
		cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
		cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
		int num, n = 0;

		if (!cpu || cpu->type != ct)
			continue;

		num = cpu->logical_id;
		if (ul_path_accessf(sys, F_OK,
					"cpu%d/topology/thread_siblings", num) != 0)
			continue;

		/* read topology maps */
		ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
					"cpu%d/topology/thread_siblings", num);
		ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
					"cpu%d/topology/core_siblings", num);
		ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
					"cpu%d/topology/book_siblings", num);
		ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
					"cpu%d/topology/drawer_siblings", num);

		if (thread_siblings)
			n = CPU_COUNT_S(cxt->setsize, thread_siblings);
		if (!n)
			n = 1;
		if (n > nthreads)
			nthreads = n;

		/* Allocate arrays for topology maps.
		 *
		 * For each map we make sure that it can have up to npos
		 * (number of possible CPUs) entries. This is because we
		 * cannot reliably calculate the number of cores, sockets
		 * and books on all architectures. E.g. completely
		 * virtualized architectures like s390 may have multiple
		 * sockets of different sizes.
		 */
		if (!ct->coremaps && thread_siblings)
			ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->socketmaps && core_siblings)
			ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->bookmaps && book_siblings)
			ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->drawermaps && drawer_siblings)
			ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));

		/* add to topology maps */
		if (thread_siblings)
			add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, cxt->setsize);
		if (core_siblings)
			add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, cxt->setsize);
		if (book_siblings)
			add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, cxt->setsize);
		if (drawer_siblings)
			add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, cxt->setsize);

	}

	/* s390 detects its cpu topology via /proc/sysinfo, if present.
	 * Simply using the cpu topology masks in sysfs will not give
	 * usable results since everything is virtualized. E.g.
	 * virtual core 0 may have only 1 cpu, but virtual core 2 may
	 * have five cpus.
	 * If the cpu topology is not exported (e.g. a 2nd level guest)
	 * fall back to the old calculation scheme.
	 */
	if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
		int t0, t1;
		char buf[BUFSIZ];

		DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (sscanf(buf, "CPU Topology SW: %d %d %zu %zu %zu %zu",
					&t0, &t1,
					&ct->ndrawers_per_system,
					&ct->nbooks_per_drawer,
					&ct->nsockets_per_book,
					&ct->ncores_per_socket) == 6) {
				sw_topo = 1;
				DBG(TYPE, ul_debugobj(ct, " using SW topology"));
				break;
			}
		}
		if (fd)
			fclose(fd);
	}

	ct->nthreads_per_core = nthreads;
	if (ct->mtid) {
		uint64_t x;
		if (ul_strtou64(ct->mtid, &x, 10) == 0 && x <= ULONG_MAX)
			ct->nthreads_per_core = (size_t) x + 1;
	}

	if (!sw_topo) {
		ct->ncores_per_socket = ct->nsockets ? ct->ncores / ct->nsockets : 0;
		ct->nsockets_per_book = ct->nbooks ? ct->nsockets / ct->nbooks : 0;
		ct->nbooks_per_drawer = ct->ndrawers ? ct->nbooks / ct->ndrawers : 0;
		ct->ndrawers_per_system = ct->ndrawers;
	}

	DBG(TYPE, ul_debugobj(ct, " nthreads: %zu (per core)", ct->nthreads_per_core));
	DBG(TYPE, ul_debugobj(ct, " ncores: %zu (%zu per socket)", ct->ncores, ct->ncores_per_socket));
	DBG(TYPE, ul_debugobj(ct, " nsockets: %zu (%zu per book)", ct->nsockets, ct->nsockets_per_book));
	DBG(TYPE, ul_debugobj(ct, " nbooks: %zu (%zu per drawer)", ct->nbooks, ct->nbooks_per_drawer));
	DBG(TYPE, ul_debugobj(ct, " ndrawers: %zu (%zu per system)", ct->ndrawers, ct->ndrawers_per_system));

	return 0;
}

/* count the total size of all instances of the cache "name" */
size_t lscpu_get_cache_full_size(struct lscpu_cxt *cxt, const char *name, int *instances)
{
	size_t i, sz = 0;

	if (instances)
		*instances = 0;

	for (i = 0; i < cxt->ncaches; i++) {
		if (strcmp(cxt->caches[i].name, name) == 0) {
			sz += cxt->caches[i].size;
			if (instances)
				(*instances)++;
		}
	}

	return sz;
}

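/* Return the cache with the given @name that is shared with @cpu, or NULL. */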
struct lscpu_cache *lscpu_cpu_get_cache(struct lscpu_cxt *cxt,
				struct lscpu_cpu *cpu, const char *name)
{
	size_t i;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (strcmp(ca->name, name) == 0 &&
		    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
			return ca;
	}

	return NULL;
}

/*
 * The cache is identified by type+level+id.
 */
static struct lscpu_cache *get_cache(struct lscpu_cxt *cxt,
		const char *type, int level, int id)
{
	size_t i;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];
		if (ca->id == id &&
		    ca->level == level &&
		    strcmp(ca->type, type) == 0)
			return ca;
	}
	return NULL;
}

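/* Append a new, zeroed cache descriptor (type+level+id) to the global list. */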
static struct lscpu_cache *add_cache(struct lscpu_cxt *cxt,
		const char *type, int level, int id)
{
	struct lscpu_cache *ca;

	cxt->ncaches++;
	cxt->caches = xreallocarray(cxt->caches,
				    cxt->ncaches, sizeof(*cxt->caches));

	ca = &cxt->caches[cxt->ncaches - 1];
	memset(ca, 0, sizeof(*ca));

	ca->id = id;
	ca->level = level;
	ca->type = xstrdup(type);

	DBG(GATHER, ul_debugobj(cxt, "add cache %s%d::%d", type, level, id));
	return ca;
}

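/* Generate a cache ID for kernels that do not export one in sysfs: return the
 * index of the already-known cache (same type and level) shared with @cpu, or
 * the next free index.
 */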
static int mk_cache_id(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu, char *type, int level)
{
	size_t i;
	int idx = 0;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (ca->level != level || strcmp(ca->type, type) != 0)
			continue;

		if (ca->sharedmap &&
		    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
			return idx;
		idx++;
	}

	return idx;
}

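/* Read one SPARC cache (cpu<N>/l<level>_<type>cache_size and ..._line_size) from sysfs and register it. */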
static int read_sparc_onecache(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu,
			       int level, char *typestr, int type)
{
	struct lscpu_cache *ca;
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	uint32_t size;
	int rc, id;
	char buf[32];

	if (type)
		snprintf(buf, sizeof(buf), "l%d_%c", level, type);
	else
		snprintf(buf, sizeof(buf), "l%d_", level);

	rc = ul_path_readf_u32(sys, &size,
			"cpu%d/%scache_size", num, buf);
	if (rc != 0)
		return rc;

	DBG(CPU, ul_debugobj(cpu, "#%d reading sparc %s cache", num, buf));

	id = mk_cache_id(cxt, cpu, typestr, level);

	ca = get_cache(cxt, typestr, level, id);
	if (!ca)
		ca = add_cache(cxt, typestr, level, id);

	if (!ca->name) {
		ul_path_readf_u32(sys, &ca->coherency_line_size,
					"cpu%d/%scache_line_size", num, buf);
		assert(ca->type);

		if (type)
			snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
		else
			snprintf(buf, sizeof(buf), "L%d", ca->level);
		ca->name = xstrdup(buf);
		ca->size = size;
	}
	/* There is no sharedmap for the cache in /sys; we assume that caches are
	 * not shared. Send a patch if your /sys provides other information.
	 */
	if (!ca->sharedmap) {
		size_t setsize = 0;

		ca->sharedmap = cpuset_alloc(cxt->maxcpus, &setsize, NULL);
		CPU_ZERO_S(setsize, ca->sharedmap);
		CPU_SET_S(num, setsize, ca->sharedmap);
	}

	return 0;
}

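/* SPARC exports caches as per-CPU l<level>_{i,d}cache_* attributes rather than cache/index<N> directories. */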
static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
	read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
	read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
	read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

	return 0;
}

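/* Walk cpu<N>/cache/index<N> in sysfs and fill the global list of caches. */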
static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	char buf[256];
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	size_t i, ncaches = 0;

	while (ul_path_accessf(sys, F_OK,
				"cpu%d/cache/index%zu",
				num, ncaches) == 0)
		ncaches++;

	if (ncaches == 0 && ul_path_accessf(sys, F_OK,
				"cpu%d/l1_icache_size", num) == 0)
		return read_sparc_caches(cxt, cpu);

	DBG(CPU, ul_debugobj(cpu, "#%d reading %zd caches", num, ncaches));

	for (i = 0; i < ncaches; i++) {
		struct lscpu_cache *ca;
		int id, level;

		if (ul_path_readf_s32(sys, &id, "cpu%d/cache/index%zu/id", num, i) != 0)
			id = -1;
		if (ul_path_readf_s32(sys, &level, "cpu%d/cache/index%zu/level", num, i) != 0)
			continue;
		if (ul_path_readf_buffer(sys, buf, sizeof(buf),
					"cpu%d/cache/index%zu/type", num, i) <= 0)
			continue;

		if (id == -1)
			id = mk_cache_id(cxt, cpu, buf, level);

		ca = get_cache(cxt, buf, level, id);
		if (!ca)
			ca = add_cache(cxt, buf, level, id);

		if (!ca->name) {
			int type = 0;

			assert(ca->type);

			if (!strcmp(ca->type, "Data"))
				type = 'd';
			else if (!strcmp(ca->type, "Instruction"))
				type = 'i';

			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			ul_path_readf_u32(sys, &ca->ways_of_associativity,
					"cpu%d/cache/index%zu/ways_of_associativity", num, i);
			ul_path_readf_u32(sys, &ca->physical_line_partition,
					"cpu%d/cache/index%zu/physical_line_partition", num, i);
			ul_path_readf_u32(sys, &ca->number_of_sets,
					"cpu%d/cache/index%zu/number_of_sets", num, i);
			ul_path_readf_u32(sys, &ca->coherency_line_size,
					"cpu%d/cache/index%zu/coherency_line_size", num, i);

			ul_path_readf_string(sys, &ca->allocation_policy,
					"cpu%d/cache/index%zu/allocation_policy", num, i);
			ul_path_readf_string(sys, &ca->write_policy,
					"cpu%d/cache/index%zu/write_policy", num, i);

			/* cache size */
			if (ul_path_readf_buffer(sys, buf, sizeof(buf),
					"cpu%d/cache/index%zu/size", num, i) > 0)
				parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		if (!ca->sharedmap)
			/* information about how CPUs share different caches */
			ul_path_readf_cpuset(sys, &ca->sharedmap, cxt->maxcpus,
					"cpu%d/cache/index%zu/shared_cpu_map", num, i);
	}

	return 0;
}

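/* Read per-CPU topology IDs (core, socket, book and drawer) from sysfs. */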
static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));

	if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
		cpu->coreid = -1;
	if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
		cpu->socketid = -1;
	if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
		cpu->bookid = -1;
	if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
		cpu->drawerid = -1;

	return 0;
}

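/* Read CPU polarization (horizontal/vertical; s390) from sysfs. */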
static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	char mode[64];

	if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
		return 0;

	ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);

	DBG(CPU, ul_debugobj(cpu, "#%d reading polar=%s", num, mode));

	if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VLOW;
	else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VMEDIUM;
	else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VHIGH;
	else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
		cpu->polarization = POLAR_HORIZONTAL;
	else
		cpu->polarization = POLAR_UNKNOWN;

	if (cpu->type)
		cpu->type->has_polarization = 1;
	return 0;
}

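/* Read the physical CPU address (s390) from sysfs. */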
static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading address", num));

	ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
	if (cpu->type)
		cpu->type->has_addresses = 1;
	return 0;
}

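/* Read whether the CPU is configured, i.e. usable by the OS (s390). */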
static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading configure", num));

	ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
	if (cpu->type)
		cpu->type->has_configured = 1;
	return 0;
}

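/* Read min/max/current CPU frequencies from the cpufreq subsystem; sysfs reports kHz, stored here as MHz. */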
static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	int mhz;

	DBG(CPU, ul_debugobj(cpu, "#%d reading mhz", num));

	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
		cpu->mhz_max_freq = (float) mhz / 1000;
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
		cpu->mhz_min_freq = (float) mhz / 1000;

	/* The default current-frequency value comes from /proc/cpuinfo (if
	 * available). This /proc value is usually based on MSR registers
	 * (APERF/MPERF) and it changes pretty often. It seems better to read
	 * the frequency from the cpufreq subsystem, which provides the current
	 * frequency for the current policy. There is also cpuinfo_cur_freq in
	 * sysfs, but it's not always available.
	 */
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/scaling_cur_freq", num) == 0)
		cpu->mhz_cur_freq = (float) mhz / 1000;

	if (cpu->type && (cpu->mhz_min_freq || cpu->mhz_max_freq))
		cpu->type->has_freq = 1;

	return 0;
}

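/* Return the maximum frequency (MHz) among present CPUs of the given type. */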
float lsblk_cputype_get_maxmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float res = 0.0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		res = max(res, cpu->mhz_max_freq);
	}
	return res;
}

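/* Return the minimum frequency (MHz) among present CPUs of the given type. */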
float lsblk_cputype_get_minmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float res = -1.0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		if (res < 0.0 || cpu->mhz_min_freq < res)
			res = cpu->mhz_min_freq;
	}
	return res;
}

/* returns how much of the maximum CPU frequency is currently used, in percent */
float lsblk_cputype_get_scalmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float fmax = 0, fcur = 0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		if (cpu->mhz_max_freq <= 0.0 || cpu->mhz_cur_freq <= 0.0)
			continue;
		fmax += cpu->mhz_max_freq;
		fcur += cpu->mhz_cur_freq;
	}
	if (fcur <= 0.0)
		return 0.0;
	return fcur / fmax * 100;
}

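/* Top-level reader: gather per-type topology and per-CPU IDs, polarization,
 * address, configuration, frequencies and caches. */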
int lscpu_read_topology(struct lscpu_cxt *cxt)
{
	size_t i;
	int rc = 0;

	for (i = 0; i < cxt->ncputypes; i++)
		rc += cputype_read_topology(cxt, cxt->cputypes[i]);

	for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || !cpu->type)
			continue;

		DBG(CPU, ul_debugobj(cpu, "#%d reading topology", cpu->logical_id));

		rc = read_ids(cxt, cpu);
		if (!rc)
			rc = read_polarization(cxt, cpu);
		if (!rc)
			rc = read_address(cxt, cpu);
		if (!rc)
			rc = read_configure(cxt, cpu);
		if (!rc)
			rc = read_mhz(cxt, cpu);
		if (!rc)
			rc = read_caches(cxt, cpu);
	}

	lscpu_sort_caches(cxt->caches, cxt->ncaches);
	DBG(GATHER, ul_debugobj(cxt, " L1d: %zu", lscpu_get_cache_full_size(cxt, "L1d", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L1i: %zu", lscpu_get_cache_full_size(cxt, "L1i", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L2: %zu", lscpu_get_cache_full_size(cxt, "L2", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L3: %zu", lscpu_get_cache_full_size(cxt, "L3", NULL)));

	return rc;
}