/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008-2023 Karel Zak <kzak@redhat.com>
 */
#include <ctype.h>
#include <err.h>
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include "lscpu.h"
/* add @set to the @ary, unnecessary set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items,
                               cpu_set_t *set, size_t setsize,
                               cpu_set_t *common_cpus_set)
{
        size_t i;

        if (!ary)
                return -EINVAL;

        /*
         * Check if @set has no cpu in common with the cpusets
         * saved in @ary and if so append @set to @ary.
         */
        for (i = 0; i < *items; i++) {
                CPU_AND_S(setsize, common_cpus_set, set, ary[i]);
                if (CPU_COUNT_S(setsize, common_cpus_set))
                        break;
        }
        if (i == *items) {
                ary[*items] = set;
                ++*items;
                return 0;
        }
        CPU_FREE(set);          /* duplicate of an already stored set */
        return 1;
}
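/*
 * Illustration (not from this file): on a 4-CPU machine with 2 threads
 * per core, the four per-cpu thread_siblings masks read as {0,1}, {0,1},
 * {2,3}, {2,3}. add_cpuset_to_array() stores only the first occurrence
 * of each mask, so the array ends up with two entries and *items == 2.
 */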
static void free_cpuset_array(cpu_set_t **ary, int items)
{
        int i;

        if (!ary)
                return;
        for (i = 0; i < items; i++)
                free(ary[i]);
        free(ary);
}
void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
{
        if (!ct)
                return;
        free_cpuset_array(ct->coremaps, ct->ncores);
        free_cpuset_array(ct->socketmaps, ct->nsockets);
        free_cpuset_array(ct->bookmaps, ct->nbooks);
        free_cpuset_array(ct->drawermaps, ct->ndrawers);
}
void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
{
        size_t i;

        if (!caches)
                return;

        for (i = 0; i < n; i++) {
                struct lscpu_cache *c = &caches[i];

                DBG(MISC, ul_debug(" freeing cache #%zu %s::%d",
                                        i, c->name, c->id));

                free(c->name);
                free(c->type);
                free(c->allocation_policy);
                free(c->write_policy);
                free(c->sharedmap);
        }
        free(caches);
}
static int cmp_cache(const void *a0, const void *b0)
{
        const struct lscpu_cache
                *a = (const struct lscpu_cache *) a0,
                *b = (const struct lscpu_cache *) b0;
        return strcmp(a->name, b->name);
}
void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
{
        if (caches && n)
                qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
}
/*
 * Get the hotplug state number representing a completely online
 * cpu from /sys/devices/system/cpu/hotplug/state
 */
static int get_online_state(struct path_cxt *sys)
{
        int hp_online_state_val = 0, page_size, rc;
        char *buf, *strp;

        hp_online_state_val = -1;

        /* sysfs text files have size = page size */
        page_size = getpagesize();

        buf = (char *) xmalloc(page_size);
        rc = ul_path_readf_buffer(sys, buf, page_size, "hotplug/states");
        if (rc <= 0)
                goto done;

        strp = strstr(buf, ": online");
        if (!strp)
                goto done;

        strp--;                 /* get digits before ': online' */
        while (strp >= buf && isdigit(*strp))
                strp--;
        ul_strtos32(strp + 1, &hp_online_state_val, 10);
done:
        free(buf);
        return hp_online_state_val;
}
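/*
 * For illustration: /sys/devices/system/cpu/hotplug/states holds one
 * "<number>: <name>" pair per hotplug state, e.g. a line "233: online".
 * The parser above walks backwards from ": online" to collect the digits
 * (233 here); the numeric value is kernel-version dependent.
 */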
/* Read topology for specified type */
static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i, npos;
        struct path_cxt *sys;
        int nthreads = 0, sw_topo = 0, rc, hp_state, hp_online_state;
        FILE *fd;
        cpu_set_t *temp_set;

        sys = cxt->syscpu;              /* /sys/devices/system/cpu/ */
        npos = cxt->npossibles;         /* possible CPUs */

        DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
                                ct->vendor ?: "", ct->model ?: "", ct->modelname ?: ""));

        hp_online_state = get_online_state(sys);

        temp_set = CPU_ALLOC(cxt->maxcpus);
        if (!temp_set)
                err(EXIT_FAILURE, _("cpuset_alloc failed"));
        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];
                cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
                cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
                int num, n;

                if (!cpu || cpu->type != ct)
                        continue;

                num = cpu->logical_id;
                if (ul_path_accessf(sys, F_OK,
                                "cpu%d/topology/thread_siblings", num) != 0)
                        continue;

                /*
                 * Ignore cpus which are not fully online.
                 * If hp_online_state is negative/zero or rc is negative,
                 * the online state could not be read correctly; skip this check.
                 */
                rc = ul_path_readf_s32(sys, &hp_state, "cpu%d/hotplug/state", num);
                if (hp_online_state > 0 && rc >= 0 && hp_state != hp_online_state)
                        continue;
                /* read topology maps */
                ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
                                        "cpu%d/topology/thread_siblings", num);
                ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
                                        "cpu%d/topology/core_siblings", num);
                ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
                                        "cpu%d/topology/book_siblings", num);
                ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
                                        "cpu%d/topology/drawer_siblings", num);

                n = CPU_COUNT_S(cxt->setsize, thread_siblings);
                if (!n)
                        n = 1;
                if (n > nthreads)
                        nthreads = n;

                /* Allocate arrays for topology maps.
                 *
                 * For each map we make sure that it can have up to ncpuspos
                 * entries. This is because we cannot reliably calculate the
                 * number of cores, sockets and books on all architectures.
                 * E.g. completely virtualized architectures like s390 may
                 * have multiple sockets of different sizes.
                 */
                if (!ct->coremaps && thread_siblings)
                        ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
                if (!ct->socketmaps && core_siblings)
                        ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
                if (!ct->bookmaps && book_siblings)
                        ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
                if (!ct->drawermaps && drawer_siblings)
                        ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));

                /* add to topology maps */
                if (thread_siblings)
                        add_cpuset_to_array(ct->coremaps, &ct->ncores,
                                        thread_siblings, cxt->setsize, temp_set);
                if (core_siblings)
                        add_cpuset_to_array(ct->socketmaps, &ct->nsockets,
                                        core_siblings, cxt->setsize, temp_set);
                if (book_siblings)
                        add_cpuset_to_array(ct->bookmaps, &ct->nbooks,
                                        book_siblings, cxt->setsize, temp_set);
                if (drawer_siblings)
                        add_cpuset_to_array(ct->drawermaps, &ct->ndrawers,
                                        drawer_siblings, cxt->setsize, temp_set);
        }
        CPU_FREE(temp_set);
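        /*
         * Note: each *_siblings file is a hex cpumask (e.g. "00000003" for
         * CPUs 0-1); ul_path_readf_cpuset() converts it to a cpu_set_t, and
         * the dedup in add_cpuset_to_array() turns the per-cpu masks into
         * one entry per core/socket/book/drawer. (Example value is
         * illustrative.)
         */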
        /* s390 detects its cpu topology via /proc/sysinfo, if present.
         * Using simply the cpu topology masks in sysfs will not give
         * usable results since everything is virtualized. E.g.
         * virtual core 0 may have only 1 cpu, but virtual core 2 may
         * have 3 cpus.
         *
         * If the cpu topology is not exported (e.g. 2nd level guest)
         * fall back to the old calculation scheme.
         */
        if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
                int t0, t1;
                char buf[BUFSIZ];

                DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));

                while (fgets(buf, sizeof(buf), fd) != NULL) {
                        if (sscanf(buf, "CPU Topology SW: %d %d %zu %zu %zu %zu",
                                        &t0, &t1,
                                        &ct->ndrawers_per_system,
                                        &ct->nbooks_per_drawer,
                                        &ct->nsockets_per_book,
                                        &ct->ncores_per_socket) == 6) {
                                sw_topo = 1;
                                DBG(TYPE, ul_debugobj(ct, " using SW topology"));
                                break;
                        }
                }
                fclose(fd);
        }
        ct->nthreads_per_core = nthreads;

        if (ct->mtid) {
                uint64_t x = 0;

                /* the maximum thread id means "threads per core - 1" */
                if (ul_strtou64(ct->mtid, &x, 10) == 0 && x <= ULONG_MAX)
                        ct->nthreads_per_core = (size_t) x + 1;
        }

        if (!sw_topo) {
                ct->ncores_per_socket = ct->nsockets ? ct->ncores / ct->nsockets : 0;
                ct->nsockets_per_book = ct->nbooks ? ct->nsockets / ct->nbooks : 0;
                ct->nbooks_per_drawer = ct->ndrawers ? ct->nbooks / ct->ndrawers : 0;
                ct->ndrawers_per_system = ct->ndrawers;
        }
        DBG(TYPE, ul_debugobj(ct, " nthreads: %zu (per core)", ct->nthreads_per_core));
        DBG(TYPE, ul_debugobj(ct, " ncores: %zu (%zu per socket)", ct->ncores, ct->ncores_per_socket));
        DBG(TYPE, ul_debugobj(ct, " nsockets: %zu (%zu per book)", ct->nsockets, ct->nsockets_per_book));
        DBG(TYPE, ul_debugobj(ct, " nbooks: %zu (%zu per drawer)", ct->nbooks, ct->nbooks_per_drawer));
        DBG(TYPE, ul_debugobj(ct, " ndrawers: %zu (%zu per system)", ct->ndrawers, ct->ndrawers_per_system));

        return 0;
}
/* count size of all instances of the "name" */
size_t lscpu_get_cache_full_size(struct lscpu_cxt *cxt, const char *name, int *instances)
{
        size_t i, sz = 0;

        if (instances)
                *instances = 0;

        for (i = 0; i < cxt->ncaches; i++) {
                if (strcmp(cxt->caches[i].name, name) == 0) {
                        sz += cxt->caches[i].size;
                        if (instances)
                                (*instances)++;
                }
        }

        return sz;
}
struct lscpu_cache *lscpu_cpu_get_cache(struct lscpu_cxt *cxt,
                                struct lscpu_cpu *cpu, const char *name)
{
        size_t i;

        for (i = 0; i < cxt->ncaches; i++) {
                struct lscpu_cache *ca = &cxt->caches[i];

                if (ca->sharedmap &&
                    strcmp(ca->name, name) == 0 &&
                    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
                        return ca;
        }

        return NULL;
}
/*
 * The cache is identified by type+level+id.
 */
static struct lscpu_cache *get_cache(struct lscpu_cxt *cxt,
                                const char *type, int level, int id)
{
        size_t i;

        for (i = 0; i < cxt->ncaches; i++) {
                struct lscpu_cache *ca = &cxt->caches[i];

                if (ca->id == id &&
                    ca->level == level &&
                    strcmp(ca->type, type) == 0)
                        return ca;
        }
        return NULL;
}
static struct lscpu_cache *add_cache(struct lscpu_cxt *cxt,
                                const char *type, int level, int id)
{
        struct lscpu_cache *ca;

        cxt->ncaches++;
        cxt->caches = xreallocarray(cxt->caches,
                                    cxt->ncaches, sizeof(*cxt->caches));

        ca = &cxt->caches[cxt->ncaches - 1];
        memset(ca, 0, sizeof(*ca));

        ca->id = id;
        ca->level = level;
        ca->type = xstrdup(type);

        DBG(GATHER, ul_debugobj(cxt, "add cache %s%d::%d", type, level, id));
        return ca;
}
static int mk_cache_id(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu, char *type, int level)
{
        size_t i;
        int idx = 0;

        for (i = 0; i < cxt->ncaches; i++) {
                struct lscpu_cache *ca = &cxt->caches[i];

                if (ca->level != level || strcmp(ca->type, type) != 0)
                        continue;

                if (ca->sharedmap &&
                    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
                        return idx;
                idx++;
        }

        return idx;
}
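/*
 * Illustration: mk_cache_id() synthesizes an id when sysfs provides no
 * .../cache/index<n>/id file. If two level-2 "Unified" caches are already
 * known and neither sharedmap covers this cpu, the call returns 2, so the
 * cpu's cache becomes the third instance of that type+level.
 */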
static int read_sparc_onecache(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu,
                               int level, char *typestr, int type)
{
        struct lscpu_cache *ca;
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        uint32_t size;
        int rc, id;
        char buf[32];

        if (type)
                snprintf(buf, sizeof(buf), "l%d_%c", level, type);
        else
                snprintf(buf, sizeof(buf), "l%d_", level);

        rc = ul_path_readf_u32(sys, &size,
                        "cpu%d/%scache_size", num, buf);
        if (rc != 0)
                return rc;

        DBG(CPU, ul_debugobj(cpu, "#%d reading sparc %s cache", num, buf));

        id = mk_cache_id(cxt, cpu, typestr, level);

        ca = get_cache(cxt, typestr, level, id);
        if (!ca)
                ca = add_cache(cxt, typestr, level, id);

        if (!ca->name) {
                ul_path_readf_u32(sys, &ca->coherency_line_size,
                                        "cpu%d/%scache_line_size", num, buf);

                if (type)
                        snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
                else
                        snprintf(buf, sizeof(buf), "L%d", ca->level);
                ca->name = xstrdup(buf);
                ca->size = size;
        }

        /* There is no sharedmap of the cache in /sys, we assume that caches are
         * not shared. Send a patch if your /sys provides another information.
         */
        if (!ca->sharedmap) {
                size_t setsize = 0;

                ca->sharedmap = cpuset_alloc(cxt->maxcpus, &setsize, NULL);
                CPU_ZERO_S(setsize, ca->sharedmap);
                CPU_SET_S(num, setsize, ca->sharedmap);
        }

        return 0;
}
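/*
 * SPARC kernels expose caches as flat per-cpu attributes instead of the
 * cache/index<n> hierarchy, e.g. "cpu0/l1_icache_size" or (with the buf
 * built above) "cpu0/l2_cache_size"; the calls below map L1i/L1d/L2/L3
 * onto the same lscpu_cache records as the generic code path.
 */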
static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
        read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
        read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
        read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

        return 0;
}
static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        char buf[256];
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        size_t i, ncaches = 0;

        while (ul_path_accessf(sys, F_OK,
                                "cpu%d/cache/index%zu",
                                num, ncaches) == 0)
                ncaches++;

        if (ncaches == 0 && ul_path_accessf(sys, F_OK,
                                "cpu%d/l1_icache_size", num) == 0)
                return read_sparc_caches(cxt, cpu);

        DBG(CPU, ul_debugobj(cpu, "#%d reading %zd caches", num, ncaches));
        for (i = 0; i < ncaches; i++) {
                struct lscpu_cache *ca;
                int id, level;

                if (ul_path_readf_s32(sys, &id, "cpu%d/cache/index%zu/id", num, i) != 0)
                        id = -1;
                if (ul_path_readf_s32(sys, &level, "cpu%d/cache/index%zu/level", num, i) != 0)
                        continue;
                if (ul_path_readf_buffer(sys, buf, sizeof(buf),
                                        "cpu%d/cache/index%zu/type", num, i) <= 0)
                        continue;

                if (id < 0)
                        id = mk_cache_id(cxt, cpu, buf, level);

                ca = get_cache(cxt, buf, level, id);
                if (!ca)
                        ca = add_cache(cxt, buf, level, id);
                if (!ca->name) {
                        int type = 0;

                        /* cache type */
                        if (!strcmp(ca->type, "Data"))
                                type = 'd';
                        else if (!strcmp(ca->type, "Instruction"))
                                type = 'i';

                        /* cache name */
                        if (type)
                                snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
                        else
                                snprintf(buf, sizeof(buf), "L%d", ca->level);

                        ca->name = xstrdup(buf);
                        ul_path_readf_u32(sys, &ca->ways_of_associativity,
                                        "cpu%d/cache/index%zu/ways_of_associativity", num, i);
                        ul_path_readf_u32(sys, &ca->physical_line_partition,
                                        "cpu%d/cache/index%zu/physical_line_partition", num, i);
                        ul_path_readf_u32(sys, &ca->number_of_sets,
                                        "cpu%d/cache/index%zu/number_of_sets", num, i);
                        ul_path_readf_u32(sys, &ca->coherency_line_size,
                                        "cpu%d/cache/index%zu/coherency_line_size", num, i);

                        ul_path_readf_string(sys, &ca->allocation_policy,
                                        "cpu%d/cache/index%zu/allocation_policy", num, i);
                        ul_path_readf_string(sys, &ca->write_policy,
                                        "cpu%d/cache/index%zu/write_policy", num, i);

                        /* cache size */
                        if (ul_path_readf_buffer(sys, buf, sizeof(buf),
                                        "cpu%d/cache/index%zu/size", num, i) > 0)
                                ul_parse_size(buf, &ca->size, NULL);
                        else
                                ca->size = 0;
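                        /*
                         * e.g. a sysfs "size" file containing "32K" makes
                         * ul_parse_size() set ca->size to 32768; the K/M/G
                         * suffixes are binary (1024-based) multipliers.
                         */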
                }

                if (!ca->sharedmap)
                        /* information about how CPUs share different caches */
                        ul_path_readf_cpuset(sys, &ca->sharedmap, cxt->maxcpus,
                                        "cpu%d/cache/index%zu/shared_cpu_map", num, i);
        }

        return 0;
}
static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;

        if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
                return 0;

        DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));

        if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
                cpu->coreid = -1;
        if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
                cpu->socketid = -1;
        if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
                cpu->bookid = -1;
        if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
                cpu->drawerid = -1;

        return 0;
}
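/*
 * For reference: each of these sysfs topology files holds a plain decimal
 * integer (e.g. "cpu0/topology/core_id" -> "0"); -1 is used above as the
 * in-memory marker for an id the kernel does not export.
 */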
static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        char mode[64];

        if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
                return 0;

        ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);

        DBG(CPU, ul_debugobj(cpu, "#%d reading polar=%s", num, mode));

        if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
                cpu->polarization = POLAR_VLOW;
        else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
                cpu->polarization = POLAR_VMEDIUM;
        else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
                cpu->polarization = POLAR_VHIGH;
        else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
                cpu->polarization = POLAR_HORIZONTAL;
        else
                cpu->polarization = POLAR_UNKNOWN;

        if (cpu->type)
                cpu->type->has_polarization = 1;
        return 0;
}
static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;

        if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
                return 0;

        DBG(CPU, ul_debugobj(cpu, "#%d reading address", num));

        ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
        if (cpu->type)
                cpu->type->has_addresses = 1;
        return 0;
}
static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;

        if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
                return 0;

        DBG(CPU, ul_debugobj(cpu, "#%d reading configure", num));

        ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
        if (cpu->type)
                cpu->type->has_configured = 1;
        return 0;
}
static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
        struct path_cxt *sys = cxt->syscpu;
        int num = cpu->logical_id;
        int mhz;

        DBG(CPU, ul_debugobj(cpu, "#%d reading mhz", num));

        if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
                cpu->mhz_max_freq = (float) mhz / 1000;
        if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
                cpu->mhz_min_freq = (float) mhz / 1000;

        /* The default current-frequency value comes from /proc/cpuinfo (if
         * available). This /proc value is usually based on MSR registers
         * (APERF/MPERF) and it changes pretty often. It seems better to read
         * the frequency from the cpufreq subsystem, which provides the current
         * frequency for the current policy. There is also cpuinfo_cur_freq in
         * sysfs, but it's not always available.
         */
        if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/scaling_cur_freq", num) == 0)
                cpu->mhz_cur_freq = (float) mhz / 1000;

        if (cpu->type && (cpu->mhz_min_freq || cpu->mhz_max_freq))
                cpu->type->has_freq = 1;

        return 0;
}
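/*
 * Note: the cpufreq sysfs files read above are in kHz, hence the division
 * by 1000; e.g. a scaling_cur_freq of 2400000 is reported as 2400.0 MHz.
 */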
float lsblk_cputype_get_maxmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i;
        float res = 0.0;

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
                        continue;
                res = max(res, cpu->mhz_max_freq);
        }
        return res;
}
float lsblk_cputype_get_minmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i;
        float res = -1.0;

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
                        continue;
                if (!cpu->mhz_min_freq)
                        continue;
                if (res < 0.0 || cpu->mhz_min_freq < res)
                        res = cpu->mhz_min_freq;
        }
        return res;
}
/* returns scaling (use) of CPUs freq. in percent */
float lsblk_cputype_get_scalmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        size_t i;
        float fmax = 0, fcur = 0;

        for (i = 0; i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
                        continue;
                if (cpu->mhz_max_freq <= 0.0 || cpu->mhz_cur_freq <= 0.0)
                        continue;
                fmax += cpu->mhz_max_freq;
                fcur += cpu->mhz_cur_freq;
        }
        if (fmax <= 0.0)
                return 0.0;

        return fcur / fmax * 100;
}
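/*
 * Worked example: two present cpus of the same type, each with
 * mhz_max_freq 3000.0 and current frequencies 1500.0 and 3000.0, give
 * fmax = 6000, fcur = 4500, i.e. a reported scaling of 75%.
 */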
int lscpu_read_topology(struct lscpu_cxt *cxt)
{
        size_t i;
        int rc = 0;

        for (i = 0; i < cxt->ncputypes; i++)
                rc += cputype_read_topology(cxt, cxt->cputypes[i]);

        for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (!cpu || !cpu->type)
                        continue;

                DBG(CPU, ul_debugobj(cpu, "#%d reading topology", cpu->logical_id));

                rc = read_ids(cxt, cpu);
                if (!rc)
                        rc = read_polarization(cxt, cpu);
                if (!rc)
                        rc = read_address(cxt, cpu);
                if (!rc)
                        rc = read_configure(cxt, cpu);
                if (!rc)
                        rc = read_mhz(cxt, cpu);
                if (!rc)
                        rc = read_caches(cxt, cpu);
        }

        lscpu_sort_caches(cxt->caches, cxt->ncaches);
        DBG(GATHER, ul_debugobj(cxt, " L1d: %zu", lscpu_get_cache_full_size(cxt, "L1d", NULL)));
        DBG(GATHER, ul_debugobj(cxt, " L1i: %zu", lscpu_get_cache_full_size(cxt, "L1i", NULL)));
        DBG(GATHER, ul_debugobj(cxt, " L2: %zu", lscpu_get_cache_full_size(cxt, "L2", NULL)));
        DBG(GATHER, ul_debugobj(cxt, " L3: %zu", lscpu_get_cache_full_size(cxt, "L3", NULL)));

        return rc;
}