/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008-2023 Karel Zak <kzak@redhat.com>
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#include "lscpu.h"
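/*
 * Overview: this file collects CPU topology (threads, cores, sockets, books,
 * drawers), cache, frequency and s390-specific attributes (polarization,
 * address, configured state) from /sys/devices/system/cpu and /proc, and
 * stores the results in the lscpu_cxt, lscpu_cputype and lscpu_cpu structures.
 */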
/* add @set to the @ary, unnecessary set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items, cpu_set_t *set, size_t setsize)
{
	size_t i;

	if (!ary)
		return -EINVAL;

	for (i = 0; i < *items; i++) {
		if (CPU_EQUAL_S(setsize, set, ary[i]))
			break;
	}
	if (i == *items) {
		/* unique set -- keep it */
		ary[*items] = set;
		++*items;
		return 0;
	}
	/* duplicate of an already stored set */
	CPU_FREE(set);
	return 1;
}

static void free_cpuset_array(cpu_set_t **ary, int items)
{
	int i;

	if (!ary)
		return;
	for (i = 0; i < items; i++)
		free(ary[i]);
	free(ary);
}

void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
{
	if (!ct)
		return;
	free_cpuset_array(ct->coremaps, ct->ncores);
	free_cpuset_array(ct->socketmaps, ct->nsockets);
	free_cpuset_array(ct->bookmaps, ct->nbooks);
	free_cpuset_array(ct->drawermaps, ct->ndrawers);
}

void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
{
	size_t i;

	if (!caches)
		return;

	for (i = 0; i < n; i++) {
		struct lscpu_cache *c = &caches[i];

		DBG(MISC, ul_debug(" freeing cache #%zu %s::%d",
					i, c->name, c->id));

		free(c->name);
		free(c->type);
		free(c->allocation_policy);
		free(c->write_policy);
		free(c->sharedmap);
	}
	free(caches);
}

static int cmp_cache(const void *a0, const void *b0)
{
	const struct lscpu_cache
		*a = (const struct lscpu_cache *) a0,
		*b = (const struct lscpu_cache *) b0;
	return strcmp(a->name, b->name);
}

void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
{
	if (caches && n)
		qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
}
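/*
 * Note: the per-type counters below (ncores, nsockets, nbooks, ndrawers) are
 * derived from the number of *unique* sibling masks collected into coremaps,
 * socketmaps, bookmaps and drawermaps; add_cpuset_to_array() drops duplicate
 * masks, so each stored mask represents one core, socket, book or drawer.
 */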
/* Read topology for specified type */
static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i, npos;
	struct path_cxt *sys;
	int nthreads = 0, sw_topo = 0;
	FILE *fd;

	sys = cxt->syscpu;			/* /sys/devices/system/cpu/ */
	npos = cxt->npossibles;			/* possible CPUs */

	DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
				ct->vendor ?: "", ct->model ?: "", ct->modelname ?: ""));

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];
		cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
		cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
		int num, n;

		if (!cpu || cpu->type != ct)
			continue;

		num = cpu->logical_id;
		if (ul_path_accessf(sys, F_OK,
					"cpu%d/topology/thread_siblings", num) != 0)
			continue;

		/* read topology maps */
		ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
					"cpu%d/topology/thread_siblings", num);
		ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
					"cpu%d/topology/core_siblings", num);
		ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
					"cpu%d/topology/book_siblings", num);
		ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
					"cpu%d/topology/drawer_siblings", num);

		/* threads within one core */
		n = CPU_COUNT_S(cxt->setsize, thread_siblings);
		if (!n)
			n = 1;
		if (n > nthreads)
			nthreads = n;

		/* Allocate arrays for topology maps.
		 *
		 * For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		if (!ct->coremaps && thread_siblings)
			ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->socketmaps && core_siblings)
			ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->bookmaps && book_siblings)
			ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->drawermaps && drawer_siblings)
			ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));

		/* add to topology maps */
		if (thread_siblings)
			add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, cxt->setsize);
		if (core_siblings)
			add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, cxt->setsize);
		if (book_siblings)
			add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, cxt->setsize);
		if (drawer_siblings)
			add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, cxt->setsize);
	}
	/* s390 detects its cpu topology via /proc/sysinfo, if present.
	 * Using simply the cpu topology masks in sysfs will not give
	 * usable results since everything is virtualized. E.g.
	 * virtual core 0 may have only 1 cpu, but virtual core 2 may
	 * have 6 cpus.
	 *
	 * If the cpu topology is not exported (e.g. 2nd level guest)
	 * fall back to old calculation scheme.
	 */
	if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
		int t0, t1;	/* first two "SW" fields are not used */
		char buf[BUFSIZ];

		DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (sscanf(buf, "CPU Topology SW: %d %d %zu %zu %zu %zu",
					&t0, &t1,
					&ct->ndrawers_per_system,
					&ct->nbooks_per_drawer,
					&ct->nsockets_per_book,
					&ct->ncores_per_socket) == 6) {
				sw_topo = 1;
				DBG(TYPE, ul_debugobj(ct, " using SW topology"));
				break;
			}
		}
		fclose(fd);
	}
	ct->nthreads_per_core = nthreads;
	if (ct->mtid) {
		uint64_t x;

		if (ul_strtou64(ct->mtid, &x, 10) == 0 && x <= ULONG_MAX)
			ct->nthreads_per_core = (size_t) x + 1;
	}

	if (!sw_topo) {
		ct->ncores_per_socket = ct->nsockets ? ct->ncores / ct->nsockets : 0;
		ct->nsockets_per_book = ct->nbooks   ? ct->nsockets / ct->nbooks  : 0;
		ct->nbooks_per_drawer = ct->ndrawers ? ct->nbooks / ct->ndrawers  : 0;
		ct->ndrawers_per_system = ct->ndrawers;
	}

	DBG(TYPE, ul_debugobj(ct, " nthreads: %zu (per core)", ct->nthreads_per_core));
	DBG(TYPE, ul_debugobj(ct, " ncores: %zu (%zu per socket)", ct->ncores, ct->ncores_per_socket));
	DBG(TYPE, ul_debugobj(ct, " nsockets: %zu (%zu per book)", ct->nsockets, ct->nsockets_per_book));
	DBG(TYPE, ul_debugobj(ct, " nbooks: %zu (%zu per drawer)", ct->nbooks, ct->nbooks_per_drawer));
	DBG(TYPE, ul_debugobj(ct, " ndrawers: %zu (%zu per system)", ct->ndrawers, ct->ndrawers_per_system));

	return 0;
}
/* count size of all instances of the "name" */
size_t lscpu_get_cache_full_size(struct lscpu_cxt *cxt, const char *name, int *instances)
{
	size_t i, sz = 0;

	if (instances)
		*instances = 0;

	for (i = 0; i < cxt->ncaches; i++) {
		if (strcmp(cxt->caches[i].name, name) == 0) {
			sz += cxt->caches[i].size;
			if (instances)
				(*instances)++;
		}
	}

	return sz;
}

struct lscpu_cache *lscpu_cpu_get_cache(struct lscpu_cxt *cxt,
				struct lscpu_cpu *cpu, const char *name)
{
	size_t i;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (ca->sharedmap &&
		    strcmp(ca->name, name) == 0 &&
		    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
			return ca;
	}

	return NULL;
}

/*
 * The cache is identified by type+level+id.
 */
static struct lscpu_cache *get_cache(struct lscpu_cxt *cxt,
				const char *type, int level, int id)
{
	size_t i;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (ca->id == id &&
		    ca->level == level &&
		    strcmp(ca->type, type) == 0)
			return ca;
	}
	return NULL;
}

static struct lscpu_cache *add_cache(struct lscpu_cxt *cxt,
				const char *type, int level, int id)
{
	struct lscpu_cache *ca;

	cxt->ncaches++;
	cxt->caches = xreallocarray(cxt->caches,
				    cxt->ncaches, sizeof(*cxt->caches));

	ca = &cxt->caches[cxt->ncaches - 1];
	memset(ca, 0, sizeof(*ca));

	ca->id = id;
	ca->level = level;
	ca->type = xstrdup(type);

	DBG(GATHER, ul_debugobj(cxt, "add cache %s%d::%d", type, level, id));
	return ca;
}
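/*
 * Synthesize a cache id for kernels whose sysfs cache directories have no
 * "id" attribute: count the already-known caches of the same type and level
 * that do not include this CPU in their shared_cpu_map, and use that count
 * as the next free id.
 */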
static int mk_cache_id(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu, char *type, int level)
{
	size_t i;
	int idx = 0;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (ca->level != level || strcmp(ca->type, type) != 0)
			continue;

		if (ca->sharedmap &&
		    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
			break;
		idx++;
	}

	return idx;
}
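/*
 * SPARC exposes cache geometry directly under cpuN/ (e.g. l1_icache_size,
 * l1_dcache_size, l2_cache_size and the matching *_line_size attributes)
 * rather than through the generic cpuN/cache/indexM hierarchy; the "buf"
 * prefix built below selects the per-level, per-type attribute name.
 */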
static int read_sparc_onecache(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu,
			       int level, char *typestr, int type)
{
	struct lscpu_cache *ca;
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	uint32_t size;
	int rc, id;
	char buf[32];

	if (type)
		snprintf(buf, sizeof(buf), "l%d_%c", level, type);
	else
		snprintf(buf, sizeof(buf), "l%d_", level);

	rc = ul_path_readf_u32(sys, &size,
			"cpu%d/%scache_size", num, buf);
	if (rc != 0)
		return rc;

	DBG(CPU, ul_debugobj(cpu, "#%d reading sparc %s cache", num, buf));

	id = mk_cache_id(cxt, cpu, typestr, level);

	ca = get_cache(cxt, typestr, level, id);
	if (!ca)
		ca = add_cache(cxt, typestr, level, id);

	if (!ca->name) {
		ul_path_readf_u32(sys, &ca->coherency_line_size,
					"cpu%d/%scache_line_size", num, buf);

		if (type)
			snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
		else
			snprintf(buf, sizeof(buf), "L%d", ca->level);
		ca->name = xstrdup(buf);
		ca->size = size;
	}

	/* There is no sharedmap of the cache in /sys, we assume that caches are
	 * not shared. Send a patch if your /sys provides this information.
	 */
	if (!ca->sharedmap) {
		size_t setsize = 0;

		ca->sharedmap = cpuset_alloc(cxt->maxcpus, &setsize, NULL);
		CPU_ZERO_S(setsize, ca->sharedmap);
		CPU_SET_S(num, setsize, ca->sharedmap);
	}

	return 0;
}

static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
	read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
	read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
	read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

	return 0;
}
static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	char buf[256];
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	size_t i, ncaches = 0;

	while (ul_path_accessf(sys, F_OK,
				"cpu%d/cache/index%zu",
				num, ncaches) == 0)
		ncaches++;

	if (ncaches == 0 && ul_path_accessf(sys, F_OK,
				"cpu%d/l1_icache_size", num) == 0)
		return read_sparc_caches(cxt, cpu);

	DBG(CPU, ul_debugobj(cpu, "#%d reading %zd caches", num, ncaches));

	for (i = 0; i < ncaches; i++) {
		struct lscpu_cache *ca;
		int id, level;

		if (ul_path_readf_s32(sys, &id, "cpu%d/cache/index%zu/id", num, i) != 0)
			id = -1;
		if (ul_path_readf_s32(sys, &level, "cpu%d/cache/index%zu/level", num, i) != 0)
			continue;
		if (ul_path_readf_buffer(sys, buf, sizeof(buf),
					"cpu%d/cache/index%zu/type", num, i) <= 0)
			continue;

		if (id == -1)
			id = mk_cache_id(cxt, cpu, buf, level);

		ca = get_cache(cxt, buf, level, id);
		if (!ca)
			ca = add_cache(cxt, buf, level, id);

		if (!ca->name) {
			int type = 0;

			if (!strcmp(ca->type, "Data"))
				type = 'd';
			else if (!strcmp(ca->type, "Instruction"))
				type = 'i';

			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			ul_path_readf_u32(sys, &ca->ways_of_associativity,
					"cpu%d/cache/index%zu/ways_of_associativity", num, i);
			ul_path_readf_u32(sys, &ca->physical_line_partition,
					"cpu%d/cache/index%zu/physical_line_partition", num, i);
			ul_path_readf_u32(sys, &ca->number_of_sets,
					"cpu%d/cache/index%zu/number_of_sets", num, i);
			ul_path_readf_u32(sys, &ca->coherency_line_size,
					"cpu%d/cache/index%zu/coherency_line_size", num, i);

			ul_path_readf_string(sys, &ca->allocation_policy,
					"cpu%d/cache/index%zu/allocation_policy", num, i);
			ul_path_readf_string(sys, &ca->write_policy,
					"cpu%d/cache/index%zu/write_policy", num, i);

			/* cache size */
			if (ul_path_readf_buffer(sys, buf, sizeof(buf),
					"cpu%d/cache/index%zu/size", num, i) > 0)
				parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		if (!ca->sharedmap)
			/* information about how CPUs share different caches */
			ul_path_readf_cpuset(sys, &ca->sharedmap, cxt->maxcpus,
					"cpu%d/cache/index%zu/shared_cpu_map", num, i);
	}

	return 0;
}
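/*
 * Per-CPU topology IDs (core, socket, book, drawer). If a sysfs attribute is
 * missing, the corresponding ID is set to -1 ("unknown").
 */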
static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));

	if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
		cpu->coreid = -1;
	if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
		cpu->socketid = -1;
	if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
		cpu->bookid = -1;
	if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
		cpu->drawerid = -1;

	return 0;
}
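/*
 * Polarization is an s390-specific attribute: with horizontal polarization
 * all CPUs get the same share of the machine, with vertical polarization
 * CPUs are classified as high, medium or low entitlement.
 */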
static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	char mode[64];

	if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
		return 0;

	ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);

	DBG(CPU, ul_debugobj(cpu, "#%d reading polar=%s", num, mode));

	if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VLOW;
	else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VMEDIUM;
	else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VHIGH;
	else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
		cpu->polarization = POLAR_HORIZONTAL;
	else
		cpu->polarization = POLAR_UNKNOWN;

	if (cpu->type)
		cpu->type->has_polarization = 1;
	return 0;
}

static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading address", num));

	ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
	if (cpu->type)
		cpu->type->has_addresses = 1;
	return 0;
}

static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading configure", num));

	ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
	if (cpu->type)
		cpu->type->has_configured = 1;
	return 0;
}

static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	int mhz;

	DBG(CPU, ul_debugobj(cpu, "#%d reading mhz", num));

	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
		cpu->mhz_max_freq = (float) mhz / 1000;
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
		cpu->mhz_min_freq = (float) mhz / 1000;

	/* The default current-frequency value comes from /proc/cpuinfo (if
	 * available). This /proc value is usually based on MSR registers
	 * (APERF/MPERF) and it changes pretty often. It seems better to read
	 * the frequency from the cpufreq subsystem, which provides the current
	 * frequency for the current policy. There is also cpuinfo_cur_freq in
	 * sysfs, but it's not always available.
	 */
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/scaling_cur_freq", num) == 0)
		cpu->mhz_cur_freq = (float) mhz / 1000;

	if (cpu->type && (cpu->mhz_min_freq || cpu->mhz_max_freq))
		cpu->type->has_freq = 1;

	return 0;
}

float lsblk_cputype_get_maxmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float res = 0.0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		res = max(res, cpu->mhz_max_freq);
	}
	return res;
}

float lsblk_cputype_get_minmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float res = -1.0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		if (res < 0.0 || cpu->mhz_min_freq < res)
			res = cpu->mhz_min_freq;
	}
	return res;
}
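/*
 * The aggregate is computed as sum(scaling_cur_freq) / sum(cpuinfo_max_freq)
 * * 100 over all present CPUs of the given type; CPUs without cpufreq data
 * are skipped.
 */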
/* returns scaling (use) of CPUs freq. in percent */
float lsblk_cputype_get_scalmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float fmax = 0, fcur = 0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		if (cpu->mhz_max_freq <= 0.0 || cpu->mhz_cur_freq <= 0.0)
			continue;
		fmax += cpu->mhz_max_freq;
		fcur += cpu->mhz_cur_freq;
	}
	if (fcur <= 0.0)
		return 0.0;
	return fcur / fmax * 100;
}

int lscpu_read_topology(struct lscpu_cxt *cxt)
{
	size_t i;
	int rc = 0;

	for (i = 0; i < cxt->ncputypes; i++)
		rc += cputype_read_topology(cxt, cxt->cputypes[i]);

	for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || !cpu->type)
			continue;

		DBG(CPU, ul_debugobj(cpu, "#%d reading topology", cpu->logical_id));

		rc = read_ids(cxt, cpu);
		if (!rc)
			rc = read_polarization(cxt, cpu);
		if (!rc)
			rc = read_address(cxt, cpu);
		if (!rc)
			rc = read_configure(cxt, cpu);
		if (!rc)
			rc = read_mhz(cxt, cpu);
		if (!rc)
			rc = read_caches(cxt, cpu);
	}

	lscpu_sort_caches(cxt->caches, cxt->ncaches);
	DBG(GATHER, ul_debugobj(cxt, " L1d: %zu", lscpu_get_cache_full_size(cxt, "L1d", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L1i: %zu", lscpu_get_cache_full_size(cxt, "L1i", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L2: %zu", lscpu_get_cache_full_size(cxt, "L2", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L3: %zu", lscpu_get_cache_full_size(cxt, "L3", NULL)));

	return rc;
}