]>
git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu-topology.c
02a90d63f12f27ede3b57063f1d5a5bb1f6c212b
/* add @set to the @ary, unnecessary set is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items,
			       cpu_set_t *set, size_t setsize)
{
	size_t i;

	if (!ary)
		return -EINVAL;

	/* Is an equal set already stored? */
	for (i = 0; i < *items; i++) {
		if (CPU_EQUAL_S(setsize, set, ary[i]))
			break;
	}

	/* NOTE(review): the tail of this function was elided by the scrape;
	 * reconstructed from the header comment ("unnecessary set is
	 * deallocated") -- confirm against upstream. */
	if (i == *items) {
		/* not found; the array takes ownership of @set */
		ary[*items] = set;
		++*items;
		return 0;
	}

	/* duplicate -- @set is unnecessary, release it */
	CPU_FREE(set);
	return 1;
}
/* Free @items cpu sets stored in @ary, then the array itself. */
static void free_cpuset_array(cpu_set_t **ary, int items)
{
	int i;

	if (!ary)
		return;
	for (i = 0; i < items; i++)
		free(ary[i]);
	free(ary);
}
44 void lscpu_cputype_free_topology(struct lscpu_cputype
*ct
)
48 free_cpuset_array(ct
->coremaps
, ct
->ncores
);
49 free_cpuset_array(ct
->socketmaps
, ct
->nsockets
);
50 free_cpuset_array(ct
->bookmaps
, ct
->nbooks
);
51 free_cpuset_array(ct
->drawermaps
, ct
->ndrawers
);
54 void lscpu_free_caches(struct lscpu_cache
*caches
, size_t n
)
61 for (i
= 0; i
< n
; i
++) {
62 struct lscpu_cache
*c
= &caches
[i
];
64 DBG(MISC
, ul_debug(" freeing cache #%zu %s::%d",
69 free(c
->allocation_policy
);
70 free(c
->write_policy
);
76 static int cmp_cache(const void *a0
, const void *b0
)
78 const struct lscpu_cache
79 *a
= (const struct lscpu_cache
*) a0
,
80 *b
= (const struct lscpu_cache
*) b0
;
81 return strcmp(a
->name
, b
->name
);
84 void lscpu_sort_caches(struct lscpu_cache
*caches
, size_t n
)
87 qsort(caches
, n
, sizeof(struct lscpu_cache
), cmp_cache
);
91 /* Read topology for specified type */
92 static int cputype_read_topology(struct lscpu_cxt
*cxt
, struct lscpu_cputype
*ct
)
96 int nthreads
= 0, sw_topo
= 0;
99 sys
= cxt
->syscpu
; /* /sys/devices/system/cpu/ */
100 npos
= cxt
->npossibles
; /* possible CPUs */
102 DBG(TYPE
, ul_debugobj(ct
, "reading %s/%s/%s topology",
103 ct
->vendor
?: "", ct
->model
?: "", ct
->modelname
?:""));
105 for (i
= 0; i
< cxt
->npossibles
; i
++) {
106 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
107 cpu_set_t
*thread_siblings
= NULL
, *core_siblings
= NULL
;
108 cpu_set_t
*book_siblings
= NULL
, *drawer_siblings
= NULL
;
111 if (!cpu
|| cpu
->type
!= ct
)
114 num
= cpu
->logical_id
;
115 if (ul_path_accessf(sys
, F_OK
,
116 "cpu%d/topology/thread_siblings", num
) != 0)
119 /* read topology maps */
120 ul_path_readf_cpuset(sys
, &thread_siblings
, cxt
->maxcpus
,
121 "cpu%d/topology/thread_siblings", num
);
122 ul_path_readf_cpuset(sys
, &core_siblings
, cxt
->maxcpus
,
123 "cpu%d/topology/core_siblings", num
);
124 ul_path_readf_cpuset(sys
, &book_siblings
, cxt
->maxcpus
,
125 "cpu%d/topology/book_siblings", num
);
126 ul_path_readf_cpuset(sys
, &drawer_siblings
, cxt
->maxcpus
,
127 "cpu%d/topology/drawer_siblings", num
);
130 n
= CPU_COUNT_S(cxt
->setsize
, thread_siblings
);
136 /* Allocate arrays for topology maps.
138 * For each map we make sure that it can have up to ncpuspos
139 * entries. This is because we cannot reliably calculate the
140 * number of cores, sockets and books on all architectures.
141 * E.g. completely virtualized architectures like s390 may
142 * have multiple sockets of different sizes.
144 if (!ct
->coremaps
&& thread_siblings
)
145 ct
->coremaps
= xcalloc(npos
, sizeof(cpu_set_t
*));
146 if (!ct
->socketmaps
&& core_siblings
)
147 ct
->socketmaps
= xcalloc(npos
, sizeof(cpu_set_t
*));
148 if (!ct
->bookmaps
&& book_siblings
)
149 ct
->bookmaps
= xcalloc(npos
, sizeof(cpu_set_t
*));
150 if (!ct
->drawermaps
&& drawer_siblings
)
151 ct
->drawermaps
= xcalloc(npos
, sizeof(cpu_set_t
*));
153 /* add to topology maps */
155 add_cpuset_to_array(ct
->coremaps
, &ct
->ncores
, thread_siblings
, cxt
->setsize
);
157 add_cpuset_to_array(ct
->socketmaps
, &ct
->nsockets
, core_siblings
, cxt
->setsize
);
159 add_cpuset_to_array(ct
->bookmaps
, &ct
->nbooks
, book_siblings
, cxt
->setsize
);
161 add_cpuset_to_array(ct
->drawermaps
, &ct
->ndrawers
, drawer_siblings
, cxt
->setsize
);
165 /* s390 detects its cpu topology via /proc/sysinfo, if present.
166 * Using simply the cpu topology masks in sysfs will not give
167 * usable results since everything is virtualized. E.g.
168 * virtual core 0 may have only 1 cpu, but virtual core 2 may
170 * If the cpu topology is not exported (e.g. 2nd level guest)
171 * fall back to old calculation scheme.
173 if ((fd
= ul_path_fopen(cxt
->procfs
, "r", "sysinfo"))) {
177 DBG(TYPE
, ul_debugobj(ct
, " reading sysinfo"));
179 while (fgets(buf
, sizeof(buf
), fd
) != NULL
) {
180 if (sscanf(buf
, "CPU Topology SW: %d %d %zu %zu %zu %zu",
182 &ct
->ndrawers_per_system
,
183 &ct
->nbooks_per_drawer
,
184 &ct
->nsockets_per_book
,
185 &ct
->ncores_per_socket
) == 6) {
187 DBG(TYPE
, ul_debugobj(ct
, " using SW topology"));
195 ct
->nthreads_per_core
= nthreads
;
198 if (ul_strtou64(ct
->mtid
, &x
, 10) == 0 && x
<= ULONG_MAX
)
199 ct
->nthreads_per_core
= (size_t) x
+ 1;
203 ct
->ncores_per_socket
= ct
->nsockets
? ct
->ncores
/ ct
->nsockets
: 0;
204 ct
->nsockets_per_book
= ct
->nbooks
? ct
->nsockets
/ ct
->nbooks
: 0;
205 ct
->nbooks_per_drawer
= ct
->ndrawers
? ct
->nbooks
/ ct
->ndrawers
: 0;
206 ct
->ndrawers_per_system
= ct
->ndrawers
;
209 DBG(TYPE
, ul_debugobj(ct
, " nthreads: %zu (per core)", ct
->nthreads_per_core
));
210 DBG(TYPE
, ul_debugobj(ct
, " ncores: %zu (%zu per socket)", ct
->ncores
, ct
->ncores_per_socket
));
211 DBG(TYPE
, ul_debugobj(ct
, " nsockets: %zu (%zu per books)", ct
->nsockets
, ct
->nsockets_per_book
));
212 DBG(TYPE
, ul_debugobj(ct
, " nbooks: %zu (%zu per drawer)", ct
->nbooks
, ct
->nbooks_per_drawer
));
213 DBG(TYPE
, ul_debugobj(ct
, " ndrawers: %zu (%zu per system)", ct
->ndrawers
, ct
->ndrawers_per_system
));
218 /* count size of all instancess of the "name" */
219 size_t lscpu_get_cache_full_size(struct lscpu_cxt
*cxt
, const char *name
, int *instances
)
226 for (i
= 0; i
< cxt
->ncaches
; i
++) {
227 if (strcmp(cxt
->caches
[i
].name
, name
) == 0) {
228 sz
+= cxt
->caches
[i
].size
;
237 struct lscpu_cache
*lscpu_cpu_get_cache(struct lscpu_cxt
*cxt
,
238 struct lscpu_cpu
*cpu
, const char *name
)
242 for (i
= 0; i
< cxt
->ncaches
; i
++) {
243 struct lscpu_cache
*ca
= &cxt
->caches
[i
];
245 if (strcmp(ca
->name
, name
) == 0 &&
246 CPU_ISSET_S(cpu
->logical_id
, cxt
->setsize
, ca
->sharedmap
))
254 * The cache is identifued by type+level+id.
256 static struct lscpu_cache
*get_cache(struct lscpu_cxt
*cxt
,
257 const char *type
, int level
, int id
)
261 for (i
= 0; i
< cxt
->ncaches
; i
++) {
262 struct lscpu_cache
*ca
= &cxt
->caches
[i
];
264 ca
->level
== level
&&
265 strcmp(ca
->type
, type
) == 0)
271 static struct lscpu_cache
*add_cache(struct lscpu_cxt
*cxt
,
272 const char *type
, int level
, int id
)
274 struct lscpu_cache
*ca
;
277 cxt
->caches
= xreallocarray(cxt
->caches
,
278 cxt
->ncaches
, sizeof(*cxt
->caches
));
280 ca
= &cxt
->caches
[cxt
->ncaches
- 1];
281 memset(ca
, 0 , sizeof(*ca
));
285 ca
->type
= xstrdup(type
);
287 DBG(GATHER
, ul_debugobj(cxt
, "add cache %s%d::%d", type
, level
, id
));
291 static int mk_cache_id(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
, char *type
, int level
)
296 for (i
= 0; i
< cxt
->ncaches
; i
++) {
297 struct lscpu_cache
*ca
= &cxt
->caches
[i
];
299 if (ca
->level
!= level
|| strcmp(ca
->type
, type
) != 0)
303 CPU_ISSET_S(cpu
->logical_id
, cxt
->setsize
, ca
->sharedmap
))
311 static int read_sparc_onecache(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
,
312 int level
, char *typestr
, int type
)
314 struct lscpu_cache
*ca
;
315 struct path_cxt
*sys
= cxt
->syscpu
;
316 int num
= cpu
->logical_id
;
322 snprintf(buf
, sizeof(buf
), "l%d_%c", level
, type
);
324 snprintf(buf
, sizeof(buf
), "l%d_", level
);
326 rc
= ul_path_readf_u32(sys
, &size
,
327 "cpu%d/%scache_size", num
, buf
);
331 DBG(CPU
, ul_debugobj(cpu
, "#%d reading sparc %s cache", num
, buf
));
333 id
= mk_cache_id(cxt
, cpu
, typestr
, level
);
335 ca
= get_cache(cxt
, typestr
, level
, id
);
337 ca
= add_cache(cxt
, typestr
, level
, id
);
340 ul_path_readf_u32(sys
, &ca
->coherency_line_size
,
341 "cpu%d/%scache_line_size", num
, buf
);
345 snprintf(buf
, sizeof(buf
), "L%d%c", ca
->level
, type
);
347 snprintf(buf
, sizeof(buf
), "L%d", ca
->level
);
348 ca
->name
= xstrdup(buf
);
351 /* There is no sharedmap of the cache in /sys, we assume that caches are
352 * not shared. Send a patch if your /sys provides another information.
354 if (!ca
->sharedmap
) {
357 ca
->sharedmap
= cpuset_alloc(cxt
->maxcpus
, &setsize
, NULL
);
358 CPU_ZERO_S(setsize
, ca
->sharedmap
);
359 CPU_SET_S(num
, setsize
, ca
->sharedmap
);
/* Register the classic sparc cache hierarchy for @cpu:
 * L1 instruction + data, unified L2 and unified L3. */
static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
	read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
	read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
	/* BUGFIX: the second unified-cache call used level 2 twice, which
	 * re-read L2 and never registered the L3 cache; it must be level 3. */
	read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

	return 0;
}
375 static int read_caches(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
)
378 struct path_cxt
*sys
= cxt
->syscpu
;
379 int num
= cpu
->logical_id
;
380 size_t i
, ncaches
= 0;
382 while (ul_path_accessf(sys
, F_OK
,
383 "cpu%d/cache/index%zu",
387 if (ncaches
== 0 && ul_path_accessf(sys
, F_OK
,
388 "cpu%d/l1_icache_size", num
) == 0)
389 return read_sparc_caches(cxt
, cpu
);
391 DBG(CPU
, ul_debugobj(cpu
, "#%d reading %zd caches", num
, ncaches
));
393 for (i
= 0; i
< ncaches
; i
++) {
394 struct lscpu_cache
*ca
;
397 if (ul_path_readf_s32(sys
, &id
, "cpu%d/cache/index%zu/id", num
, i
) != 0)
399 if (ul_path_readf_s32(sys
, &level
, "cpu%d/cache/index%zu/level", num
, i
) != 0)
401 if (ul_path_readf_buffer(sys
, buf
, sizeof(buf
),
402 "cpu%d/cache/index%zu/type", num
, i
) <= 0)
406 id
= mk_cache_id(cxt
, cpu
, buf
, level
);
408 ca
= get_cache(cxt
, buf
, level
, id
);
410 ca
= add_cache(cxt
, buf
, level
, id
);
417 if (!strcmp(ca
->type
, "Data"))
419 else if (!strcmp(ca
->type
, "Instruction"))
423 snprintf(buf
, sizeof(buf
), "L%d%c", ca
->level
, type
);
425 snprintf(buf
, sizeof(buf
), "L%d", ca
->level
);
427 ca
->name
= xstrdup(buf
);
429 ul_path_readf_u32(sys
, &ca
->ways_of_associativity
,
430 "cpu%d/cache/index%zu/ways_of_associativity", num
, i
);
431 ul_path_readf_u32(sys
, &ca
->physical_line_partition
,
432 "cpu%d/cache/index%zu/physical_line_partition", num
, i
);
433 ul_path_readf_u32(sys
, &ca
->number_of_sets
,
434 "cpu%d/cache/index%zu/number_of_sets", num
, i
);
435 ul_path_readf_u32(sys
, &ca
->coherency_line_size
,
436 "cpu%d/cache/index%zu/coherency_line_size", num
, i
);
438 ul_path_readf_string(sys
, &ca
->allocation_policy
,
439 "cpu%d/cache/index%zu/allocation_policy", num
, i
);
440 ul_path_readf_string(sys
, &ca
->write_policy
,
441 "cpu%d/cache/index%zu/write_policy", num
, i
);
444 if (ul_path_readf_buffer(sys
, buf
, sizeof(buf
),
445 "cpu%d/cache/index%zu/size", num
, i
) > 0)
446 parse_size(buf
, &ca
->size
, NULL
);
452 /* information about how CPUs share different caches */
453 ul_path_readf_cpuset(sys
, &ca
->sharedmap
, cxt
->maxcpus
,
454 "cpu%d/cache/index%zu/shared_cpu_map", num
, i
);
460 static int read_ids(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
)
462 struct path_cxt
*sys
= cxt
->syscpu
;
463 int num
= cpu
->logical_id
;
465 if (ul_path_accessf(sys
, F_OK
, "cpu%d/topology", num
) != 0)
468 DBG(CPU
, ul_debugobj(cpu
, "#%d reading IDs", num
));
470 if (ul_path_readf_s32(sys
, &cpu
->coreid
, "cpu%d/topology/core_id", num
) != 0)
472 if (ul_path_readf_s32(sys
, &cpu
->socketid
, "cpu%d/topology/physical_package_id", num
) != 0)
474 if (ul_path_readf_s32(sys
, &cpu
->bookid
, "cpu%d/topology/book_id", num
) != 0)
476 if (ul_path_readf_s32(sys
, &cpu
->drawerid
, "cpu%d/topology/drawer_id", num
) != 0)
482 static int read_polarization(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
)
484 struct path_cxt
*sys
= cxt
->syscpu
;
485 int num
= cpu
->logical_id
;
488 if (ul_path_accessf(sys
, F_OK
, "cpu%d/polarization", num
) != 0)
491 ul_path_readf_buffer(sys
, mode
, sizeof(mode
), "cpu%d/polarization", num
);
493 DBG(CPU
, ul_debugobj(cpu
, "#%d reading polar=%s", num
, mode
));
495 if (strncmp(mode
, "vertical:low", sizeof(mode
)) == 0)
496 cpu
->polarization
= POLAR_VLOW
;
497 else if (strncmp(mode
, "vertical:medium", sizeof(mode
)) == 0)
498 cpu
->polarization
= POLAR_VMEDIUM
;
499 else if (strncmp(mode
, "vertical:high", sizeof(mode
)) == 0)
500 cpu
->polarization
= POLAR_VHIGH
;
501 else if (strncmp(mode
, "horizontal", sizeof(mode
)) == 0)
502 cpu
->polarization
= POLAR_HORIZONTAL
;
504 cpu
->polarization
= POLAR_UNKNOWN
;
507 cpu
->type
->has_polarization
= 1;
511 static int read_address(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
)
513 struct path_cxt
*sys
= cxt
->syscpu
;
514 int num
= cpu
->logical_id
;
516 if (ul_path_accessf(sys
, F_OK
, "cpu%d/address", num
) != 0)
519 DBG(CPU
, ul_debugobj(cpu
, "#%d reading address", num
));
521 ul_path_readf_s32(sys
, &cpu
->address
, "cpu%d/address", num
);
523 cpu
->type
->has_addresses
= 1;
527 static int read_configure(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
)
529 struct path_cxt
*sys
= cxt
->syscpu
;
530 int num
= cpu
->logical_id
;
532 if (ul_path_accessf(sys
, F_OK
, "cpu%d/configure", num
) != 0)
535 DBG(CPU
, ul_debugobj(cpu
, "#%d reading configure", num
));
537 ul_path_readf_s32(sys
, &cpu
->configured
, "cpu%d/configure", num
);
539 cpu
->type
->has_configured
= 1;
543 static int read_mhz(struct lscpu_cxt
*cxt
, struct lscpu_cpu
*cpu
)
545 struct path_cxt
*sys
= cxt
->syscpu
;
546 int num
= cpu
->logical_id
;
549 DBG(CPU
, ul_debugobj(cpu
, "#%d reading mhz", num
));
551 if (ul_path_readf_s32(sys
, &mhz
, "cpu%d/cpufreq/cpuinfo_max_freq", num
) == 0)
552 cpu
->mhz_max_freq
= (float) mhz
/ 1000;
553 if (ul_path_readf_s32(sys
, &mhz
, "cpu%d/cpufreq/cpuinfo_min_freq", num
) == 0)
554 cpu
->mhz_min_freq
= (float) mhz
/ 1000;
556 /* The default current-frequency value comes is from /proc/cpuinfo (if
557 * available). This /proc value is usually based on MSR registers
558 * (APERF/APERF) and it changes pretty often. It seems better to read
559 * frequency from cpufreq subsystem that provides the current frequency
560 * for the current policy. There is also cpuinfo_cur_freq in sysfs, but
561 * it's not always available.
563 if (ul_path_readf_s32(sys
, &mhz
, "cpu%d/cpufreq/scaling_cur_freq", num
) == 0)
564 cpu
->mhz_cur_freq
= (float) mhz
/ 1000;
566 if (cpu
->type
&& (cpu
->mhz_min_freq
|| cpu
->mhz_max_freq
))
567 cpu
->type
->has_freq
= 1;
572 float lsblk_cputype_get_maxmhz(struct lscpu_cxt
*cxt
, struct lscpu_cputype
*ct
)
577 for (i
= 0; i
< cxt
->npossibles
; i
++) {
578 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
580 if (!cpu
|| cpu
->type
!= ct
|| !is_cpu_present(cxt
, cpu
))
582 res
= max(res
, cpu
->mhz_max_freq
);
587 float lsblk_cputype_get_minmhz(struct lscpu_cxt
*cxt
, struct lscpu_cputype
*ct
)
592 for (i
= 0; i
< cxt
->npossibles
; i
++) {
593 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
595 if (!cpu
|| cpu
->type
!= ct
|| !is_cpu_present(cxt
, cpu
))
597 if (res
< 0.0 || cpu
->mhz_min_freq
< res
)
598 res
= cpu
->mhz_min_freq
;
603 /* returns scaling (use) of CPUs freq. in percent */
604 float lsblk_cputype_get_scalmhz(struct lscpu_cxt
*cxt
, struct lscpu_cputype
*ct
)
607 float fmax
= 0, fcur
= 0;
609 for (i
= 0; i
< cxt
->npossibles
; i
++) {
610 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
612 if (!cpu
|| cpu
->type
!= ct
|| !is_cpu_present(cxt
, cpu
))
614 if (cpu
->mhz_max_freq
<= 0.0 || cpu
->mhz_cur_freq
<= 0.0)
616 fmax
+= cpu
->mhz_max_freq
;
617 fcur
+= cpu
->mhz_cur_freq
;
621 return fcur
/ fmax
* 100;
624 int lscpu_read_topology(struct lscpu_cxt
*cxt
)
630 for (i
= 0; i
< cxt
->ncputypes
; i
++)
631 rc
+= cputype_read_topology(cxt
, cxt
->cputypes
[i
]);
633 for (i
= 0; rc
== 0 && i
< cxt
->npossibles
; i
++) {
634 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
636 if (!cpu
|| !cpu
->type
)
639 DBG(CPU
, ul_debugobj(cpu
, "#%d reading topology", cpu
->logical_id
));
641 rc
= read_ids(cxt
, cpu
);
643 rc
= read_polarization(cxt
, cpu
);
645 rc
= read_address(cxt
, cpu
);
647 rc
= read_configure(cxt
, cpu
);
649 rc
= read_mhz(cxt
, cpu
);
651 rc
= read_caches(cxt
, cpu
);
654 lscpu_sort_caches(cxt
->caches
, cxt
->ncaches
);
655 DBG(GATHER
, ul_debugobj(cxt
, " L1d: %zu", lscpu_get_cache_full_size(cxt
, "L1d", NULL
)));
656 DBG(GATHER
, ul_debugobj(cxt
, " L1i: %zu", lscpu_get_cache_full_size(cxt
, "L1i", NULL
)));
657 DBG(GATHER
, ul_debugobj(cxt
, " L2: %zu", lscpu_get_cache_full_size(cxt
, "L2", NULL
)));
658 DBG(GATHER
, ul_debugobj(cxt
, " L3: %zu", lscpu_get_cache_full_size(cxt
, "L3", NULL
)));