/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
 * Copyright (C) 2008-2023 Karel Zak <kzak@redhat.com>
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>

#include "lscpu.h"

/* Add @set to @ary; if @set shares CPUs with an already stored set it is unnecessary and is deallocated. */
static int add_cpuset_to_array(cpu_set_t **ary, size_t *items, cpu_set_t *set, size_t setsize, cpu_set_t *common_cpus_set)
{
	size_t i;

	if (!ary)
		return -EINVAL;

	/*
	 * Check if @set has no cpu in common with the cpusets
	 * saved in @ary and if so append @set to @ary.
	 */
	for (i = 0; i < *items; i++) {
		CPU_AND_S(setsize, common_cpus_set, set, ary[i]);
		if (CPU_COUNT_S(setsize, common_cpus_set))
			break;
	}
	if (i == *items) {
		ary[*items] = set;
		++*items;
		return 0;
	}
	CPU_FREE(set);
	return 1;
}

static void free_cpuset_array(cpu_set_t **ary, int items)
{
	int i;

	if (!ary)
		return;
	for (i = 0; i < items; i++)
		free(ary[i]);
	free(ary);
}

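/* Free the per-CPU-type topology maps (core, socket, book and drawer cpuset arrays). */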
void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
{
	if (!ct)
		return;
	free_cpuset_array(ct->coremaps, ct->ncores);
	free_cpuset_array(ct->socketmaps, ct->nsockets);
	free_cpuset_array(ct->bookmaps, ct->nbooks);
	free_cpuset_array(ct->drawermaps, ct->ndrawers);
}

void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
{
	size_t i;

	if (!caches)
		return;

	for (i = 0; i < n; i++) {
		struct lscpu_cache *c = &caches[i];

		DBG(MISC, ul_debug(" freeing cache #%zu %s::%d",
					i, c->name, c->id));

		free(c->name);
		free(c->type);
		free(c->allocation_policy);
		free(c->write_policy);
		free(c->sharedmap);
	}
	free(caches);
}

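/* qsort() comparator: order caches alphabetically by name (L1d, L1i, L2, ...). */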
static int cmp_cache(const void *a0, const void *b0)
{
	const struct lscpu_cache
		*a = (const struct lscpu_cache *) a0,
		*b = (const struct lscpu_cache *) b0;
	return strcmp(a->name, b->name);
}

void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
{
	if (caches && n)
		qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
}

/*
 * Get the hotplug state number representing a completely online
 * cpu from /sys/devices/system/cpu/hotplug/states
 */
static int get_online_state(struct path_cxt *sys)
{
	int hp_online_state_val = 0, page_size, rc;
	char *buf, *strp;

	hp_online_state_val = -1;

	/* sysfs text files have size = page size */
	page_size = getpagesize();

	buf = (char *)xmalloc(page_size);
	rc = ul_path_readf_buffer(sys, buf, page_size, "hotplug/states");
	if (rc <= 0)
		goto done;

	strp = strstr(buf, ": online");
	if (!strp)
		goto done;

	strp--;	/* get digits before ': online' */
	while (strp >= buf && isdigit(*strp))
		strp--;
	ul_strtos32(strp + 1, &hp_online_state_val, 10);
done:
	free(buf);
	return hp_online_state_val;
}

/* Read topology for specified type */
static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i, npos;
	struct path_cxt *sys;
	int nthreads = 0, sw_topo = 0, rc, hp_state, hp_online_state;
	FILE *fd;
	cpu_set_t *temp_set;

	sys = cxt->syscpu;	/* /sys/devices/system/cpu/ */
	npos = cxt->npossibles;	/* possible CPUs */

	DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
				ct->vendor ?: "", ct->model ?: "", ct->modelname ?:""));

	hp_online_state = get_online_state(sys);

	temp_set = CPU_ALLOC(cxt->maxcpus);
	if (!temp_set)
		err(EXIT_FAILURE, _("cpuset_alloc failed"));

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];
		cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
		cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
		int num, n = 0;

		if (!cpu || cpu->type != ct)
			continue;

		num = cpu->logical_id;
		if (ul_path_accessf(sys, F_OK,
				"cpu%d/topology/thread_siblings", num) != 0)
			continue;

		/*
		 * Ignore cpus which are not fully online.
		 * If hp_online_state is negative/zero or rc is negative, the
		 * online state could not be read correctly, so skip this check.
		 */
		rc = ul_path_readf_s32(sys, &hp_state, "cpu%d/hotplug/state", num);
		if (hp_online_state > 0 && rc >= 0 && hp_state != hp_online_state)
			continue;

		/* read topology maps */
		ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
				"cpu%d/topology/thread_siblings", num);
		ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
				"cpu%d/topology/core_siblings", num);
		ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
				"cpu%d/topology/book_siblings", num);
		ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
				"cpu%d/topology/drawer_siblings", num);

		if (thread_siblings)
			n = CPU_COUNT_S(cxt->setsize, thread_siblings);
		if (!n)
			n = 1;
		if (n > nthreads)
			nthreads = n;

		/* Allocate arrays for topology maps.
		 *
		 * For each map we make sure that it can have up to npos
		 * (number of possible CPUs) entries. This is because we
		 * cannot reliably calculate the number of cores, sockets and
		 * books on all architectures. E.g. completely virtualized
		 * architectures like s390 may have multiple sockets of
		 * different sizes.
		 */
		if (!ct->coremaps && thread_siblings)
			ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->socketmaps && core_siblings)
			ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->bookmaps && book_siblings)
			ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->drawermaps && drawer_siblings)
			ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));

		/* add to topology maps */
		if (thread_siblings)
			add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, cxt->setsize, temp_set);
		if (core_siblings)
			add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, cxt->setsize, temp_set);
		if (book_siblings)
			add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, cxt->setsize, temp_set);
		if (drawer_siblings)
			add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, cxt->setsize, temp_set);

	}
	CPU_FREE(temp_set);

	/* s390 detects its cpu topology via /proc/sysinfo, if present.
	 * Simply using the cpu topology masks in sysfs will not give
	 * usable results since everything is virtualized. E.g.
	 * virtual core 0 may have only 1 cpu, but virtual core 2 may
	 * have five cpus.
	 * If the cpu topology is not exported (e.g. 2nd level guest)
	 * fall back to the old calculation scheme.
	 */
	if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
		int t0, t1;
		char buf[BUFSIZ];

		DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (sscanf(buf, "CPU Topology SW: %d %d %zu %zu %zu %zu",
					&t0, &t1,
					&ct->ndrawers_per_system,
					&ct->nbooks_per_drawer,
					&ct->nsockets_per_book,
					&ct->ncores_per_socket) == 6) {
				sw_topo = 1;
				DBG(TYPE, ul_debugobj(ct, " using SW topology"));
				break;
			}
		}
		if (fd)
			fclose(fd);
	}

	ct->nthreads_per_core = nthreads;
	if (ct->mtid) {
		uint64_t x;
		if (ul_strtou64(ct->mtid, &x, 10) == 0 && x <= ULONG_MAX)
			ct->nthreads_per_core = (size_t) x + 1;
	}

	if (!sw_topo) {
		ct->ncores_per_socket = ct->nsockets ? ct->ncores / ct->nsockets : 0;
		ct->nsockets_per_book = ct->nbooks ? ct->nsockets / ct->nbooks : 0;
		ct->nbooks_per_drawer = ct->ndrawers ? ct->nbooks / ct->ndrawers : 0;
		ct->ndrawers_per_system = ct->ndrawers;
	}

	DBG(TYPE, ul_debugobj(ct, " nthreads: %zu (per core)", ct->nthreads_per_core));
	DBG(TYPE, ul_debugobj(ct, " ncores: %zu (%zu per socket)", ct->ncores, ct->ncores_per_socket));
	DBG(TYPE, ul_debugobj(ct, " nsockets: %zu (%zu per books)", ct->nsockets, ct->nsockets_per_book));
	DBG(TYPE, ul_debugobj(ct, " nbooks: %zu (%zu per drawer)", ct->nbooks, ct->nbooks_per_drawer));
	DBG(TYPE, ul_debugobj(ct, " ndrawers: %zu (%zu per system)", ct->ndrawers, ct->ndrawers_per_system));

	return 0;
}

/* Count the size of all instances of the "name" cache */
size_t lscpu_get_cache_full_size(struct lscpu_cxt *cxt, const char *name, int *instances)
{
	size_t i, sz = 0;

	if (instances)
		*instances = 0;

	for (i = 0; i < cxt->ncaches; i++) {
		if (strcmp(cxt->caches[i].name, name) == 0) {
			sz += cxt->caches[i].size;
			if (instances)
				(*instances)++;
		}
	}

	return sz;
}

struct lscpu_cache *lscpu_cpu_get_cache(struct lscpu_cxt *cxt,
				struct lscpu_cpu *cpu, const char *name)
{
	size_t i;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (ca->sharedmap &&
		    strcmp(ca->name, name) == 0 &&
		    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
			return ca;
	}

	return NULL;
}

/*
 * The cache is identified by type+level+id.
 */
static struct lscpu_cache *get_cache(struct lscpu_cxt *cxt,
				const char *type, int level, int id)
{
	size_t i;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];
		if (ca->id == id &&
		    ca->level == level &&
		    strcmp(ca->type, type) == 0)
			return ca;
	}
	return NULL;
}

static struct lscpu_cache *add_cache(struct lscpu_cxt *cxt,
				const char *type, int level, int id)
{
	struct lscpu_cache *ca;

	cxt->ncaches++;
	cxt->caches = xreallocarray(cxt->caches,
				cxt->ncaches, sizeof(*cxt->caches));

	ca = &cxt->caches[cxt->ncaches - 1];
	memset(ca, 0 , sizeof(*ca));

	ca->id = id;
	ca->level = level;
	ca->type = xstrdup(type);

	DBG(GATHER, ul_debugobj(cxt, "add cache %s%d::%d", type, level, id));
	return ca;
}

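/*
 * Generate a cache ID when sysfs does not provide one: return the index of the
 * existing cache of the same type and level whose shared CPU map already
 * contains @cpu, otherwise the next free index for that type and level.
 */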
static int mk_cache_id(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu, char *type, int level)
{
	size_t i;
	int idx = 0;

	for (i = 0; i < cxt->ncaches; i++) {
		struct lscpu_cache *ca = &cxt->caches[i];

		if (ca->level != level || strcmp(ca->type, type) != 0)
			continue;

		if (ca->sharedmap &&
		    CPU_ISSET_S(cpu->logical_id, cxt->setsize, ca->sharedmap))
			return idx;
		idx++;
	}

	return idx;
}

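/*
 * Read one SPARC cache description; the sizes are exposed as flat sysfs files
 * such as cpu<N>/l1_icache_size or cpu<N>/l2_cache_size rather than as
 * cache/index<M> directories.
 */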
static int read_sparc_onecache(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu,
				int level, char *typestr, int type)
{
	struct lscpu_cache *ca;
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	uint32_t size;
	int rc, id;
	char buf[32];

	if (type)
		snprintf(buf, sizeof(buf), "l%d_%c", level, type);
	else
		snprintf(buf, sizeof(buf), "l%d_", level);

	rc = ul_path_readf_u32(sys, &size,
			"cpu%d/%scache_size", num, buf);
	if (rc != 0)
		return rc;

	DBG(CPU, ul_debugobj(cpu, "#%d reading sparc %s cache", num, buf));

	id = mk_cache_id(cxt, cpu, typestr, level);

	ca = get_cache(cxt, typestr, level, id);
	if (!ca)
		ca = add_cache(cxt, typestr, level, id);

	if (!ca->name) {
		ul_path_readf_u32(sys, &ca->coherency_line_size,
				"cpu%d/%scache_line_size", num, buf);
		assert(ca->type);

		if (type)
			snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
		else
			snprintf(buf, sizeof(buf), "L%d", ca->level);
		ca->name = xstrdup(buf);
		ca->size = size;
	}
	/* There is no sharedmap for the cache in /sys, so we assume that the
	 * caches are not shared. Send a patch if your /sys provides this
	 * information.
	 */
	if (!ca->sharedmap) {
		size_t setsize = 0;

		ca->sharedmap = cpuset_alloc(cxt->maxcpus, &setsize, NULL);
		CPU_ZERO_S(setsize, ca->sharedmap);
		CPU_SET_S(num, setsize, ca->sharedmap);
	}

	return 0;
}

static int read_sparc_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	read_sparc_onecache(cxt, cpu, 1, "Instruction", 'i');
	read_sparc_onecache(cxt, cpu, 1, "Data", 'd');
	read_sparc_onecache(cxt, cpu, 2, "Unified", 0);
	read_sparc_onecache(cxt, cpu, 3, "Unified", 0);

	return 0;
}

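/*
 * Read cache details for @cpu from the cpu<N>/cache/index<M> sysfs
 * directories; if none exist, fall back to the SPARC specific layout.
 */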
static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	char buf[256];
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	size_t i, ncaches = 0;

	while (ul_path_accessf(sys, F_OK,
				"cpu%d/cache/index%zu",
				num, ncaches) == 0)
		ncaches++;

	if (ncaches == 0 && ul_path_accessf(sys, F_OK,
				"cpu%d/l1_icache_size", num) == 0)
		return read_sparc_caches(cxt, cpu);

	DBG(CPU, ul_debugobj(cpu, "#%d reading %zd caches", num, ncaches));

	for (i = 0; i < ncaches; i++) {
		struct lscpu_cache *ca;
		int id, level;

		if (ul_path_readf_s32(sys, &id, "cpu%d/cache/index%zu/id", num, i) != 0)
			id = -1;
		if (ul_path_readf_s32(sys, &level, "cpu%d/cache/index%zu/level", num, i) != 0)
			continue;
		if (ul_path_readf_buffer(sys, buf, sizeof(buf),
				"cpu%d/cache/index%zu/type", num, i) <= 0)
			continue;

		if (id == -1)
			id = mk_cache_id(cxt, cpu, buf, level);

		ca = get_cache(cxt, buf, level, id);
		if (!ca)
			ca = add_cache(cxt, buf, level, id);

		if (!ca->name) {
			int type = 0;

			assert(ca->type);

			if (!strcmp(ca->type, "Data"))
				type = 'd';
			else if (!strcmp(ca->type, "Instruction"))
				type = 'i';

			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			ul_path_readf_u32(sys, &ca->ways_of_associativity,
					"cpu%d/cache/index%zu/ways_of_associativity", num, i);
			ul_path_readf_u32(sys, &ca->physical_line_partition,
					"cpu%d/cache/index%zu/physical_line_partition", num, i);
			ul_path_readf_u32(sys, &ca->number_of_sets,
					"cpu%d/cache/index%zu/number_of_sets", num, i);
			ul_path_readf_u32(sys, &ca->coherency_line_size,
					"cpu%d/cache/index%zu/coherency_line_size", num, i);

			ul_path_readf_string(sys, &ca->allocation_policy,
					"cpu%d/cache/index%zu/allocation_policy", num, i);
			ul_path_readf_string(sys, &ca->write_policy,
					"cpu%d/cache/index%zu/write_policy", num, i);

			/* cache size */
			if (ul_path_readf_buffer(sys, buf, sizeof(buf),
					"cpu%d/cache/index%zu/size", num, i) > 0)
				ul_parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		if (!ca->sharedmap)
			/* information about how CPUs share different caches */
			ul_path_readf_cpuset(sys, &ca->sharedmap, cxt->maxcpus,
					"cpu%d/cache/index%zu/shared_cpu_map", num, i);
	}

	return 0;
}

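/*
 * Read core, socket, book and drawer IDs for @cpu from the sysfs topology
 * directory; a missing file leaves the corresponding ID at -1.
 */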
static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));

	if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
		cpu->coreid = -1;
	if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
		cpu->socketid = -1;
	if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
		cpu->bookid = -1;
	if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
		cpu->drawerid = -1;

	return 0;
}

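/* Read the CPU polarization (horizontal/vertical entitlement; exposed by s390) from cpu<N>/polarization. */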
static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	char mode[64];

	if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
		return 0;

	ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);

	DBG(CPU, ul_debugobj(cpu, "#%d reading polar=%s", num, mode));

	if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VLOW;
	else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VMEDIUM;
	else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VHIGH;
	else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
		cpu->polarization = POLAR_HORIZONTAL;
	else
		cpu->polarization = POLAR_UNKNOWN;

	if (cpu->type)
		cpu->type->has_polarization = 1;
	return 0;
}

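/* Read the physical CPU address from cpu<N>/address (exposed by s390). */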
static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading address", num));

	ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
	if (cpu->type)
		cpu->type->has_addresses = 1;
	return 0;
}

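/* Read whether the CPU is configured from cpu<N>/configure (exposed by s390). */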
static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading configure", num));

	ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
	if (cpu->type)
		cpu->type->has_configured = 1;
	return 0;
}

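/* Read minimum, maximum and current CPU frequencies from the cpufreq sysfs subsystem (in kHz) and convert them to MHz. */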
static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	int mhz;

	DBG(CPU, ul_debugobj(cpu, "#%d reading mhz", num));

	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
		cpu->mhz_max_freq = (float) mhz / 1000;
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
		cpu->mhz_min_freq = (float) mhz / 1000;

	/* The default current-frequency value comes from /proc/cpuinfo (if
	 * available). This /proc value is usually based on MSR registers
	 * (APERF/MPERF) and it changes pretty often. It seems better to read
	 * the frequency from the cpufreq subsystem, which provides the current
	 * frequency for the current policy. There is also cpuinfo_cur_freq in
	 * sysfs, but it's not always available.
	 */
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/scaling_cur_freq", num) == 0)
		cpu->mhz_cur_freq = (float) mhz / 1000;

	if (cpu->type && (cpu->mhz_min_freq || cpu->mhz_max_freq))
		cpu->type->has_freq = 1;

	return 0;
}

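/* Return the highest maximum frequency (MHz) among the present CPUs of the given type. */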
float lsblk_cputype_get_maxmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float res = 0.0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		res = max(res, cpu->mhz_max_freq);
	}
	return res;
}

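/* Return the lowest minimum frequency (MHz) among the present CPUs of the given type, or -1.0 if unknown. */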
float lsblk_cputype_get_minmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float res = -1.0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		if (!cpu->mhz_min_freq)
			continue;
		if (res < 0.0 || cpu->mhz_min_freq < res)
			res = cpu->mhz_min_freq;
	}
	return res;
}

/* Returns the current scaling (utilization) of the CPUs' frequency, in percent */
float lsblk_cputype_get_scalmhz(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	float fmax = 0, fcur = 0;

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct || !is_cpu_present(cxt, cpu))
			continue;
		if (cpu->mhz_max_freq <= 0.0 || cpu->mhz_cur_freq <= 0.0)
			continue;
		fmax += cpu->mhz_max_freq;
		fcur += cpu->mhz_cur_freq;
	}
	if (fcur <= 0.0)
		return 0.0;
	return fcur / fmax * 100;
}

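/*
 * Top-level topology reader: gather per-type topology maps and then read
 * per-CPU details (IDs, polarization, address, configuration, frequencies
 * and caches).
 */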
int lscpu_read_topology(struct lscpu_cxt *cxt)
{
	size_t i;
	int rc = 0;

	for (i = 0; i < cxt->ncputypes; i++)
		rc += cputype_read_topology(cxt, cxt->cputypes[i]);

	for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || !cpu->type)
			continue;

		DBG(CPU, ul_debugobj(cpu, "#%d reading topology", cpu->logical_id));

		rc = read_ids(cxt, cpu);
		if (!rc)
			rc = read_polarization(cxt, cpu);
		if (!rc)
			rc = read_address(cxt, cpu);
		if (!rc)
			rc = read_configure(cxt, cpu);
		if (!rc)
			rc = read_mhz(cxt, cpu);
		if (!rc)
			rc = read_caches(cxt, cpu);
	}

	lscpu_sort_caches(cxt->caches, cxt->ncaches);
	DBG(GATHER, ul_debugobj(cxt, " L1d: %zu", lscpu_get_cache_full_size(cxt, "L1d", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L1i: %zu", lscpu_get_cache_full_size(cxt, "L1i", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L2: %zu", lscpu_get_cache_full_size(cxt, "L2", NULL)));
	DBG(GATHER, ul_debugobj(cxt, " L3: %zu", lscpu_get_cache_full_size(cxt, "L3", NULL)));

	return rc;
}