#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include "lscpu.h"

/*
 * Add @set to @ary; if an equal set is already present, the now
 * unnecessary @set is deallocated. Returns 0 if @set was added,
 * 1 if it was a duplicate, or -EINVAL on bad arguments.
 */
static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set, size_t setsize)
{
	int i;

	if (!ary)
		return -EINVAL;

	for (i = 0; i < *items; i++) {
		if (CPU_EQUAL_S(setsize, set, ary[i]))
			break;
	}
	if (i == *items) {
		ary[*items] = set;
		++*items;
		return 0;
	}
	CPU_FREE(set);
	return 1;
}

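/* Free the array of CPU sets, including the sets themselves */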
static void free_cpuset_array(cpu_set_t **ary, int items)
{
	int i;

	if (!ary)
		return;
	for (i = 0; i < items; i++)
		free(ary[i]);
	free(ary);
}

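/* Release topology maps (core/socket/book/drawer) and caches owned by the type */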
void lscpu_cputype_free_topology(struct lscpu_cputype *ct)
{
	if (!ct)
		return;
	free_cpuset_array(ct->coremaps, ct->ncores);
	free_cpuset_array(ct->socketmaps, ct->nsockets);
	free_cpuset_array(ct->bookmaps, ct->nbooks);
	free_cpuset_array(ct->drawermaps, ct->ndrawers);

	lscpu_free_caches(ct->caches, ct->ncaches);
}

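/* Free @n cache descriptions, including names, policy strings and shared CPU maps */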
void lscpu_free_caches(struct lscpu_cache *caches, size_t n)
{
	size_t i;

	if (!caches)
		return;

	for (i = 0; i < n; i++) {
		struct lscpu_cache *c = &caches[i];

		DBG(MISC, ul_debug(" freeing #%zu cache [%s]", i, c->name));

		free(c->name);
		free(c->type);
		free(c->allocation_policy);
		free(c->write_policy);

		free_cpuset_array(c->sharedmaps, c->nsharedmaps);
	}
	free(caches);
}

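/* qsort() comparator -- order caches by name */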
static int cmp_cache(const void *a0, const void *b0)
{
	const struct lscpu_cache
		*a = (const struct lscpu_cache *) a0,
		*b = (const struct lscpu_cache *) b0;
	return strcmp(a->name, b->name);
}

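/* Sort caches by name (e.g. "L1d", "L1i", "L2", ...) */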
void lscpu_sort_caches(struct lscpu_cache *caches, size_t n)
{
	if (caches && n)
		qsort(caches, n, sizeof(struct lscpu_cache), cmp_cache);
}


/* Read topology for specified type */
static int cputype_read_topology(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i, setsize, npos;
	struct path_cxt *sys;
	int nthreads = 0, sw_topo = 0;
	FILE *fd;

	sys = cxt->syscpu;			/* /sys/devices/system/cpu/ */
	setsize = CPU_ALLOC_SIZE(cxt->maxcpus);	/* CPU set size */
	npos = cxt->npossibles;			/* possible CPUs */

	DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s topology",
				ct->vendor ?: "", ct->model ?: "", ct->modelname ?: ""));

	for (i = 0; i < npos; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];
		cpu_set_t *thread_siblings = NULL, *core_siblings = NULL;
		cpu_set_t *book_siblings = NULL, *drawer_siblings = NULL;
		int num, n;

		if (!cpu || cpu->type != ct)
			continue;

		num = cpu->logical_id;
		if (ul_path_accessf(sys, F_OK,
					"cpu%d/topology/thread_siblings", num) != 0)
			continue;

		/*DBG(TYPE, ul_debugobj(ct, " #%d", num));*/

		/* read topology maps */
		ul_path_readf_cpuset(sys, &thread_siblings, cxt->maxcpus,
					"cpu%d/topology/thread_siblings", num);
		ul_path_readf_cpuset(sys, &core_siblings, cxt->maxcpus,
					"cpu%d/topology/core_siblings", num);
		ul_path_readf_cpuset(sys, &book_siblings, cxt->maxcpus,
					"cpu%d/topology/book_siblings", num);
		ul_path_readf_cpuset(sys, &drawer_siblings, cxt->maxcpus,
					"cpu%d/topology/drawer_siblings", num);

		n = CPU_COUNT_S(setsize, thread_siblings);
		if (!n)
			n = 1;
		if (n > nthreads)
			nthreads = n;

		/* Allocate arrays for topology maps.
		 *
		 * For each map we make sure that it can have up to ncpuspos
		 * entries. This is because we cannot reliably calculate the
		 * number of cores, sockets and books on all architectures.
		 * E.g. completely virtualized architectures like s390 may
		 * have multiple sockets of different sizes.
		 */
		if (!ct->coremaps)
			ct->coremaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->socketmaps)
			ct->socketmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->bookmaps && book_siblings)
			ct->bookmaps = xcalloc(npos, sizeof(cpu_set_t *));
		if (!ct->drawermaps && drawer_siblings)
			ct->drawermaps = xcalloc(npos, sizeof(cpu_set_t *));

		/* add to topology maps */
		add_cpuset_to_array(ct->coremaps, &ct->ncores, thread_siblings, setsize);
		add_cpuset_to_array(ct->socketmaps, &ct->nsockets, core_siblings, setsize);

		if (book_siblings)
			add_cpuset_to_array(ct->bookmaps, &ct->nbooks, book_siblings, setsize);
		if (drawer_siblings)
			add_cpuset_to_array(ct->drawermaps, &ct->ndrawers, drawer_siblings, setsize);
	}

	/* s390 detects its CPU topology via /proc/sysinfo, if present.
	 * Simply using the CPU topology masks in sysfs will not give
	 * usable results since everything is virtualized. E.g. virtual
	 * core 0 may have only 1 CPU, but virtual core 2 may have five
	 * CPUs.
	 * If the CPU topology is not exported (e.g. 2nd level guest),
	 * fall back to the old calculation scheme.
	 */
	if ((fd = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
		int t0, t1;
		char buf[BUFSIZ];

		DBG(TYPE, ul_debugobj(ct, " reading sysinfo"));

		while (fgets(buf, sizeof(buf), fd) != NULL) {
			if (sscanf(buf, "CPU Topology SW: %d %d %d %d %d %d",
					&t0, &t1,
					&ct->ndrawers_per_system,
					&ct->nbooks_per_drawer,
					&ct->nsockets_per_book,
					&ct->ncores_per_socket) == 6) {
				sw_topo = 1;
				DBG(TYPE, ul_debugobj(ct, " using SW topology"));
				break;
			}
		}
		fclose(fd);
	}

	if (ct->mtid)
		ct->nthreads_per_core = atoi(ct->mtid) + 1;
	else
		ct->nthreads_per_core = nthreads;

	if (!sw_topo) {
		ct->ndrawers_per_system = ct->nbooks_per_drawer =
			ct->nsockets_per_book = ct->ncores_per_socket = 0;
		if (!ct->ncores_per_socket && ct->nsockets)
			ct->ncores_per_socket = ct->ncores / ct->nsockets;
		if (!ct->nsockets_per_book && ct->nbooks)
			ct->nsockets_per_book = ct->nsockets / ct->nbooks;
		if (!ct->nbooks_per_drawer && ct->ndrawers)
			ct->nbooks_per_drawer = ct->nbooks / ct->ndrawers;
		if (!ct->ndrawers_per_system)
			ct->ndrawers_per_system = ct->ndrawers;
	}

	DBG(TYPE, ul_debugobj(ct, " nthreads: %d (per core)", ct->nthreads_per_core));
	DBG(TYPE, ul_debugobj(ct, " ncores: %d (%d per socket)", ct->ncores, ct->ncores_per_socket));
	DBG(TYPE, ul_debugobj(ct, " nsockets: %d (%d per book)", ct->nsockets, ct->nsockets_per_book));
	DBG(TYPE, ul_debugobj(ct, " nbooks: %d (%d per drawer)", ct->nbooks, ct->nbooks_per_drawer));
	DBG(TYPE, ul_debugobj(ct, " ndrawers: %d (%d per system)", ct->ndrawers, ct->ndrawers_per_system));

	return 0;
}

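/* Read cache descriptions for @cpu from sysfs (cpu<N>/cache/index<M>).
 * The per-type caches array is allocated on first use; shared CPU maps
 * are collected by add_cpuset_to_array().
 */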
static int read_caches(struct lscpu_cxt *cxt, struct lscpu_cputype *ct, struct lscpu_cpu *cpu)
{
	char buf[256];
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	size_t i, setsize;

	if (!ct->ncaches) {
		while (ul_path_accessf(sys, F_OK,
					"cpu%d/cache/index%zu",
					num, ct->ncaches) == 0)
			ct->ncaches++;

		if (!ct->ncaches)
			return 0;
		ct->caches = xcalloc(ct->ncaches, sizeof(*ct->caches));
	}

	setsize = CPU_ALLOC_SIZE(cxt->maxcpus);

	for (i = 0; i < ct->ncaches; i++) {
		struct lscpu_cache *ca = &ct->caches[i];
		cpu_set_t *map;

		if (ul_path_accessf(sys, F_OK, "cpu%d/cache/index%zu", num, i) != 0)
			continue;

		if (!ca->name) {
			int type = 0;

			/* cache type */
			if (ul_path_readf_string(sys, &ca->type,
					"cpu%d/cache/index%zu/type", num, i) > 0) {
				if (!strcmp(ca->type, "Data"))
					type = 'd';
				else if (!strcmp(ca->type, "Instruction"))
					type = 'i';
			}

			/* cache level */
			ul_path_readf_s32(sys, &ca->level,
					"cpu%d/cache/index%zu/level", num, i);
			if (type)
				snprintf(buf, sizeof(buf), "L%d%c", ca->level, type);
			else
				snprintf(buf, sizeof(buf), "L%d", ca->level);

			ca->name = xstrdup(buf);

			ul_path_readf_u32(sys, &ca->ways_of_associativity,
					"cpu%d/cache/index%zu/ways_of_associativity", num, i);
			ul_path_readf_u32(sys, &ca->physical_line_partition,
					"cpu%d/cache/index%zu/physical_line_partition", num, i);
			ul_path_readf_u32(sys, &ca->number_of_sets,
					"cpu%d/cache/index%zu/number_of_sets", num, i);
			ul_path_readf_u32(sys, &ca->coherency_line_size,
					"cpu%d/cache/index%zu/coherency_line_size", num, i);

			ul_path_readf_string(sys, &ca->allocation_policy,
					"cpu%d/cache/index%zu/allocation_policy", num, i);
			ul_path_readf_string(sys, &ca->write_policy,
					"cpu%d/cache/index%zu/write_policy", num, i);

			/* cache size */
			if (ul_path_readf_buffer(sys, buf, sizeof(buf),
					"cpu%d/cache/index%zu/size", num, i) > 0)
				parse_size(buf, &ca->size, NULL);
			else
				ca->size = 0;
		}

		/* information about how CPUs share different caches */
		ul_path_readf_cpuset(sys, &map, cxt->maxcpus,
				"cpu%d/cache/index%zu/shared_cpu_map", num, i);

		if (!ca->sharedmaps)
			ca->sharedmaps = xcalloc(cxt->npossibles, sizeof(cpu_set_t *));

		add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map, setsize);
	}

	return 0;
}

/* Read caches for the specified CPU type */
static int cputype_read_caches(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
	size_t i;
	int rc = 0;

	DBG(TYPE, ul_debugobj(ct, "reading %s/%s/%s caches",
				ct->vendor ?: "", ct->model ?: "", ct->modelname ?: ""));

	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || cpu->type != ct)
			continue;
		rc = read_caches(cxt, ct, cpu);
		if (rc)
			break;
	}

	lscpu_sort_caches(ct->caches, ct->ncaches);
	return rc;
}

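/* Read core/socket/book/drawer IDs for @cpu from the sysfs topology
 * directory; an unreadable ID file is reported as -1.
 */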
static int read_ids(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/topology", num) != 0)
		return 0;

	DBG(CPU, ul_debugobj(cpu, "#%d reading IDs", num));

	if (ul_path_readf_s32(sys, &cpu->coreid, "cpu%d/topology/core_id", num) != 0)
		cpu->coreid = -1;
	if (ul_path_readf_s32(sys, &cpu->socketid, "cpu%d/topology/physical_package_id", num) != 0)
		cpu->socketid = -1;
	if (ul_path_readf_s32(sys, &cpu->bookid, "cpu%d/topology/book_id", num) != 0)
		cpu->bookid = -1;
	if (ul_path_readf_s32(sys, &cpu->drawerid, "cpu%d/topology/drawer_id", num) != 0)
		cpu->drawerid = -1;

	return 0;
}

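/* Read the CPU polarization (horizontal/vertical dispatching mode, s390) */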
static int read_polarization(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	char mode[64];

	if (ul_path_accessf(sys, F_OK, "cpu%d/polarization", num) != 0)
		return 0;

	ul_path_readf_buffer(sys, mode, sizeof(mode), "cpu%d/polarization", num);

	DBG(CPU, ul_debugobj(cpu, "#%d polar=%s", num, mode));

	if (strncmp(mode, "vertical:low", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VLOW;
	else if (strncmp(mode, "vertical:medium", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VMEDIUM;
	else if (strncmp(mode, "vertical:high", sizeof(mode)) == 0)
		cpu->polarization = POLAR_VHIGH;
	else if (strncmp(mode, "horizontal", sizeof(mode)) == 0)
		cpu->polarization = POLAR_HORIZONTAL;
	else
		cpu->polarization = POLAR_UNKNOWN;

	return 0;
}

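/* Read the physical CPU address, if exported (e.g. on s390) */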
static int read_address(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/address", num) != 0)
		return 0;

	ul_path_readf_s32(sys, &cpu->address, "cpu%d/address", num);
	return 0;
}

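/* Read the "configured" state, if exported (e.g. on s390) */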
static int read_configure(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;

	if (ul_path_accessf(sys, F_OK, "cpu%d/configure", num) != 0)
		return 0;

	ul_path_readf_s32(sys, &cpu->configured, "cpu%d/configure", num);
	return 0;
}

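/* Read min/max CPU frequency; cpufreq exports kHz, lscpu stores MHz */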
static int read_mhz(struct lscpu_cxt *cxt, struct lscpu_cpu *cpu)
{
	struct path_cxt *sys = cxt->syscpu;
	int num = cpu->logical_id;
	int mhz;

	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_max_freq", num) == 0)
		cpu->mhz_max_freq = (float) mhz / 1000;
	if (ul_path_readf_s32(sys, &mhz, "cpu%d/cpufreq/cpuinfo_min_freq", num) == 0)
		cpu->mhz_min_freq = (float) mhz / 1000;
	return 0;
}

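/* Top-level reader: collect topology and caches for all CPU types, then
 * per-CPU IDs, polarization, address, configured state and frequencies.
 */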
int lscpu_read_topology(struct lscpu_cxt *cxt)
{
	size_t i;
	int rc = 0;

	for (i = 0; i < cxt->ncputypes; i++) {
		rc += cputype_read_topology(cxt, cxt->cputypes[i]);
		rc += cputype_read_caches(cxt, cxt->cputypes[i]);
	}

	for (i = 0; rc == 0 && i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (!cpu || !cpu->type)
			continue;

		rc = read_ids(cxt, cpu);
		if (!rc)
			rc = read_polarization(cxt, cpu);
		if (!rc)
			rc = read_address(cxt, cpu);
		if (!rc)
			rc = read_configure(cxt, cpu);
		if (!rc)
			rc = read_mhz(cxt, cpu);
	}

	return rc;
}