/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2020 Karel Zak <kzak@redhat.com>
 */
#include <sys/utsname.h>
#include <sys/personality.h>

#if defined(HAVE_LIBRTAS)
# include <librtas.h>
#endif

#include "lscpu.h"

#include "fileutils.h"
#include "c_strtod.h"

/* Lookup a pattern and get the value in the format "<pattern> : <value>"
 */
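/*
 * For illustration (not an exhaustive spec): given a line such as
 * "vendor_id       : GenuineIntel", lookup(line, "vendor_id", &v) stores a
 * newly allocated "GenuineIntel" in *v and returns 1; it returns 0 when the
 * pattern does not match or when *v has already been set.
 */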
int lookup(char *line, char *pattern, char **value)
{
        char *p, *v;
        int len = strlen(pattern);

        /* don't re-fill already found tags, first one wins */
        if (!*line || *value)
                return 0;
        /* pattern */
        if (strncmp(line, pattern, len))
                return 0;
        /* white spaces */
        for (p = line + len; isspace(*p); p++);

        /* separator */
        if (*p != ':')
                return 0;
        /* white spaces */
        for (++p; isspace(*p); p++);

        /* value */
        if (!*p)
                return 0;
        v = p;

        /* end of value */
        len = strlen(line) - 1;
        for (p = line + len; isspace(*(p-1)); p--);
        *p = '\0';

        *value = xstrdup(v);
        return 1;
}

struct lscpu_cputype *lscpu_new_cputype(void)
{
        struct lscpu_cputype *ct;

        ct = xcalloc(1, sizeof(struct lscpu_cputype));
        ct->refcount = 1;
        ct->dispatching = -1;
        ct->freqboost = -1;

        DBG(TYPE, ul_debugobj(ct, "alloc"));
        return ct;
}

void lscpu_ref_cputype(struct lscpu_cputype *ct)
{
        if (ct) {
                ct->refcount++;
                DBG(TYPE, ul_debugobj(ct, ">>> ref %d", ct->refcount));
        }
}

void lscpu_unref_cputype(struct lscpu_cputype *ct)
{
        if (!ct)
                return;

        /*DBG(TYPE, ul_debugobj(ct, ">>> unref %d", ct->refcount - 1));*/

        if (--ct->refcount <= 0) {
                DBG(TYPE, ul_debugobj(ct, " freeing %s/%s", ct->vendor, ct->model));
                lscpu_cputype_free_topology(ct);
                free(ct->vendor);
                free(ct->bios_vendor);
                free(ct->machinetype);  /* s390 */
                free(ct->family);
                free(ct->model);
                free(ct->modelname);
                free(ct->bios_modelname);
                free(ct->bios_family);
                free(ct->revision);     /* alternative for model (ppc) */
                free(ct->stepping);
                free(ct->bogomips);
                free(ct->flags);
                free(ct->mtid);         /* maximum thread id (s390) */
                free(ct->addrsz);       /* address sizes */
                free(ct->static_mhz);
                free(ct->dynamic_mhz);
                free(ct);
        }
}

struct lscpu_cputype *lscpu_cputype_get_default(struct lscpu_cxt *cxt)
{
        return cxt->cputypes ? cxt->cputypes[0] : NULL;
}

#define match(astr, bstr) \
                ((!astr && !bstr) || (astr && bstr && strcmp(astr, bstr) == 0))

struct lscpu_cputype *lscpu_add_cputype(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        DBG(TYPE, ul_debugobj(ct, "add new"));
        cxt->cputypes = xreallocarray(cxt->cputypes, cxt->ncputypes + 1,
                                      sizeof(struct lscpu_cputype *));
        cxt->cputypes[cxt->ncputypes] = ct;
        cxt->ncputypes++;
        lscpu_ref_cputype(ct);
        return ct;
}

static void fprintf_cputypes(FILE *f, struct lscpu_cxt *cxt)
{
        size_t i;

        for (i = 0; i < cxt->ncputypes; i++) {
                struct lscpu_cputype *ct = cxt->cputypes[i];

                fprintf(f, "\n vendor: %s\n", ct->vendor);
                fprintf(f, " machinetype: %s\n", ct->machinetype);
                fprintf(f, " family: %s\n", ct->family);
                fprintf(f, " model: %s\n", ct->model);
                fprintf(f, " modelname: %s\n", ct->modelname);
                fprintf(f, " revision: %s\n", ct->revision);
                fprintf(f, " stepping: %s\n", ct->stepping);
                fprintf(f, " mtid: %s\n", ct->mtid);
                fprintf(f, " addrsz: %s\n", ct->addrsz);
        }
}

enum {
        CPUINFO_LINE_UNKNOWN,   /* unknown line */
        CPUINFO_LINE_CPUTYPE,   /* line found in type_patterns[] */
        CPUINFO_LINE_CPU,       /* line found in cpu_patterns[] */
        CPUINFO_LINE_CACHE      /* line found in cache_patterns[] */
};

/* Describes /proc/cpuinfo fields */
struct cpuinfo_pattern {
        int id;                 /* field ID */
        int domain;             /* CPUINFO_LINE_* */
        const char *pattern;    /* field name as used in /proc/cpuinfo */
        size_t offset;          /* offset in lscpu_cputype or lscpu_cpu struct */
};

/* field identifiers (field name may be different on different archs) */
enum {
        PAT_ADDRESS_SIZES,
        PAT_BOGOMIPS,           /* global */
        PAT_BOGOMIPS_CPU,       /* per-cpu */
        PAT_CPU,
        PAT_FAMILY,
        PAT_FEATURES,
        PAT_FLAGS,
        PAT_IMPLEMENTER,
        PAT_MAX_THREAD_ID,
        PAT_MHZ,
        PAT_MHZ_DYNAMIC,
        PAT_MHZ_STATIC,
        PAT_MODEL,
        PAT_MODEL_NAME,
        PAT_PART,
        PAT_PROCESSOR,
        PAT_REVISION,
        PAT_STEPPING,
        PAT_TYPE,
        PAT_VARIANT,
        PAT_VENDOR,
        PAT_CACHE,
        PAT_ISA,
};

/*
 * /proc/cpuinfo to lscpu_cputype conversion
 */
#define DEF_PAT_CPUTYPE(_str, _id, _member) \
        { \
                .id = (_id), \
                .domain = CPUINFO_LINE_CPUTYPE, \
                .pattern = (_str), \
                .offset = offsetof(struct lscpu_cputype, _member), \
        }
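/*
 * For example (purely illustrative), the entry
 *   DEF_PAT_CPUTYPE("vendor_id", PAT_VENDOR, vendor)
 * expands to
 *   { .id = PAT_VENDOR, .domain = CPUINFO_LINE_CPUTYPE, .pattern = "vendor_id",
 *     .offset = offsetof(struct lscpu_cputype, vendor) }
 * so the value of a matching cpuinfo line is stored via strdup_to_offset()
 * into lscpu_cputype->vendor.
 */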

static const struct cpuinfo_pattern type_patterns[] =
{
        /* Keep sorted by field name! */
        DEF_PAT_CPUTYPE( "ASEs implemented",    PAT_FLAGS,      flags),         /* mips */
        DEF_PAT_CPUTYPE( "Address Sizes",       PAT_ADDRESS_SIZES, addrsz),     /* loongarch */
        DEF_PAT_CPUTYPE( "BogoMIPS",            PAT_BOGOMIPS,   bogomips),      /* aarch64 */
        DEF_PAT_CPUTYPE( "CPU Family",          PAT_FAMILY,     family),        /* loongarch */
        DEF_PAT_CPUTYPE( "CPU Revision",        PAT_REVISION,   revision),      /* loongarch */
        DEF_PAT_CPUTYPE( "CPU implementer",     PAT_IMPLEMENTER, vendor),       /* ARM and aarch64 */
        DEF_PAT_CPUTYPE( "CPU part",            PAT_PART,       model),         /* ARM and aarch64 */
        DEF_PAT_CPUTYPE( "CPU revision",        PAT_REVISION,   revision),      /* aarch64 */
        DEF_PAT_CPUTYPE( "CPU variant",         PAT_VARIANT,    stepping),      /* aarch64 */
        DEF_PAT_CPUTYPE( "Features",            PAT_FEATURES,   flags),         /* aarch64 */
        DEF_PAT_CPUTYPE( "ISA",                 PAT_ISA,        isa),           /* loongarch */
        DEF_PAT_CPUTYPE( "Model Name",          PAT_MODEL_NAME, modelname),     /* loongarch */
        DEF_PAT_CPUTYPE( "address sizes",       PAT_ADDRESS_SIZES, addrsz),     /* x86 */
        DEF_PAT_CPUTYPE( "bogomips per cpu",    PAT_BOGOMIPS,   bogomips),      /* s390 */
        DEF_PAT_CPUTYPE( "cpu",                 PAT_CPU,        modelname),     /* ppc, sparc */
        DEF_PAT_CPUTYPE( "cpu family",          PAT_FAMILY,     family),
        DEF_PAT_CPUTYPE( "cpu model",           PAT_MODEL,      model),         /* mips */
        DEF_PAT_CPUTYPE( "family",              PAT_FAMILY,     family),
        DEF_PAT_CPUTYPE( "features",            PAT_FEATURES,   flags),         /* s390 */
        DEF_PAT_CPUTYPE( "flags",               PAT_FLAGS,      flags),         /* x86 */
        DEF_PAT_CPUTYPE( "max thread id",       PAT_MAX_THREAD_ID, mtid),       /* s390 */
        DEF_PAT_CPUTYPE( "model",               PAT_MODEL,      model),
        DEF_PAT_CPUTYPE( "model name",          PAT_MODEL_NAME, modelname),
        DEF_PAT_CPUTYPE( "revision",            PAT_REVISION,   revision),
        DEF_PAT_CPUTYPE( "stepping",            PAT_STEPPING,   stepping),
        DEF_PAT_CPUTYPE( "type",                PAT_TYPE,       flags),         /* sparc64 */
        DEF_PAT_CPUTYPE( "vendor",              PAT_VENDOR,     vendor),
        DEF_PAT_CPUTYPE( "vendor_id",           PAT_VENDOR,     vendor),        /* s390 */
};

/*
 * /proc/cpuinfo to lscpu_cpu conversion
 */
#define DEF_PAT_CPU(_str, _id, _member) \
        { \
                .id = (_id), \
                .domain = CPUINFO_LINE_CPU, \
                .pattern = (_str), \
                .offset = offsetof(struct lscpu_cpu, _member), \
        }

static const struct cpuinfo_pattern cpu_patterns[] =
{
        /* Keep sorted by field name! */
        DEF_PAT_CPU( "CPU MHz",         PAT_MHZ,          mhz),         /* loongarch */
        DEF_PAT_CPU( "bogomips",        PAT_BOGOMIPS_CPU, bogomips),
        DEF_PAT_CPU( "cpu MHz",         PAT_MHZ,          mhz),
        DEF_PAT_CPU( "cpu MHz dynamic", PAT_MHZ_DYNAMIC,  dynamic_mhz), /* s390 */
        DEF_PAT_CPU( "cpu MHz static",  PAT_MHZ_STATIC,   static_mhz),  /* s390 */
        DEF_PAT_CPU( "cpu number",      PAT_PROCESSOR,    logical_id),  /* s390 */
        DEF_PAT_CPU( "processor",       PAT_PROCESSOR,    logical_id),

};

/*
 * /proc/cpuinfo to lscpu_cache conversion
 */
#define DEF_PAT_CACHE(_str, _id) \
        { \
                .id = (_id), \
                .domain = CPUINFO_LINE_CACHE, \
                .pattern = (_str) \
        }

static const struct cpuinfo_pattern cache_patterns[] =
{
        /* Keep sorted by field name! */
        DEF_PAT_CACHE("cache", PAT_CACHE),
};

#define CPUTYPE_PATTERN_BUFSZ   32

static int cmp_pattern(const void *a0, const void *b0)
{
        const struct cpuinfo_pattern
                *a = (const struct cpuinfo_pattern *) a0,
                *b = (const struct cpuinfo_pattern *) b0;
        return strcmp(a->pattern, b->pattern);
}
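/*
 * Note: cpuinfo_parse_line() below looks field names up with bsearch() and
 * cmp_pattern(), so the three pattern tables above must stay sorted by their
 * .pattern strings in plain strcmp() order (upper-case names before
 * lower-case ones).
 */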

struct cpuinfo_parser {
        struct lscpu_cxt *cxt;
        struct lscpu_cpu *curr_cpu;
        struct lscpu_cputype *curr_type;
        unsigned int curr_type_added : 1;
};

static int is_different_cputype(struct lscpu_cputype *ct, size_t offset, const char *value)
{
        switch (offset) {
        case offsetof(struct lscpu_cputype, vendor):
                return ct->vendor && value && strcmp(ct->vendor, value) != 0;
        case offsetof(struct lscpu_cputype, model):
                return ct->model && value && strcmp(ct->model, value) != 0;
        case offsetof(struct lscpu_cputype, modelname):
                return ct->modelname && value && strcmp(ct->modelname, value) != 0;
        case offsetof(struct lscpu_cputype, stepping):
                return ct->stepping && value && strcmp(ct->stepping, value) != 0;
        }
        return 0;
}

/* Canonicalize @str -- remove the number at the end and return that number
 * via @keynum. This is usable for example for "processor 5" or "cache1"
 * cpuinfo lines. */
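/*
 * For example, key_cleanup("cache1", &n) returns "cache" and sets n to 1,
 * while a key without a trailing number (e.g. "vendor_id") is returned
 * unchanged and *keynum is left untouched.
 */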
static char *key_cleanup(char *str, int *keynum)
{
        size_t sz = rtrim_whitespace((unsigned char *)str);
        size_t i;

        if (!sz)
                return str;

        for (i = sz; i > 0; i--) {
                if (!isdigit(str[i - 1]))
                        break;
        }

        if (i < sz) {
                char *end = NULL, *p = str + i;
                int n;

                errno = 0;
                n = strtol(p, &end, 10);
                if (errno || !end || end == p)
                        return str;

                *keynum = n;
                str[i] = '\0';
                rtrim_whitespace((unsigned char *)str);
        }
        return str;
}

static const struct cpuinfo_pattern *cpuinfo_parse_line(char *str, char **value, int *keynum)
{
        struct cpuinfo_pattern key = { .id = 0 }, *pat;
        char *p, *v;
        char buf[CPUTYPE_PATTERN_BUFSZ] = { 0 };

        DBG(GATHER, ul_debug("parse \"%s\"", str));

        if (!str || !*str)
                return NULL;
        p = (char *) skip_blank(str);
        if (!p || !*p)
                return NULL;

        v = strchr(p, ':');
        if (!v || !*v)
                return NULL;

        /* prepare the name of the field; reject names that would not fit
         * into the pattern buffer (they cannot match any known pattern) */
        if ((size_t) (v - p) >= sizeof(buf))
                return NULL;
        xstrncpy(buf, p, sizeof(buf));
        buf[v - p] = '\0';
        v++;

        /* prepare value */
        v = (char *) skip_space(v);
        if (!v || !*v)
                return NULL;

        key.pattern = key_cleanup(buf, keynum);
        /* CPU-type */
        if ((pat = bsearch(&key, type_patterns,
                           ARRAY_SIZE(type_patterns),
                           sizeof(struct cpuinfo_pattern),
                           cmp_pattern)))
                goto found;

        /* CPU */
        if ((pat = bsearch(&key, cpu_patterns,
                           ARRAY_SIZE(cpu_patterns),
                           sizeof(struct cpuinfo_pattern),
                           cmp_pattern)))
                goto found;

        /* CACHE */
        if ((pat = bsearch(&key, cache_patterns,
                           ARRAY_SIZE(cache_patterns),
                           sizeof(struct cpuinfo_pattern),
                           cmp_pattern)))
                goto found;

        return NULL;
found:
        rtrim_whitespace((unsigned char *) v);
        *value = v;
        return pat;
}
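/*
 * For illustration: an x86 line such as "model name : AMD EPYC 7543" matches
 * the "model name" entry in type_patterns[] and *value ends up pointing at
 * "AMD EPYC 7543"; an s390 line such as "cache0 : level=1 ..." matches the
 * "cache" entry in cache_patterns[] with *keynum set to 0.
 */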

/* Parse extra cache lines contained within /proc/cpuinfo but which are not
 * part of the cache topology information within the sysfs filesystem. This is
 * true for all shared caches on e.g. s390. When there are layers of
 * hypervisors in between, it is not known which CPUs share which caches.
 * Therefore information about shared caches is only available in
 * /proc/cpuinfo. The format is:
 *
 * cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>
 *
 * The cache<nr> part is parsed in cpuinfo_parse_line(); this function parses
 * the part after the ":".
 */
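/*
 * A typical s390 line (values purely illustrative):
 *   cache3 : level=4 type=Unified scope=Shared size=491520K line_size=256 associativity=30
 * becomes an extra "L4" cache with nth=3; note that the size= field is read
 * as KiB and stored in bytes (size * 1024).
 */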
static int cpuinfo_parse_cache(struct lscpu_cxt *cxt, int keynum, char *data)
{
        struct lscpu_cache *cache;
        long long size;
        char *p, type;
        int level;
        unsigned int line_size, associativity;

        DBG(GATHER, ul_debugobj(cxt, " parse cpuinfo cache '%s'", data));

        p = strstr(data, "scope=");
        /* Skip private caches, also present in sysfs */
        if (!p || strncmp(p + 6, "Private", 7) == 0)
                return 0;
        p = strstr(data, "level=");
        if (!p || sscanf(p, "level=%d", &level) != 1)
                return 0;
        p = strstr(data, "type=");
        if (!p || !*(p + 5))
                return 0;
        p += 5;
        type = 0;
        if (strncmp(p, "Data", 4) == 0)
                type = 'd';
        else if (strncmp(p, "Instruction", 11) == 0)
                type = 'i';
        else if (strncmp(p, "Unified", 7) == 0)
                type = 'u';
        p = strstr(data, "size=");
        if (!p || sscanf(p, "size=%lld", &size) != 1)
                return 0;

        p = strstr(data, "line_size=");
        if (!p || sscanf(p, "line_size=%u", &line_size) != 1)
                return 0;

        p = strstr(data, "associativity=");
        if (!p || sscanf(p, "associativity=%u", &associativity) != 1)
                return 0;

        cxt->necaches++;
        cxt->ecaches = xreallocarray(cxt->ecaches,
                                     cxt->necaches, sizeof(struct lscpu_cache));
        cache = &cxt->ecaches[cxt->necaches - 1];
        memset(cache, 0, sizeof(*cache));

        if (type == 'i' || type == 'd')
                xasprintf(&cache->name, "L%d%c", level, type);
        else
                xasprintf(&cache->name, "L%d", level);

        cache->nth = keynum;
        cache->level = level;
        cache->size = size * 1024;
        cache->ways_of_associativity = associativity;
        cache->coherency_line_size = line_size;
        /* Number of sets for s390. For safety, guard against division by zero */
        cache->number_of_sets = line_size ? (cache->size / line_size) : 0;
        cache->number_of_sets = associativity ? (cache->number_of_sets / associativity) : 0;

        cache->type = type == 'i' ? xstrdup("Instruction") :
                      type == 'd' ? xstrdup("Data") :
                      type == 'u' ? xstrdup("Unified") : NULL;
        return 1;
}

int lscpu_read_cpuinfo(struct lscpu_cxt *cxt)
{
        FILE *fp;
        /* Used to be BUFSIZ, which is small on some platforms (e.g. musl);
         * therefore hardcode to 4K */
        char buf[4096];
        size_t i;
        struct lscpu_cputype *ct;
        struct cpuinfo_parser _pr = { .cxt = cxt }, *pr = &_pr;

        assert(cxt->npossibles);        /* lscpu_create_cpus() required */
        assert(cxt->cpus);

        DBG(GATHER, ul_debugobj(cxt, "reading cpuinfo"));

        fp = ul_path_fopen(cxt->procfs, "r", "cpuinfo");
        if (!fp)
                err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");

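        /*
         * Process cpuinfo line by line: "processor"-like lines switch the
         * current CPU, other per-CPU lines fill struct lscpu_cpu, CPU-type
         * lines fill (and, when the values change, create) struct
         * lscpu_cputype, and "cache<n>" lines are collected as extra caches.
         */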
        do {
                int keynum = -1;
                char *p = NULL, *value = NULL;
                const struct cpuinfo_pattern *pattern;

                if (fgets(buf, sizeof(buf), fp) != NULL)
                        p = (char *) skip_space(buf);

                if (p == NULL || (*buf && !*p)) {
                        /* Blank line separates information */
                        if (p == NULL)
                                break;  /* fgets() returns nothing; EOF */
                        continue;
                }

                rtrim_whitespace((unsigned char *) buf);

                /* parse */
                pattern = cpuinfo_parse_line(p, &value, &keynum);
                if (!pattern) {
                        DBG(GATHER, ul_debug("'%s' not found", buf));
                        continue;
                }

                /* set data */
                switch (pattern->domain) {
                case CPUINFO_LINE_CPU:
                        if (pattern->id == PAT_PROCESSOR) {
                                /* switch CPU */
                                int id = 0;

                                if (keynum >= 0)
                                        id = keynum;
                                else {
                                        uint32_t n;
                                        if (ul_strtou32(value, &n, 10) == 0)
                                                id = n;
                                }

                                if (pr->curr_cpu && pr->curr_type)
                                        lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);

                                lscpu_unref_cpu(pr->curr_cpu);
                                pr->curr_cpu = lscpu_get_cpu(cxt, id);

                                if (!pr->curr_cpu)
                                        DBG(GATHER, ul_debug("*** cpu ID '%d' undefined", id));
                                else
                                        DBG(GATHER, ul_debug(" switch to CPU %d", id));
                                lscpu_ref_cpu(pr->curr_cpu);
                                break;
                        }
                        if (!pr->curr_cpu)
                                DBG(GATHER, ul_debug("*** cpu data before cpu ID"));
                        else
                                strdup_to_offset(pr->curr_cpu, pattern->offset, value);

                        if (pattern->id == PAT_MHZ_DYNAMIC && pr->curr_type && !pr->curr_type->dynamic_mhz)
                                pr->curr_type->dynamic_mhz = xstrdup(value);
                        if (pattern->id == PAT_MHZ_STATIC && pr->curr_type && !pr->curr_type->static_mhz)
                                pr->curr_type->static_mhz = xstrdup(value);
                        if (pattern->id == PAT_BOGOMIPS_CPU && pr->curr_type && !pr->curr_type->bogomips)
                                pr->curr_type->bogomips = xstrdup(value);
                        if (pattern->id == PAT_MHZ && pr->curr_cpu && value) {
                                errno = 0;
                                pr->curr_cpu->mhz_cur_freq = (float) c_strtod(value, NULL);
                                if (errno)
                                        pr->curr_cpu->mhz_cur_freq = 0;
                        }
                        break;
                case CPUINFO_LINE_CPUTYPE:
                        if (pr->curr_type && is_different_cputype(pr->curr_type, pattern->offset, value)) {
                                lscpu_unref_cputype(pr->curr_type);
                                pr->curr_type = NULL;
                        }
                        if (!pr->curr_type) {
                                pr->curr_type = lscpu_new_cputype();
                                lscpu_add_cputype(cxt, pr->curr_type);
                        }

                        strdup_to_offset(pr->curr_type, pattern->offset, value);
                        break;
                case CPUINFO_LINE_CACHE:
                        if (pattern->id != PAT_CACHE)
                                break;
                        cpuinfo_parse_cache(cxt, keynum, value);
                        break;
                }
        } while (1);

        DBG(GATHER, fprintf_cputypes(stderr, cxt));

        if (pr->curr_cpu && !pr->curr_cpu->type)
                lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);

        lscpu_unref_cputype(pr->curr_type);
        lscpu_unref_cpu(pr->curr_cpu);

        fclose(fp);
        lscpu_sort_caches(cxt->ecaches, cxt->necaches);

        /* Set the default type for CPUs that are missing (or not parsed)
         * in cpuinfo */
        ct = lscpu_cputype_get_default(cxt);
        for (i = 0; ct && i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (cpu && !cpu->type)
                        lscpu_cpu_set_type(cpu, ct);
        }

        return 0;
}

struct lscpu_arch *lscpu_read_architecture(struct lscpu_cxt *cxt)
{
        struct utsname utsbuf;
        struct lscpu_arch *ar;
        struct lscpu_cputype *ct;

        assert(cxt);

        DBG(GATHER, ul_debug("reading architecture"));

        if (uname(&utsbuf) == -1)
                err(EXIT_FAILURE, _("error: uname failed"));

        ar = xcalloc(1, sizeof(*cxt->arch));
        ar->name = xstrdup(utsbuf.machine);

        if (cxt->noalive)
                /* reading info from any /{sys,proc} dump, don't mix it with
                 * information about our real CPU */
                ;
        else {
#if defined(__alpha__) || defined(__ia64__)
                ar->bit64 = 1;  /* 64bit platforms only */
#endif
                /* platforms with 64bit flag in /proc/cpuinfo, define
                 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
                ar->bit32 = 1;
#endif

#if defined(__aarch64__)
                {
                        /* personality() is the most reliable way (since 4.7)
                         * to determine aarch32 support */
                        int pers = personality(PER_LINUX32);
                        if (pers != -1) {
                                personality(pers);
                                ar->bit32 = 1;
                        }
                        ar->bit64 = 1;
                }
#endif
        }

        ct = lscpu_cputype_get_default(cxt);
        if (ct && ct->flags) {
                char buf[BUFSIZ];

                snprintf(buf, sizeof(buf), " %s ", ct->flags);
                if (strstr(buf, " lm "))
                        ar->bit32 = ar->bit64 = 1;      /* x86_64 */
                if (strstr(buf, " zarch "))
                        ar->bit32 = ar->bit64 = 1;      /* s390x */
                if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
                        ar->bit32 = ar->bit64 = 1;      /* sparc64 */
        }

        if (ct && ct->isa) {
                char buf[BUFSIZ];

                snprintf(buf, sizeof(buf), " %s ", ct->isa);
                if (strstr(buf, " loongarch32 "))
                        ar->bit32 = 1;
                if (strstr(buf, " loongarch64 "))
                        ar->bit64 = 1;
        }

        if (ar->name && !cxt->noalive) {
                if (strcmp(ar->name, "ppc64") == 0)
                        ar->bit32 = 1, ar->bit64 = 1;
                else if (strcmp(ar->name, "ppc") == 0)
                        ar->bit32 = 1;
        }

        DBG(GATHER, ul_debugobj(ar, "arch: name=%s %s %s",
                                ar->name,
                                ar->bit64 ? "64-bit" : "",
                                ar->bit32 ? "32-bit" : ""));
        return ar;
}

void lscpu_free_architecture(struct lscpu_arch *ar)
{
        if (!ar)
                return;
        free(ar->name);
        free(ar);
}

int lscpu_read_cpulists(struct lscpu_cxt *cxt)
{
        cpu_set_t *cpuset = NULL;

        assert(cxt);
        DBG(GATHER, ul_debugobj(cxt, "reading cpulists"));

        if (ul_path_read_s32(cxt->syscpu, &cxt->maxcpus, "kernel_max") == 0)
                /* note that kernel_max is maximum index [NR_CPUS-1] */
                cxt->maxcpus += 1;

        else if (!cxt->noalive)
                /* the root is '/' so we are working with data from the current kernel */
                cxt->maxcpus = get_max_number_of_cpus();

        if (cxt->maxcpus <= 0)
                /* error or we are reading some /sys snapshot instead of the
                 * real /sys, let's use any crazy number... */
                cxt->maxcpus = 2048;

        cxt->setsize = CPU_ALLOC_SIZE(cxt->maxcpus);

        /* create CPUs from possible mask */
        if (ul_path_readf_cpulist(cxt->syscpu, &cpuset, cxt->maxcpus, "possible") == 0) {
                lscpu_create_cpus(cxt, cpuset, cxt->setsize);
                cpuset_free(cpuset);
                cpuset = NULL;
        } else
                err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
                    _PATH_SYS_CPU "/possible");

        /* get mask for present CPUs */
        if (ul_path_readf_cpulist(cxt->syscpu, &cxt->present, cxt->maxcpus, "present") == 0)
                cxt->npresents = CPU_COUNT_S(cxt->setsize, cxt->present);

        /* get mask for online CPUs */
        if (ul_path_readf_cpulist(cxt->syscpu, &cxt->online, cxt->maxcpus, "online") == 0)
                cxt->nonlines = CPU_COUNT_S(cxt->setsize, cxt->online);

        return 0;
}

#if defined(HAVE_LIBRTAS)
# define PROCESSOR_MODULE_INFO  43
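/*
 * The PROCESSOR_MODULE_INFO system parameter is interpreted below as a
 * sequence of big-endian 16-bit fields: [0] data length, [2] number of
 * module types, then [4] sockets, [6] chips and [8] cores per chip for the
 * first module type (see lscpu_read_archext()).
 */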
static int strbe16toh(const char *buf, int offset)
{
        /* cast to avoid sign extension of bytes >= 0x80 */
        return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
#endif

/* some extra information for the default CPU type */
int lscpu_read_archext(struct lscpu_cxt *cxt)
{
        FILE *f;
        char buf[BUFSIZ];
        struct lscpu_cputype *ct;

        DBG(GATHER, ul_debugobj(cxt, "reading extra arch info"));

        assert(cxt);
        ct = lscpu_cputype_get_default(cxt);
        if (!ct)
                return -EINVAL;

        /* get dispatching mode */
        if (ul_path_read_s32(cxt->syscpu, &ct->dispatching, "dispatching") != 0)
                ct->dispatching = -1;

        /* get cpufreq boost mode */
        if (ul_path_read_s32(cxt->syscpu, &ct->freqboost, "cpufreq/boost") != 0)
                ct->freqboost = -1;

        if ((f = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
                while (fgets(buf, sizeof(buf), f) != NULL) {
                        if (lookup(buf, "Type", &ct->machinetype))
                                break;
                }
                fclose(f);
        }

#if defined(HAVE_LIBRTAS)
        /* Get PowerPC specific info */
        if (!cxt->noalive) {
                int rc, len, ntypes;

                ct->physsockets = ct->physchips = ct->physcoresperchip = 0;

                rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
                if (rc < 0)
                        goto nortas;

                len = strbe16toh(buf, 0);
                if (len < 8)
                        goto nortas;

                ntypes = strbe16toh(buf, 2);
                if (!ntypes)
                        goto nortas;

                ct->physsockets = strbe16toh(buf, 4);
                ct->physchips = strbe16toh(buf, 6);
                ct->physcoresperchip = strbe16toh(buf, 8);
        }
nortas:
#endif
        return 0;
}

static int cmp_vulnerability_name(const void *a0, const void *b0)
{
        const struct lscpu_vulnerability
                *a = (const struct lscpu_vulnerability *) a0,
                *b = (const struct lscpu_vulnerability *) b0;
        return strcmp(a->name, b->name);
}

int lscpu_read_vulnerabilities(struct lscpu_cxt *cxt)
{
        struct dirent *d;
        DIR *dir;
        size_t n = 0;

        assert(cxt);

        DBG(GATHER, ul_debugobj(cxt, "reading vulnerabilities"));

        dir = ul_path_opendir(cxt->syscpu, "vulnerabilities");
        if (!dir)
                return 0;

        cxt->nvuls = n = 0;
        while (xreaddir(dir))
                n++;
        if (!n) {
                closedir(dir);
                return 0;
        }

        rewinddir(dir);
        cxt->vuls = xcalloc(n, sizeof(struct lscpu_vulnerability));

        while (cxt->nvuls < n && (d = xreaddir(dir))) {
                char *str, *p;
                struct lscpu_vulnerability *vu;

#ifdef _DIRENT_HAVE_D_TYPE
                if (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN)
                        continue;
#endif
                if (ul_path_readf_string(cxt->syscpu, &str,
                                         "vulnerabilities/%s", d->d_name) <= 0)
                        continue;

                vu = &cxt->vuls[cxt->nvuls++];

                /* Name */
                vu->name = xstrdup(d->d_name);
                *vu->name = toupper(*vu->name);
                strrep(vu->name, '_', ' ');

                /* Description */
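                /* e.g. "Mitigation: PTI" becomes "Mitigation; PTI"; strrem()
                 * below then drops any remaining ':' from the text */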
                vu->text = str;
                p = (char *) startswith(vu->text, "Mitigation");
                if (p) {
                        *p = ';';
                        strrem(vu->text, ':');
                }
        }
        closedir(dir);

        qsort(cxt->vuls, cxt->nvuls,
              sizeof(struct lscpu_vulnerability), cmp_vulnerability_name);

        return 0;
}

static inline int is_node_dirent(struct dirent *d)
{
        return
                d &&
#ifdef _DIRENT_HAVE_D_TYPE
                (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
                strncmp(d->d_name, "node", 4) == 0 &&
                isdigit_string(d->d_name + 4);
}

static int nodecmp(const void *ap, const void *bp)
{
        int *a = (int *) ap, *b = (int *) bp;
        return *a - *b;
}

int lscpu_read_numas(struct lscpu_cxt *cxt)
{
        size_t i = 0;
        DIR *dir;
        struct dirent *d;
        struct path_cxt *sys;

        assert(!cxt->nnodes);

        sys = ul_new_path(_PATH_SYS_NODE);
        if (!sys)
                err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);

        ul_path_set_prefix(sys, cxt->prefix);

        dir = ul_path_opendir(sys, NULL);
        if (!dir)
                goto done;

        while ((d = readdir(dir))) {
                if (is_node_dirent(d))
                        cxt->nnodes++;
        }

        if (!cxt->nnodes) {
                closedir(dir);
                goto done;
        }

        cxt->nodemaps = xcalloc(cxt->nnodes, sizeof(cpu_set_t *));
        cxt->idx2nodenum = xmalloc(cxt->nnodes * sizeof(int));

        rewinddir(dir);
        for (i = 0; (d = readdir(dir)) && i < cxt->nnodes;) {
                if (is_node_dirent(d))
                        cxt->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
                                        _("Failed to extract the node number"));
        }
        closedir(dir);
        qsort(cxt->idx2nodenum, cxt->nnodes, sizeof(int), nodecmp);

        /* information about how nodes share different CPUs */
        for (i = 0; i < cxt->nnodes; i++)
                ul_path_readf_cpuset(sys, &cxt->nodemaps[i], cxt->maxcpus,
                                     "node%d/cpumap", cxt->idx2nodenum[i]);
done:
        DBG(GATHER, ul_debugobj(cxt, "read %zu numas", cxt->nnodes));

        ul_unref_path(sys);
        return 0;
}