
#include <sys/utsname.h>
#include <sys/personality.h>

#if defined(HAVE_LIBRTAS)
# include <librtas.h>
#endif

#include "lscpu.h"

#include "fileutils.h"
#include "c_strtod.h"

/* Look up a pattern and get the value for the format "<pattern> : <value>"
 */
int lookup(char *line, char *pattern, char **value)
{
        char *p, *v;
        int len = strlen(pattern);

        /* don't re-fill already found tags, first one wins */
        if (!*line || *value)
                return 0;
        /* pattern */
        if (strncmp(line, pattern, len))
                return 0;
        /* white spaces */
        for (p = line + len; isspace(*p); p++);

        /* separator */
        if (*p != ':')
                return 0;
        /* white spaces */
        for (++p; isspace(*p); p++);

        /* value */
        if (!*p)
                return 0;
        v = p;

        /* end of value */
        len = strlen(line) - 1;
        for (p = line + len; isspace(*(p-1)); p--);
        *p = '\0';

        *value = xstrdup(v);
        return 1;
}
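
/*
 * Illustrative example (the input line is hypothetical, not from this file):
 * for a buffer containing "Type:  1234", the call
 *
 *     char *val = NULL;
 *     lookup(line, "Type", &val);
 *
 * skips the whitespace around ':', stores a newly allocated "1234" in val and
 * returns 1; later matching lines are ignored because the first hit wins.
 */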

struct lscpu_cputype *lscpu_new_cputype(void)
{
        struct lscpu_cputype *ct;

        ct = xcalloc(1, sizeof(struct lscpu_cputype));
        ct->refcount = 1;
        ct->dispatching = -1;
        ct->freqboost = -1;

        DBG(TYPE, ul_debugobj(ct, "alloc"));
        return ct;
}

void lscpu_ref_cputype(struct lscpu_cputype *ct)
{
        if (ct) {
                ct->refcount++;
                DBG(TYPE, ul_debugobj(ct, ">>> ref %d", ct->refcount));
        }
}

void lscpu_unref_cputype(struct lscpu_cputype *ct)
{
        if (!ct)
                return;

        /*DBG(TYPE, ul_debugobj(ct, ">>> unref %d", ct->refcount - 1));*/

        if (--ct->refcount <= 0) {
                DBG(TYPE, ul_debugobj(ct, " freeing %s/%s", ct->vendor, ct->model));
                lscpu_cputype_free_topology(ct);
                free(ct->vendor);
                free(ct->bios_vendor);
                free(ct->machinetype);  /* s390 */
                free(ct->family);
                free(ct->model);
                free(ct->modelname);
                free(ct->bios_modelname);
                free(ct->bios_family);
                free(ct->revision);     /* alternative for model (ppc) */
                free(ct->stepping);
                free(ct->bogomips);
                free(ct->flags);
                free(ct->mtid);         /* maximum thread id (s390) */
                free(ct->addrsz);       /* address sizes */
                free(ct->static_mhz);
                free(ct->dynamic_mhz);
                free(ct);
        }
}

struct lscpu_cputype *lscpu_cputype_get_default(struct lscpu_cxt *cxt)
{
        return cxt->cputypes ? cxt->cputypes[0] : NULL;
}

#define match(astr, bstr) \
        ((!astr && !bstr) || (astr && bstr && strcmp(astr, bstr) == 0))

struct lscpu_cputype *lscpu_add_cputype(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
{
        DBG(TYPE, ul_debugobj(ct, "add new"));
        cxt->cputypes = xreallocarray(cxt->cputypes, cxt->ncputypes + 1,
                                      sizeof(struct lscpu_cputype *));
        cxt->cputypes[cxt->ncputypes] = ct;
        cxt->ncputypes++;
        lscpu_ref_cputype(ct);
        return ct;
}

static void fprintf_cputypes(FILE *f, struct lscpu_cxt *cxt)
{
        size_t i;

        for (i = 0; i < cxt->ncputypes; i++) {
                struct lscpu_cputype *ct = cxt->cputypes[i];

                fprintf(f, "\n vendor: %s\n", ct->vendor);
                fprintf(f, " machinetype: %s\n", ct->machinetype);
                fprintf(f, " family: %s\n", ct->family);
                fprintf(f, " model: %s\n", ct->model);
                fprintf(f, " modelname: %s\n", ct->modelname);
                fprintf(f, " revision: %s\n", ct->revision);
                fprintf(f, " stepping: %s\n", ct->stepping);
                fprintf(f, " mtid: %s\n", ct->mtid);
                fprintf(f, " addrsz: %s\n", ct->addrsz);
        }
}

enum {
        CPUINFO_LINE_UNKNOWN,   /* unknown line */
        CPUINFO_LINE_CPUTYPE,   /* line found in type_patterns[] */
        CPUINFO_LINE_CPU,       /* line found in cpu_patterns[] */
        CPUINFO_LINE_CACHE      /* line found in cache_patterns[] */
};

/* Describes /proc/cpuinfo fields */
struct cpuinfo_pattern {
        int id;                 /* field ID */
        int domain;             /* CPUINFO_LINE_* */
        const char *pattern;    /* field name as used in /proc/cpuinfo */
        size_t offset;          /* offset in lscpu_cputype or lscpu_cpu struct */
};

/* field identifiers (field name may be different on different archs) */
enum {
        PAT_ADDRESS_SIZES,
        PAT_BOGOMIPS,           /* global */
        PAT_BOGOMIPS_CPU,       /* per-cpu */
        PAT_CPU,
        PAT_FAMILY,
        PAT_FEATURES,
        PAT_FLAGS,
        PAT_IMPLEMENTER,
        PAT_MAX_THREAD_ID,
        PAT_MHZ,
        PAT_MHZ_DYNAMIC,
        PAT_MHZ_STATIC,
        PAT_MODEL,
        PAT_MODEL_NAME,
        PAT_PART,
        PAT_PROCESSOR,
        PAT_REVISION,
        PAT_STEPPING,
        PAT_TYPE,
        PAT_VARIANT,
        PAT_VENDOR,
        PAT_CACHE,
        PAT_ISA,
};

/*
 * /proc/cpuinfo to lscpu_cputype conversion
 */
#define DEF_PAT_CPUTYPE(_str, _id, _member) \
        { \
                .id = (_id), \
                .domain = CPUINFO_LINE_CPUTYPE, \
                .pattern = (_str), \
                .offset = offsetof(struct lscpu_cputype, _member), \
        }

static const struct cpuinfo_pattern type_patterns[] =
{
        /* Sort by field name! */
        DEF_PAT_CPUTYPE( "ASEs implemented", PAT_FLAGS, flags),        /* mips */
        DEF_PAT_CPUTYPE( "Address Sizes", PAT_ADDRESS_SIZES, addrsz),  /* loongarch */
        DEF_PAT_CPUTYPE( "BogoMIPS", PAT_BOGOMIPS, bogomips),          /* aarch64 */
        DEF_PAT_CPUTYPE( "CPU Family", PAT_FAMILY, family),            /* loongarch */
        DEF_PAT_CPUTYPE( "CPU Revision", PAT_REVISION, revision),      /* loongarch */
        DEF_PAT_CPUTYPE( "CPU implementer", PAT_IMPLEMENTER, vendor),  /* ARM and aarch64 */
        DEF_PAT_CPUTYPE( "CPU part", PAT_PART, model),                 /* ARM and aarch64 */
        DEF_PAT_CPUTYPE( "CPU revision", PAT_REVISION, revision),      /* aarch64 */
        DEF_PAT_CPUTYPE( "CPU variant", PAT_VARIANT, stepping),        /* aarch64 */
        DEF_PAT_CPUTYPE( "Features", PAT_FEATURES, flags),             /* aarch64 */
        DEF_PAT_CPUTYPE( "ISA", PAT_ISA, isa),                         /* loongarch */
        DEF_PAT_CPUTYPE( "Model Name", PAT_MODEL_NAME, modelname),     /* loongarch */
        DEF_PAT_CPUTYPE( "address sizes", PAT_ADDRESS_SIZES, addrsz),  /* x86 */
        DEF_PAT_CPUTYPE( "bogomips per cpu", PAT_BOGOMIPS, bogomips),  /* s390 */
        DEF_PAT_CPUTYPE( "cpu", PAT_CPU, modelname),                   /* ppc, sparc */
        DEF_PAT_CPUTYPE( "cpu family", PAT_FAMILY, family),
        DEF_PAT_CPUTYPE( "cpu model", PAT_MODEL, model),               /* mips */
        DEF_PAT_CPUTYPE( "family", PAT_FAMILY, family),
        DEF_PAT_CPUTYPE( "features", PAT_FEATURES, flags),             /* s390 */
        DEF_PAT_CPUTYPE( "flags", PAT_FLAGS, flags),                   /* x86 */
        DEF_PAT_CPUTYPE( "max thread id", PAT_MAX_THREAD_ID, mtid),    /* s390 */
        DEF_PAT_CPUTYPE( "model", PAT_MODEL, model),
        DEF_PAT_CPUTYPE( "model name", PAT_MODEL_NAME, modelname),
        DEF_PAT_CPUTYPE( "revision", PAT_REVISION, revision),
        DEF_PAT_CPUTYPE( "stepping", PAT_STEPPING, stepping),
        DEF_PAT_CPUTYPE( "type", PAT_TYPE, flags),                     /* sparc64 */
        DEF_PAT_CPUTYPE( "vendor", PAT_VENDOR, vendor),
        DEF_PAT_CPUTYPE( "vendor_id", PAT_VENDOR, vendor),             /* s390 */
};

/*
 * /proc/cpuinfo to lscpu_cpu conversion
 */
#define DEF_PAT_CPU(_str, _id, _member) \
        { \
                .id = (_id), \
                .domain = CPUINFO_LINE_CPU, \
                .pattern = (_str), \
                .offset = offsetof(struct lscpu_cpu, _member), \
        }

static const struct cpuinfo_pattern cpu_patterns[] =
{
        /* Sort by field name! */
        DEF_PAT_CPU( "CPU MHz", PAT_MHZ, mhz),                          /* loongarch */
        DEF_PAT_CPU( "bogomips", PAT_BOGOMIPS_CPU, bogomips),
        DEF_PAT_CPU( "cpu MHz", PAT_MHZ, mhz),
        DEF_PAT_CPU( "cpu MHz dynamic", PAT_MHZ_DYNAMIC, dynamic_mhz),  /* s390 */
        DEF_PAT_CPU( "cpu MHz static", PAT_MHZ_STATIC, static_mhz),     /* s390 */
        DEF_PAT_CPU( "cpu number", PAT_PROCESSOR, logical_id),          /* s390 */
        DEF_PAT_CPU( "processor", PAT_PROCESSOR, logical_id),

};

/*
 * /proc/cpuinfo to lscpu_cache conversion
 */
#define DEF_PAT_CACHE(_str, _id) \
        { \
                .id = (_id), \
                .domain = CPUINFO_LINE_CACHE, \
                .pattern = (_str) \
        }

static const struct cpuinfo_pattern cache_patterns[] =
{
        /* Sort by field name! */
        DEF_PAT_CACHE("cache", PAT_CACHE),
};
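
/*
 * Note: the three pattern tables above are consumed by bsearch() in
 * cpuinfo_parse_line() below (with cmp_pattern() as the comparator), which is
 * why each table has to stay sorted by its .pattern string, as the
 * "Sort by field name!" comments request.
 */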

#define CPUTYPE_PATTERN_BUFSZ 32

static int cmp_pattern(const void *a0, const void *b0)
{
        const struct cpuinfo_pattern
                *a = (const struct cpuinfo_pattern *) a0,
                *b = (const struct cpuinfo_pattern *) b0;
        return strcmp(a->pattern, b->pattern);
}

struct cpuinfo_parser {
        struct lscpu_cxt        *cxt;
        struct lscpu_cpu        *curr_cpu;
        struct lscpu_cputype    *curr_type;
        unsigned int            curr_type_added : 1;
};

static int is_different_cputype(struct lscpu_cputype *ct, size_t offset, const char *value)
{
        switch (offset) {
        case offsetof(struct lscpu_cputype, vendor):
                return ct->vendor && value && strcmp(ct->vendor, value) != 0;
        case offsetof(struct lscpu_cputype, model):
                return ct->model && value && strcmp(ct->model, value) != 0;
        case offsetof(struct lscpu_cputype, modelname):
                return ct->modelname && value && strcmp(ct->modelname, value) != 0;
        case offsetof(struct lscpu_cputype, stepping):
                return ct->stepping && value && strcmp(ct->stepping, value) != 0;
        }
        return 0;
}
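
/*
 * Note: on a hybrid system (e.g. an ARM big.LITTLE machine) the "CPU part"
 * field, mapped to ->model above, changes between core clusters; when
 * is_different_cputype() reports such a change, the parser below starts a
 * new lscpu_cputype instead of overwriting the current one.
 */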

/* canonicalize @str -- remove the number at the end and return that number
 * via @keynum. This is usable for example for "processor 5" or "cache1"
 * cpuinfo lines */
static char *key_cleanup(char *str, int *keynum)
{
        size_t sz = rtrim_whitespace((unsigned char *)str);
        size_t i;

        if (!sz)
                return str;

        for (i = sz; i > 0; i--) {
                if (!isdigit(str[i - 1]))
                        break;
        }

        if (i < sz) {
                char *end = NULL, *p = str + i;
                int n;

                errno = 0;
                n = strtol(p, &end, 10);
                if (errno || !end || end == p)
                        return str;

                *keynum = n;
                str[i] = '\0';
                rtrim_whitespace((unsigned char *)str);
        }
        return str;
}
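
/*
 * Illustrative example (values are hypothetical): for str = "cache3" and an
 * int n, key_cleanup(str, &n) rewrites str to "cache", stores 3 in n and
 * returns str; a key without a trailing number, e.g. "flags", is returned
 * unchanged and n is left untouched.
 */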

static const struct cpuinfo_pattern *cpuinfo_parse_line(char *str, char **value, int *keynum)
{
        struct cpuinfo_pattern key = { .id = 0 }, *pat;
        char *p, *v;
        char buf[CPUTYPE_PATTERN_BUFSZ] = { 0 };

        DBG(GATHER, ul_debug("parse \"%s\"", str));

        if (!str || !*str)
                return NULL;
        p = (char *) skip_blank(str);
        if (!p || !*p)
                return NULL;

        v = strchr(p, ':');
        if (!v || !*v)
                return NULL;

        /* prepare name of the field; ignore names too long for the buffer */
        if ((size_t) (v - p) >= sizeof(buf))
                return NULL;
        xstrncpy(buf, p, sizeof(buf));
        buf[v - p] = '\0';
        v++;

        /* prepare value */
        v = (char *) skip_space(v);
        if (!v || !*v)
                return NULL;

        key.pattern = key_cleanup(buf, keynum);
        /* CPU-type */
        if ((pat = bsearch(&key, type_patterns,
                           ARRAY_SIZE(type_patterns),
                           sizeof(struct cpuinfo_pattern),
                           cmp_pattern)))
                goto found;

        /* CPU */
        if ((pat = bsearch(&key, cpu_patterns,
                           ARRAY_SIZE(cpu_patterns),
                           sizeof(struct cpuinfo_pattern),
                           cmp_pattern)))
                goto found;

        /* CACHE */
        if ((pat = bsearch(&key, cache_patterns,
                           ARRAY_SIZE(cache_patterns),
                           sizeof(struct cpuinfo_pattern),
                           cmp_pattern)))
                goto found;

        return NULL;
found:
        rtrim_whitespace((unsigned char *) v);
        *value = v;
        return pat;
}
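
/*
 * Illustrative example (hypothetical input): for the /proc/cpuinfo line
 * "processor       : 12" this returns the "processor" entry from
 * cpu_patterns[], sets *value to "12" and leaves *keynum untouched, while
 * "cache1 : level=1 ..." matches the "cache" entry and sets *keynum to 1.
 */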

/* Parse extra cache lines contained within /proc/cpuinfo but which are not
 * part of the cache topology information within the sysfs filesystem. This is
 * true for all shared caches on e.g. s390. When there are layers of
 * hypervisors in between, it is not known which CPUs share which caches.
 * Therefore information about shared caches is only available in
 * /proc/cpuinfo. The format is:
 *
 * cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>
 *
 * The cache<nr> part is parsed in cpuinfo_parse_line(); this function parses
 * the part after the ":".
 */
static int cpuinfo_parse_cache(struct lscpu_cxt *cxt, int keynum, char *data)
{
        struct lscpu_cache *cache;
        long long size;
        char *p, type;
        int level;
        unsigned int line_size, associativity;

        DBG(GATHER, ul_debugobj(cxt, " parse cpuinfo cache '%s'", data));

        /* check the strstr() result before adding the offset, otherwise a
         * missing field turns into undefined pointer arithmetic on NULL */
        p = strstr(data, "scope=");
        /* Skip private caches, also present in sysfs */
        if (!p || strncmp(p + 6, "Private", 7) == 0)
                return 0;
        p = strstr(data, "level=");
        if (!p || sscanf(p, "level=%d", &level) != 1)
                return 0;
        p = strstr(data, "type=");
        if (!p || !*(p + 5))
                return 0;
        p += 5;
        type = 0;
        if (strncmp(p, "Data", 4) == 0)
                type = 'd';
        else if (strncmp(p, "Instruction", 11) == 0)
                type = 'i';
        else if (strncmp(p, "Unified", 7) == 0)
                type = 'u';
        p = strstr(data, "size=");
        if (!p || sscanf(p, "size=%lld", &size) != 1)
                return 0;

        p = strstr(data, "line_size=");
        if (!p || sscanf(p, "line_size=%u", &line_size) != 1)
                return 0;

        p = strstr(data, "associativity=");
        if (!p || sscanf(p, "associativity=%u", &associativity) != 1)
                return 0;

        cxt->necaches++;
        cxt->ecaches = xreallocarray(cxt->ecaches,
                                     cxt->necaches, sizeof(struct lscpu_cache));
        cache = &cxt->ecaches[cxt->necaches - 1];
        memset(cache, 0, sizeof(*cache));

        if (type == 'i' || type == 'd')
                xasprintf(&cache->name, "L%d%c", level, type);
        else
                xasprintf(&cache->name, "L%d", level);

        cache->nth = keynum;
        cache->level = level;
        cache->size = size * 1024;
        cache->ways_of_associativity = associativity;
        cache->coherency_line_size = line_size;
        /* Number of sets for s390; guard against division by zero */
        cache->number_of_sets = line_size ? (cache->size / line_size) : 0;
        cache->number_of_sets = associativity ? (cache->number_of_sets / associativity) : 0;

        cache->type = type == 'i' ? xstrdup("Instruction") :
                      type == 'd' ? xstrdup("Data") :
                      type == 'u' ? xstrdup("Unified") : NULL;
        return 1;
}
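
/*
 * Illustrative example (numbers are made up): a line such as
 *
 *   cache3 : level=4 type=Unified scope=Shared size=491520 line_size=256 associativity=30
 *
 * ends up as an extra cache named "L4" with size 491520*1024 bytes, 256-byte
 * cache lines, 30-way associativity and the derived number of sets;
 * "scope=Private" lines are skipped because they are already in sysfs.
 */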

int lscpu_read_cpuinfo(struct lscpu_cxt *cxt)
{
        FILE *fp;
        /* Used to be BUFSIZ, which is small on some platforms (e.g. musl),
         * therefore hardcode to 4K */
        char buf[4096];
        size_t i;
        struct lscpu_cputype *ct;
        struct cpuinfo_parser _pr = { .cxt = cxt }, *pr = &_pr;

        assert(cxt->npossibles);        /* lscpu_create_cpus() required */
        assert(cxt->cpus);

        DBG(GATHER, ul_debugobj(cxt, "reading cpuinfo"));

        fp = ul_path_fopen(cxt->procfs, "r", "cpuinfo");
        if (!fp)
                err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");

        do {
                int keynum = -1;
                char *p = NULL, *value = NULL;
                const struct cpuinfo_pattern *pattern;

                if (fgets(buf, sizeof(buf), fp) != NULL)
                        p = (char *) skip_space(buf);

                if (p == NULL || (*buf && !*p)) {
                        /* Blank line separates information */
                        if (p == NULL)
                                break;  /* fgets() returns nothing; EOF */
                        continue;
                }

                rtrim_whitespace((unsigned char *) buf);

                /* parse */
                pattern = cpuinfo_parse_line(p, &value, &keynum);
                if (!pattern) {
                        DBG(GATHER, ul_debug("'%s' not found", buf));
                        continue;
                }

                /* set data */
                switch (pattern->domain) {
                case CPUINFO_LINE_CPU:
                        if (pattern->id == PAT_PROCESSOR) {
                                /* switch CPU */
                                int id = 0;

                                if (keynum >= 0)
                                        id = keynum;
                                else {
                                        uint32_t n;
                                        if (ul_strtou32(value, &n, 10) == 0)
                                                id = n;
                                }

                                if (pr->curr_cpu && pr->curr_type)
                                        lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);

                                lscpu_unref_cpu(pr->curr_cpu);
                                pr->curr_cpu = lscpu_get_cpu(cxt, id);

                                if (!pr->curr_cpu)
                                        DBG(GATHER, ul_debug("*** cpu ID '%d' undefined", id));
                                else
                                        DBG(GATHER, ul_debug(" switch to CPU %d", id));
                                lscpu_ref_cpu(pr->curr_cpu);
                                break;
                        }
                        if (!pr->curr_cpu)
                                DBG(GATHER, ul_debug("*** cpu data before cpu ID"));
                        else
                                strdup_to_offset(pr->curr_cpu, pattern->offset, value);

                        if (pattern->id == PAT_MHZ_DYNAMIC && pr->curr_type && !pr->curr_type->dynamic_mhz)
                                pr->curr_type->dynamic_mhz = xstrdup(value);
                        if (pattern->id == PAT_MHZ_STATIC && pr->curr_type && !pr->curr_type->static_mhz)
                                pr->curr_type->static_mhz = xstrdup(value);
                        if (pattern->id == PAT_BOGOMIPS_CPU && pr->curr_type && !pr->curr_type->bogomips)
                                pr->curr_type->bogomips = xstrdup(value);
                        if (pattern->id == PAT_MHZ && pr->curr_cpu && value) {
                                errno = 0;
                                pr->curr_cpu->mhz_cur_freq = (float) c_strtod(value, NULL);
                                if (errno)
                                        pr->curr_cpu->mhz_cur_freq = 0;
                        }
                        break;
                case CPUINFO_LINE_CPUTYPE:
                        if (pr->curr_type && is_different_cputype(pr->curr_type, pattern->offset, value)) {
                                lscpu_unref_cputype(pr->curr_type);
                                pr->curr_type = NULL;
                        }
                        if (!pr->curr_type) {
                                pr->curr_type = lscpu_new_cputype();
                                lscpu_add_cputype(cxt, pr->curr_type);
                        }

                        strdup_to_offset(pr->curr_type, pattern->offset, value);
                        break;
                case CPUINFO_LINE_CACHE:
                        if (pattern->id != PAT_CACHE)
                                break;
                        cpuinfo_parse_cache(cxt, keynum, value);
                        break;
                }
        } while (1);

        DBG(GATHER, fprintf_cputypes(stderr, cxt));

        if (pr->curr_cpu && !pr->curr_cpu->type)
                lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);

        lscpu_unref_cputype(pr->curr_type);
        lscpu_unref_cpu(pr->curr_cpu);

        fclose(fp);
        lscpu_sort_caches(cxt->ecaches, cxt->necaches);

        /* Set the default type for CPUs which are missing (or not parsed)
         * in cpuinfo */
        ct = lscpu_cputype_get_default(cxt);
        for (i = 0; ct && i < cxt->npossibles; i++) {
                struct lscpu_cpu *cpu = cxt->cpus[i];

                if (cpu && !cpu->type)
                        lscpu_cpu_set_type(cpu, ct);
        }

        return 0;
}

struct lscpu_arch *lscpu_read_architecture(struct lscpu_cxt *cxt)
{
        struct utsname utsbuf;
        struct lscpu_arch *ar;
        struct lscpu_cputype *ct;

        assert(cxt);

        DBG(GATHER, ul_debug("reading architecture"));

        if (uname(&utsbuf) == -1)
                err(EXIT_FAILURE, _("error: uname failed"));

        ar = xcalloc(1, sizeof(*cxt->arch));
        ar->name = xstrdup(utsbuf.machine);

        if (cxt->noalive)
                /* reading info from any /{sys,proc} dump, don't mix it with
                 * information about our real CPU */
                ;
        else {
#if defined(__alpha__) || defined(__ia64__)
                ar->bit64 = 1;  /* 64bit platforms only */
#endif
                /* platforms with 64bit flag in /proc/cpuinfo, define
                 * 32bit default here */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
                ar->bit32 = 1;
#endif

#if defined(__aarch64__)
                {
                        /* personality() is the most reliable way (since 4.7)
                         * to determine aarch32 support */
                        int pers = personality(PER_LINUX32);
                        if (pers != -1) {
                                personality(pers);
                                ar->bit32 = 1;
                        }
                        ar->bit64 = 1;
                }
#endif
        }

        ct = lscpu_cputype_get_default(cxt);
        if (ct && ct->flags) {
                char buf[BUFSIZ];

                snprintf(buf, sizeof(buf), " %s ", ct->flags);
                if (strstr(buf, " lm "))
                        ar->bit32 = 1, ar->bit64 = 1;   /* x86_64 */
                if (strstr(buf, " zarch "))
                        ar->bit32 = 1, ar->bit64 = 1;   /* s390x */
                if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
                        ar->bit32 = 1, ar->bit64 = 1;   /* sparc64 */
        }

        if (ct && ct->isa) {
                char buf[BUFSIZ];

                snprintf(buf, sizeof(buf), " %s ", ct->isa);
                if (strstr(buf, " loongarch32 "))
                        ar->bit32 = 1;
                if (strstr(buf, " loongarch64 "))
                        ar->bit64 = 1;
        }

        if (ar->name && !cxt->noalive) {
                if (strcmp(ar->name, "ppc64") == 0)
                        ar->bit32 = 1, ar->bit64 = 1;
                else if (strcmp(ar->name, "ppc") == 0)
                        ar->bit32 = 1;
        }

        DBG(GATHER, ul_debugobj(ar, "arch: name=%s %s %s",
                                ar->name,
                                ar->bit64 ? "64-bit" : "",
                                ar->bit32 ? "32-bit" : ""));
        return ar;
}

void lscpu_free_architecture(struct lscpu_arch *ar)
{
        if (!ar)
                return;
        free(ar->name);
        free(ar);
}

int lscpu_read_cpulists(struct lscpu_cxt *cxt)
{
        cpu_set_t *cpuset = NULL;

        assert(cxt);
        DBG(GATHER, ul_debugobj(cxt, "reading cpulists"));

        if (ul_path_read_s32(cxt->syscpu, &cxt->maxcpus, "kernel_max") == 0)
                /* note that kernel_max is maximum index [NR_CPUS-1] */
                cxt->maxcpus += 1;

        else if (!cxt->noalive)
                /* the root is '/' so we are working with data from the current kernel */
                cxt->maxcpus = get_max_number_of_cpus();

        if (cxt->maxcpus <= 0)
                /* error or we are reading some /sys snapshot instead of the
                 * real /sys, let's use any crazy number... */
                cxt->maxcpus = 2048;

        cxt->setsize = CPU_ALLOC_SIZE(cxt->maxcpus);

        /* create CPUs from possible mask */
        if (ul_path_readf_cpulist(cxt->syscpu, &cpuset, cxt->maxcpus, "possible") == 0) {
                lscpu_create_cpus(cxt, cpuset, cxt->setsize);
                cpuset_free(cpuset);
                cpuset = NULL;
        } else
                err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
                    _PATH_SYS_CPU "/possible");

        /* get mask for present CPUs */
        if (ul_path_readf_cpulist(cxt->syscpu, &cxt->present, cxt->maxcpus, "present") == 0)
                cxt->npresents = CPU_COUNT_S(cxt->setsize, cxt->present);

        /* get mask for online CPUs */
        if (ul_path_readf_cpulist(cxt->syscpu, &cxt->online, cxt->maxcpus, "online") == 0)
                cxt->nonlines = CPU_COUNT_S(cxt->setsize, cxt->online);

        return 0;
}

#if defined(HAVE_LIBRTAS)
# define PROCESSOR_MODULE_INFO  43
static int strbe16toh(const char *buf, int offset)
{
        return (buf[offset] << 8) + buf[offset+1];
}
#endif
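
/*
 * Illustrative note: strbe16toh() above decodes a big-endian 16-bit field
 * from the RTAS buffer, e.g. the bytes 0x01 0x2C at @offset yield 0x012C
 * (300); the fields read below are the buffer length, the number of module
 * types and the socket/chip/cores-per-chip counts.
 */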

/* some extra information for the default CPU type */
int lscpu_read_archext(struct lscpu_cxt *cxt)
{
        FILE *f;
        char buf[BUFSIZ];
        struct lscpu_cputype *ct;

        DBG(GATHER, ul_debugobj(cxt, "reading extra arch info"));

        assert(cxt);
        ct = lscpu_cputype_get_default(cxt);
        if (!ct)
                return -EINVAL;

        /* get dispatching mode */
        if (ul_path_read_s32(cxt->syscpu, &ct->dispatching, "dispatching") != 0)
                ct->dispatching = -1;

        /* get cpufreq boost mode */
        if (ul_path_read_s32(cxt->syscpu, &ct->freqboost, "cpufreq/boost") != 0)
                ct->freqboost = -1;

        if ((f = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
                while (fgets(buf, sizeof(buf), f) != NULL) {
                        if (lookup(buf, "Type", &ct->machinetype))
                                break;
                }
                fclose(f);
        }

#if defined(HAVE_LIBRTAS)
        /* Get PowerPC specific info */
        if (!cxt->noalive) {
                int rc, len, ntypes;

                ct->physsockets = ct->physchips = ct->physcoresperchip = 0;

                rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
                if (rc < 0)
                        goto nortas;

                len = strbe16toh(buf, 0);
                if (len < 8)
                        goto nortas;

                ntypes = strbe16toh(buf, 2);
                if (!ntypes)
                        goto nortas;

                ct->physsockets = strbe16toh(buf, 4);
                ct->physchips = strbe16toh(buf, 6);
                ct->physcoresperchip = strbe16toh(buf, 8);
        }
nortas:
#endif
        return 0;
}

static int cmp_vulnerability_name(const void *a0, const void *b0)
{
        const struct lscpu_vulnerability
                *a = (const struct lscpu_vulnerability *) a0,
                *b = (const struct lscpu_vulnerability *) b0;
        return strcmp(a->name, b->name);
}

int lscpu_read_vulnerabilities(struct lscpu_cxt *cxt)
{
        struct dirent *d;
        DIR *dir;
        size_t n = 0;

        assert(cxt);

        DBG(GATHER, ul_debugobj(cxt, "reading vulnerabilities"));

        dir = ul_path_opendir(cxt->syscpu, "vulnerabilities");
        if (!dir)
                return 0;

        cxt->nvuls = n = 0;
        while (xreaddir(dir))
                n++;
        if (!n) {
                closedir(dir);
                return 0;
        }

        rewinddir(dir);
        cxt->vuls = xcalloc(n, sizeof(struct lscpu_vulnerability));

        while (cxt->nvuls < n && (d = xreaddir(dir))) {
                char *str, *p;
                struct lscpu_vulnerability *vu;

#ifdef _DIRENT_HAVE_D_TYPE
                if (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN)
                        continue;
#endif
                if (ul_path_readf_string(cxt->syscpu, &str,
                                         "vulnerabilities/%s", d->d_name) <= 0)
                        continue;

                vu = &cxt->vuls[cxt->nvuls++];

                /* Name */
                vu->name = xstrdup(d->d_name);
                *vu->name = toupper(*vu->name);
                strrep(vu->name, '_', ' ');

                /* Description */
                vu->text = str;
                p = (char *) startswith(vu->text, "Mitigation");
                if (p) {
                        *p = ';';
                        strrem(vu->text, ':');
                }
        }
        closedir(dir);

        qsort(cxt->vuls, cxt->nvuls,
              sizeof(struct lscpu_vulnerability), cmp_vulnerability_name);

        return 0;
}

static inline int is_node_dirent(struct dirent *d)
{
        return
                d &&
#ifdef _DIRENT_HAVE_D_TYPE
                (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
                strncmp(d->d_name, "node", 4) == 0 &&
                isdigit_string(d->d_name + 4);
}

static int nodecmp(const void *ap, const void *bp)
{
        int *a = (int *) ap, *b = (int *) bp;
        return *a - *b;
}

int lscpu_read_numas(struct lscpu_cxt *cxt)
{
        size_t i = 0;
        DIR *dir;
        struct dirent *d;
        struct path_cxt *sys;

        assert(!cxt->nnodes);

        sys = ul_new_path(_PATH_SYS_NODE);
        if (!sys)
                err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);

        ul_path_set_prefix(sys, cxt->prefix);

        dir = ul_path_opendir(sys, NULL);
        if (!dir)
                goto done;

        while ((d = readdir(dir))) {
                if (is_node_dirent(d))
                        cxt->nnodes++;
        }

        if (!cxt->nnodes) {
                closedir(dir);
                goto done;
        }

        cxt->nodemaps = xcalloc(cxt->nnodes, sizeof(cpu_set_t *));
        cxt->idx2nodenum = xmalloc(cxt->nnodes * sizeof(int));

        rewinddir(dir);
        for (i = 0; (d = readdir(dir)) && i < cxt->nnodes;) {
                if (is_node_dirent(d))
                        cxt->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
                                        _("Failed to extract the node number"));
        }
        closedir(dir);
        qsort(cxt->idx2nodenum, cxt->nnodes, sizeof(int), nodecmp);

        /* information about how nodes share different CPUs */
        for (i = 0; i < cxt->nnodes; i++)
                ul_path_readf_cpuset(sys, &cxt->nodemaps[i], cxt->maxcpus,
                                     "node%d/cpumap", cxt->idx2nodenum[i]);
done:
        DBG(GATHER, ul_debugobj(cxt, "read %zu numas", cxt->nnodes));

        ul_unref_path(sys);
        return 0;
}