]> git.ipfire.org Git - thirdparty/util-linux.git/blob - sys-utils/lscpu-cputype.c
Merge branch 'fix_mount_fscontext' of https://github.com/tweksteen/util-linux
[thirdparty/util-linux.git] / sys-utils / lscpu-cputype.c
1
2 #include <sys/utsname.h>
3 #include <sys/personality.h>
4
5 #include "lscpu.h"
6
7 #include "fileutils.h"
8 #include "c_strtod.h"
9
10 /* Lookup a pattern and get the value for format "<pattern> : <key>"
11 */
/* Lookup a pattern and get the value for format "<pattern> : <key>".
 *
 * @line:    input line (e.g. one line from /proc/sysinfo)
 * @pattern: field name to match at the start of the line
 * @value:   result; set to a newly allocated copy of the value (caller frees)
 *
 * Returns 1 when the pattern matched and *value was filled, 0 otherwise.
 * An already-set *value is never overwritten (first match wins).
 */
int lookup(char *line, char *pattern, char **value)
{
	char *p, *v;
	size_t len = strlen(pattern);

	/* don't re-fill already found tags, first one wins */
	if (!*line || *value)
		return 0;
	/* pattern */
	if (strncmp(line, pattern, len))
		return 0;
	/* white spaces; cast to unsigned char -- isspace() on a negative
	 * char is undefined behavior (CERT STR37-C) */
	for (p = line + len; isspace((unsigned char) *p); p++);

	/* separator */
	if (*p != ':')
		return 0;
	/* white spaces */
	for (++p; isspace((unsigned char) *p); p++);

	/* value */
	if (!*p)
		return 0;
	v = p;

	/* end of value: trim trailing whitespace (including '\n').
	 * BUGFIX: the old code started at strlen(line) - 1 and always
	 * overwrote that position, chopping the last character of a line
	 * without a trailing newline; bounding by 'v' also keeps us from
	 * walking before the value. */
	p = line + strlen(line);
	while (p > v && isspace((unsigned char) *(p - 1)))
		p--;
	*p = '\0';

	*value = xstrdup(v);
	return 1;
}
45
46 struct lscpu_cputype *lscpu_new_cputype(void)
47 {
48 struct lscpu_cputype *ct;
49
50 ct = xcalloc(1, sizeof(struct lscpu_cputype));
51 ct->refcount = 1;
52 ct->dispatching = -1;
53 ct->freqboost = -1;
54
55 DBG(TYPE, ul_debugobj(ct, "alloc"));
56 return ct;
57 }
58
59 void lscpu_ref_cputype(struct lscpu_cputype *ct)
60 {
61 if (ct) {
62 ct->refcount++;
63 DBG(TYPE, ul_debugobj(ct, ">>> ref %d", ct->refcount));
64 }
65 }
66
67 void lscpu_unref_cputype(struct lscpu_cputype *ct)
68 {
69 if (!ct)
70 return;
71
72 /*DBG(TYPE, ul_debugobj(ct, ">>> unref %d", ct->refcount - 1));*/
73
74 if (--ct->refcount <= 0) {
75 DBG(TYPE, ul_debugobj(ct, " freeing %s/%s", ct->vendor, ct->model));
76 lscpu_cputype_free_topology(ct);
77 free(ct->vendor);
78 free(ct->bios_vendor);
79 free(ct->machinetype); /* s390 */
80 free(ct->family);
81 free(ct->model);
82 free(ct->modelname);
83 free(ct->bios_modelname);
84 free(ct->bios_family);
85 free(ct->revision); /* alternative for model (ppc) */
86 free(ct->stepping);
87 free(ct->bogomips);
88 free(ct->flags);
89 free(ct->mtid); /* maximum thread id (s390) */
90 free(ct->addrsz); /* address sizes */
91 free(ct->static_mhz);
92 free(ct->dynamic_mhz);
93 free(ct);
94 }
95 }
96
97 struct lscpu_cputype *lscpu_cputype_get_default(struct lscpu_cxt *cxt)
98 {
99 return cxt->cputypes ? cxt->cputypes[0] : NULL;
100 }
101
/* True when both strings are NULL, or both are non-NULL and equal */
#define match(astr, bstr) \
		((!astr && !bstr) || (astr && bstr && strcmp(astr, bstr) == 0))
104
105 struct lscpu_cputype *lscpu_add_cputype(struct lscpu_cxt *cxt, struct lscpu_cputype *ct)
106 {
107 DBG(TYPE, ul_debugobj(ct, "add new"));
108 cxt->cputypes = xrealloc(cxt->cputypes, (cxt->ncputypes + 1)
109 * sizeof(struct lscpu_cputype *));
110 cxt->cputypes[cxt->ncputypes] = ct;
111 cxt->ncputypes++;
112 lscpu_ref_cputype(ct);
113 return ct;
114 }
115
116 static void fprintf_cputypes(FILE *f, struct lscpu_cxt *cxt)
117 {
118 size_t i;
119
120 for (i = 0; i < cxt->ncputypes; i++) {
121 struct lscpu_cputype *ct = cxt->cputypes[i];
122
123 fprintf(f, "\n vendor: %s\n", ct->vendor);
124 fprintf(f, " machinetype: %s\n", ct->machinetype);
125 fprintf(f, " family: %s\n", ct->family);
126 fprintf(f, " model: %s\n", ct->model);
127 fprintf(f, " modelname: %s\n", ct->modelname);
128 fprintf(f, " revision: %s\n", ct->revision);
129 fprintf(f, " stepping: %s\n", ct->stepping);
130 fprintf(f, " mtid: %s\n", ct->mtid);
131 fprintf(f, " addrsz: %s\n", ct->addrsz);
132 }
133 }
134
/* Domain of a /proc/cpuinfo line -- decides which object the value goes to */
enum {
	CPUINFO_LINE_UNKNOWN,	/* unknown line */
	CPUINFO_LINE_CPUTYPE,	/* line found in type_patterns[] */
	CPUINFO_LINE_CPU,	/* line found in cpu_patterns[] */
	CPUINFO_LINE_CACHE	/* line found in cache_pattern[] */
};
141
/* Describes /proc/cpuinfo fields */
struct cpuinfo_pattern {
	int id;			/* field ID (PAT_*) */
	int domain;		/* CPUINFO_LINE_* */
	const char *pattern;	/* field name as used in /proc/cpuinfo */
	size_t offset;		/* offset in lscpu_cputype or lscpu_cpu struct
				 * (which one depends on @domain) */
};
149
/* field identifiers (field name may be different on different archs);
 * the ID is architecture-neutral, the mapping from concrete cpuinfo field
 * names lives in the *_patterns[] tables below */
enum {
	PAT_ADDRESS_SIZES,
	PAT_BOGOMIPS,		/* global */
	PAT_BOGOMIPS_CPU,	/* per-cpu */
	PAT_CPU,
	PAT_FAMILY,
	PAT_FEATURES,
	PAT_FLAGS,
	PAT_IMPLEMENTER,
	PAT_MAX_THREAD_ID,
	PAT_MHZ,
	PAT_MHZ_DYNAMIC,
	PAT_MHZ_STATIC,
	PAT_MODEL,
	PAT_MODEL_NAME,
	PAT_PART,
	PAT_PROCESSOR,
	PAT_REVISION,
	PAT_STEPPING,
	PAT_TYPE,
	PAT_VARIANT,
	PAT_VENDOR,
	PAT_CACHE
};
175
/*
 * /proc/cpuinfo to lscpu_cputype conversion
 */
#define DEF_PAT_CPUTYPE(_str, _id, _member) \
	{ \
		.id = (_id), \
		.domain = CPUINFO_LINE_CPUTYPE, \
		.pattern = (_str), \
		.offset = offsetof(struct lscpu_cputype, _member), \
	}

static const struct cpuinfo_pattern type_patterns[] =
{
	/* Sort by fields name! -- bsearch() in cpuinfo_parse_line()
	 * requires strict strcmp() ordering (uppercase before lowercase) */
	DEF_PAT_CPUTYPE( "ASEs implemented",	PAT_FLAGS,	flags),		/* mips */
	DEF_PAT_CPUTYPE( "BogoMIPS",		PAT_BOGOMIPS,	bogomips),	/* aarch64 */
	DEF_PAT_CPUTYPE( "CPU implementer",	PAT_IMPLEMENTER,vendor),	/* ARM and aarch64 */
	DEF_PAT_CPUTYPE( "CPU part",		PAT_PART,	model),		/* ARM and aarch64 */
	DEF_PAT_CPUTYPE( "CPU revision",	PAT_REVISION,	revision),	/* aarch64 */
	DEF_PAT_CPUTYPE( "CPU variant",		PAT_VARIANT,	stepping),	/* aarch64 */
	DEF_PAT_CPUTYPE( "Features",		PAT_FEATURES,	flags),		/* aarch64 */
	DEF_PAT_CPUTYPE( "address sizes",	PAT_ADDRESS_SIZES, addrsz),	/* x86 */
	DEF_PAT_CPUTYPE( "bogomips per cpu",	PAT_BOGOMIPS,	bogomips),	/* s390 */
	DEF_PAT_CPUTYPE( "cpu",			PAT_CPU,	modelname),	/* ppc, sparc */
	DEF_PAT_CPUTYPE( "cpu family",		PAT_FAMILY,	family),
	DEF_PAT_CPUTYPE( "cpu model",		PAT_MODEL,	model),		/* mips */
	DEF_PAT_CPUTYPE( "family",		PAT_FAMILY,	family),
	DEF_PAT_CPUTYPE( "features",		PAT_FEATURES,	flags),		/* s390 */
	DEF_PAT_CPUTYPE( "flags",		PAT_FLAGS,	flags),		/* x86 */
	DEF_PAT_CPUTYPE( "max thread id",	PAT_MAX_THREAD_ID, mtid),	/* s390 */
	DEF_PAT_CPUTYPE( "model",		PAT_MODEL,	model),
	DEF_PAT_CPUTYPE( "model name",		PAT_MODEL_NAME,	modelname),
	DEF_PAT_CPUTYPE( "revision",		PAT_REVISION,	revision),
	DEF_PAT_CPUTYPE( "stepping",		PAT_STEPPING,	stepping),
	DEF_PAT_CPUTYPE( "type",		PAT_TYPE,	flags),		/* sparc64 */
	DEF_PAT_CPUTYPE( "vendor",		PAT_VENDOR,	vendor),
	DEF_PAT_CPUTYPE( "vendor_id",		PAT_VENDOR,	vendor),	/* s390 */
};
214
/*
 * /proc/cpuinfo to lscpu_cpu conversion (per-CPU fields)
 */
#define DEF_PAT_CPU(_str, _id, _member) \
	{ \
		.id = (_id), \
		.domain = CPUINFO_LINE_CPU, \
		.pattern = (_str), \
		.offset = offsetof(struct lscpu_cpu, _member), \
	}

static const struct cpuinfo_pattern cpu_patterns[] =
{
	/* Sort by fields name! -- required by bsearch() */
	DEF_PAT_CPU( "bogomips",	PAT_BOGOMIPS_CPU,	bogomips),
	DEF_PAT_CPU( "cpu MHz",		PAT_MHZ,		mhz),
	DEF_PAT_CPU( "cpu MHz dynamic",	PAT_MHZ_DYNAMIC,	dynamic_mhz),	/* s390 */
	DEF_PAT_CPU( "cpu MHz static",	PAT_MHZ_STATIC,		static_mhz),	/* s390 */
	DEF_PAT_CPU( "cpu number",	PAT_PROCESSOR,		logical_id),	/* s390 */
	DEF_PAT_CPU( "processor",	PAT_PROCESSOR,		logical_id),

};
237
/*
 * /proc/cpuinfo to lscpu_cache conversion; no @offset here -- cache lines
 * are parsed by cpuinfo_parse_cache(), not copied into a struct field
 */
#define DEF_PAT_CACHE(_str, _id) \
	{ \
		.id = (_id), \
		.domain = CPUINFO_LINE_CACHE, \
		.pattern = (_str) \
	}

static const struct cpuinfo_pattern cache_patterns[] =
{
	/* Sort by fields name! -- required by bsearch() */
	DEF_PAT_CACHE("cache", PAT_CACHE),
};
253
254 #define CPUTYPE_PATTERN_BUFSZ 32
255
256 static int cmp_pattern(const void *a0, const void *b0)
257 {
258 const struct cpuinfo_pattern
259 *a = (const struct cpuinfo_pattern *) a0,
260 *b = (const struct cpuinfo_pattern *) b0;
261 return strcmp(a->pattern, b->pattern);
262 }
263
/* Parsing state kept across /proc/cpuinfo lines in lscpu_read_cpuinfo() */
struct cpuinfo_parser {
	struct lscpu_cxt *cxt;			/* global lscpu context */
	struct lscpu_cpu *curr_cpu;		/* CPU currently being filled (referenced) */
	struct lscpu_cputype *curr_type;	/* CPU type currently being filled (referenced) */
	unsigned int curr_type_added : 1;	/* NOTE(review): not used in this chunk -- verify against rest of file */
};
270
271 static int is_different_cputype(struct lscpu_cputype *ct, size_t offset, const char *value)
272 {
273 switch (offset) {
274 case offsetof(struct lscpu_cputype, vendor):
275 return ct->vendor && value && strcmp(ct->vendor, value) != 0;
276 case offsetof(struct lscpu_cputype, model):
277 return ct->model && value && strcmp(ct->model, value) != 0;
278 case offsetof(struct lscpu_cputype, modelname):
279 return ct->modelname && value && strcmp(ct->modelname, value) != 0;
280 case offsetof(struct lscpu_cputype, stepping):
281 return ct->stepping && value && strcmp(ct->stepping, value) != 0;
282 }
283 return 0;
284 }
285
/* Canonicalize @str -- remove the number at the end and return the
 * number by @keynum. This is usable for example for "processor 5" or "cache1"
 * cpuinfo lines.
 *
 * @str:    field name, modified in place (digits and trailing blanks cut off)
 * @keynum: receives the parsed trailing number; untouched when there is none
 *
 * Returns @str (possibly shortened).
 */
static char *key_cleanup(char *str, int *keynum)
{
	size_t sz = rtrim_whitespace((unsigned char *)str);
	size_t i;

	if (!sz)
		return str;

	/* find the start of a trailing digit run, if any;
	 * cast to unsigned char -- isdigit() on a negative char is
	 * undefined behavior (CERT STR37-C) */
	for (i = sz; i > 0; i--) {
		if (!isdigit((unsigned char) str[i - 1]))
			break;
	}

	if (i < sz) {
		char *end = NULL, *p = str + i;
		int n;

		errno = 0;
		n = strtol(p, &end, 10);
		if (errno || !end || end == p)
			return str;

		*keynum = n;
		str[i] = '\0';
		/* cut off blanks that separated the name from the number */
		rtrim_whitespace((unsigned char *)str);
	}
	return str;
}
317
318 static const struct cpuinfo_pattern *cpuinfo_parse_line(char *str, char **value, int *keynum)
319 {
320 struct cpuinfo_pattern key, *pat;
321 char *p, *v;
322 char buf[CPUTYPE_PATTERN_BUFSZ] = { 0 };
323
324 DBG(GATHER, ul_debug("parse \"%s\"", str));
325
326 if (!str || !*str)
327 return NULL;
328 p = (char *) skip_blank(str);
329 if (!p || !*p)
330 return NULL;
331
332 v = strchr(p, ':');
333 if (!v || !*v)
334 return NULL;
335
336 /* prepare name of the field */
337 xstrncpy(buf, p, sizeof(buf));
338 buf[v - p] = '\0';
339 v++;
340
341 /* prepare value */
342 v = (char *) skip_space(v);
343 if (!v || !*v)
344 return NULL;
345
346 key.pattern = key_cleanup(buf, keynum);
347 /* CPU-type */
348 if ((pat = bsearch(&key, type_patterns,
349 ARRAY_SIZE(type_patterns),
350 sizeof(struct cpuinfo_pattern),
351 cmp_pattern)))
352 goto found;
353
354 /* CPU */
355 if ((pat = bsearch(&key, cpu_patterns,
356 ARRAY_SIZE(cpu_patterns),
357 sizeof(struct cpuinfo_pattern),
358 cmp_pattern)))
359 goto found;
360
361 /* CACHE */
362 if ((pat = bsearch(&key, cache_patterns,
363 ARRAY_SIZE(cache_patterns),
364 sizeof(struct cpuinfo_pattern),
365 cmp_pattern)))
366 goto found;
367
368 return NULL;
369 found:
370 rtrim_whitespace((unsigned char *) v);
371 *value = v;
372 return pat;
373 }
374
375 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
376 * part of the cache topology information within the sysfs filesystem. This is
377 * true for all shared caches on e.g. s390. When there are layers of
378 * hypervisors in between it is not knows which CPUs share which caches.
379 * Therefore information about shared caches is only available in
380 * /proc/cpuinfo. Format is:
381 *
382 * cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>
383 *
384 * the cache<nr> part is parsed in cpuinfo_parse_line, in this function parses part after ":".
385 */
386 static int cpuinfo_parse_cache(struct lscpu_cxt *cxt, int keynum, char *data)
387 {
388 struct lscpu_cache *cache;
389 long long size;
390 char *p, type;
391 int level;
392 unsigned int line_size, associativity;
393
394 DBG(GATHER, ul_debugobj(cxt, " parse cpuinfo cache '%s'", data));
395
396 p = strstr(data, "scope=") + 6;
397 /* Skip private caches, also present in sysfs */
398 if (!p || strncmp(p, "Private", 7) == 0)
399 return 0;
400 p = strstr(data, "level=");
401 if (!p || sscanf(p, "level=%d", &level) != 1)
402 return 0;
403 p = strstr(data, "type=") + 5;
404 if (!p || !*p)
405 return 0;
406 type = 0;
407 if (strncmp(p, "Data", 4) == 0)
408 type = 'd';
409 else if (strncmp(p, "Instruction", 11) == 0)
410 type = 'i';
411 else if (strncmp(p, "Unified", 7) == 0)
412 type = 'u';
413 p = strstr(data, "size=");
414 if (!p || sscanf(p, "size=%lld", &size) != 1)
415 return 0;
416
417 p = strstr(data, "line_size=");
418 if (!p || sscanf(p, "line_size=%u", &line_size) != 1)
419 return 0;
420
421 p = strstr(data, "associativity=");
422 if (!p || sscanf(p, "associativity=%u", &associativity) != 1)
423 return 0;
424
425 cxt->necaches++;
426 cxt->ecaches = xrealloc(cxt->ecaches,
427 cxt->necaches * sizeof(struct lscpu_cache));
428 cache = &cxt->ecaches[cxt->necaches - 1];
429 memset(cache, 0 , sizeof(*cache));
430
431 if (type == 'i' || type == 'd')
432 xasprintf(&cache->name, "L%d%c", level, type);
433 else
434 xasprintf(&cache->name, "L%d", level);
435
436 cache->nth = keynum;
437 cache->level = level;
438 cache->size = size * 1024;
439 cache->ways_of_associativity = associativity;
440 cache->coherency_line_size = line_size;
441 /* Number of sets for s390. For safety, just check divide by zero */
442 cache->number_of_sets = line_size ? (cache->size / line_size): 0;
443 cache->number_of_sets = associativity ? (cache->number_of_sets / associativity) : 0;
444
445 cache->type = type == 'i' ? xstrdup("Instruction") :
446 type == 'd' ? xstrdup("Data") :
447 type == 'u' ? xstrdup("Unified") : NULL;
448 return 1;
449 }
450
/* Read /proc/cpuinfo and fill in per-CPU data (cxt->cpus) and CPU-type data
 * (cxt->cputypes). Requires lscpu_create_cpus() to have run first. Always
 * returns 0; a missing cpuinfo file is fatal (err()). */
int lscpu_read_cpuinfo(struct lscpu_cxt *cxt)
{
	FILE *fp;
	char buf[BUFSIZ];
	size_t i;
	struct lscpu_cputype *ct;
	struct cpuinfo_parser _pr = { .cxt = cxt }, *pr = &_pr;

	assert(cxt->npossibles);	/* lscpu_create_cpus() required */
	assert(cxt->cpus);

	DBG(GATHER, ul_debugobj(cxt, "reading cpuinfo"));

	fp = ul_path_fopen(cxt->procfs, "r", "cpuinfo");
	if (!fp)
		err(EXIT_FAILURE, _("cannot open %s"), "/proc/cpuinfo");

	do {
		int keynum = -1;
		char *p = NULL, *value = NULL;
		const struct cpuinfo_pattern *pattern;

		if (fgets(buf, sizeof(buf), fp) != NULL)
			p = (char *) skip_space(buf);

		if (p == NULL || (*buf && !*p)) {
			/* Blank line separates information */
			if (p == NULL)
				break;	/* fgets() returns nothing; EOF */
			continue;
		}

		rtrim_whitespace((unsigned char *) buf);

		/* parse; on success @value points into buf and @keynum holds
		 * a trailing number from the field name (e.g. "cache1") */
		pattern = cpuinfo_parse_line(p, &value, &keynum);
		if (!pattern) {
			DBG(GATHER, ul_debug("'%s' not found", buf));
			continue;
		}

		/* set data */
		switch (pattern->domain) {
		case CPUINFO_LINE_CPU:
			if (pattern->id == PAT_PROCESSOR) {
				/* switch CPU -- id comes from "processor<N>"
				 * (keynum) or from the value itself */
				int id = 0;

				if (keynum >= 0)
					id = keynum;
				else {
					uint32_t n;
					if (ul_strtou32(value, &n, 10) == 0)
						id = n;
				}

				/* the previous CPU gets the type seen so far */
				if (pr->curr_cpu && pr->curr_type)
					lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);

				lscpu_unref_cpu(pr->curr_cpu);
				pr->curr_cpu = lscpu_get_cpu(cxt, id);

				if (!pr->curr_cpu)
					DBG(GATHER, ul_debug("*** cpu ID '%d' undefined", id));
				else
					DBG(GATHER, ul_debug(" switch to CPU %d", id));
				lscpu_ref_cpu(pr->curr_cpu);
				break;
			}
			if (!pr->curr_cpu)
				DBG(GATHER, ul_debug("*** cpu data before cpu ID"));
			else
				strdup_to_offset(pr->curr_cpu, pattern->offset, value);

			/* some per-CPU values double as type-wide defaults;
			 * only the first occurrence is kept */
			if (pattern->id == PAT_MHZ_DYNAMIC && pr->curr_type && !pr->curr_type->dynamic_mhz)
				pr->curr_type->dynamic_mhz = xstrdup(value);
			if (pattern->id == PAT_MHZ_STATIC && pr->curr_type && !pr->curr_type->static_mhz)
				pr->curr_type->static_mhz = xstrdup(value);
			if (pattern->id == PAT_BOGOMIPS_CPU && pr->curr_type && !pr->curr_type->bogomips)
				pr->curr_type->bogomips = xstrdup(value);
			if (pattern->id == PAT_MHZ && pr->curr_cpu && value) {
				errno = 0;
				/* locale-independent strtod() variant */
				pr->curr_cpu->mhz_cur_freq = (float) c_strtod(value, NULL);
				if (errno)
					pr->curr_cpu->mhz_cur_freq = 0;
			}
			break;
		case CPUINFO_LINE_CPUTYPE:
			/* a type-identifying field with a new value starts a
			 * new CPU type (e.g. hybrid big.LITTLE systems) */
			if (pr->curr_type && is_different_cputype(pr->curr_type, pattern->offset, value)) {
				lscpu_unref_cputype(pr->curr_type);
				pr->curr_type = NULL;
			}
			if (!pr->curr_type) {
				pr->curr_type = lscpu_new_cputype();
				lscpu_add_cputype(cxt, pr->curr_type);
			}

			strdup_to_offset(pr->curr_type, pattern->offset, value);
			break;
		case CPUINFO_LINE_CACHE:
			if (pattern->id != PAT_CACHE)
				break;
			cpuinfo_parse_cache(cxt, keynum, value);
			break;
		}
	} while (1);

	DBG(GATHER, fprintf_cputypes(stderr, cxt));

	/* the last CPU in the file still needs its type */
	if (pr->curr_cpu && !pr->curr_cpu->type)
		lscpu_cpu_set_type(pr->curr_cpu, pr->curr_type);

	lscpu_unref_cputype(pr->curr_type);
	lscpu_unref_cpu(pr->curr_cpu);

	fclose(fp);
	lscpu_sort_caches(cxt->ecaches, cxt->necaches);

	/* Set the default type to CPUs which are missing (or not parsed)
	 * in cpuinfo */
	ct = lscpu_cputype_get_default(cxt);
	for (i = 0; i < cxt->npossibles; i++) {
		struct lscpu_cpu *cpu = cxt->cpus[i];

		if (cpu && !cpu->type)
			lscpu_cpu_set_type(cpu, ct);
	}

	return 0;
}
581
582 struct lscpu_arch *lscpu_read_architecture(struct lscpu_cxt *cxt)
583 {
584 struct utsname utsbuf;
585 struct lscpu_arch *ar;
586 struct lscpu_cputype *ct;
587
588 assert(cxt);
589
590 DBG(GATHER, ul_debug("reading architecture"));
591
592 if (uname(&utsbuf) == -1)
593 err(EXIT_FAILURE, _("error: uname failed"));
594
595 ar = xcalloc(1, sizeof(*cxt->arch));
596 ar->name = xstrdup(utsbuf.machine);
597
598 if (cxt->noalive)
599 /* reading info from any /{sys,proc} dump, don't mix it with
600 * information about our real CPU */
601 ;
602 else {
603 #if defined(__alpha__) || defined(__ia64__)
604 ar->bit64 = 1; /* 64bit platforms only */
605 #endif
606 /* platforms with 64bit flag in /proc/cpuinfo, define
607 * 32bit default here */
608 #if defined(__i386__) || defined(__x86_64__) || \
609 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
610 ar->bit32 = 1;
611 #endif
612
613 #if defined(__aarch64__)
614 {
615 /* personality() is the most reliable way (since 4.7)
616 * to determine aarch32 support */
617 int pers = personality(PER_LINUX32);
618 if (pers != -1) {
619 personality(pers);
620 ar->bit32 = 1;
621 }
622 ar->bit64 = 1;
623 }
624 #endif
625 }
626
627 ct = lscpu_cputype_get_default(cxt);
628 if (ct && ct->flags) {
629 char buf[BUFSIZ];
630
631 snprintf(buf, sizeof(buf), " %s ", ct->flags);
632 if (strstr(buf, " lm "))
633 ar->bit32 = 1, ar->bit64 = 1; /* x86_64 */
634 if (strstr(buf, " zarch "))
635 ar->bit32 = 1, ar->bit64 = 1; /* s390x */
636 if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
637 ar->bit32 = 1, ar->bit64 = 1; /* sparc64 */
638 }
639
640 if (ar->name && !cxt->noalive) {
641 if (strcmp(ar->name, "ppc64") == 0)
642 ar->bit32 = 1, ar->bit64 = 1;
643 else if (strcmp(ar->name, "ppc") == 0)
644 ar->bit32 = 1;
645 }
646
647 DBG(GATHER, ul_debugobj(ar, "arch: name=%s %s %s",
648 ar->name,
649 ar->bit64 ? "64-bit" : "",
650 ar->bit64 ? "32-bit" : ""));
651 return ar;
652 }
653
654 void lscpu_free_architecture(struct lscpu_arch *ar)
655 {
656 if (!ar)
657 return;
658 free(ar->name);
659 free(ar);
660 }
661
/* Read possible/present/online CPU masks from sysfs, create the CPU objects
 * for all possible CPUs and remember the mask set size. Always returns 0;
 * a missing "possible" mask is fatal (err()). */
int lscpu_read_cpulists(struct lscpu_cxt *cxt)
{
	cpu_set_t *cpuset = NULL;

	assert(cxt);
	DBG(GATHER, ul_debugobj(cxt, "reading cpulists"));

	if (ul_path_read_s32(cxt->syscpu, &cxt->maxcpus, "kernel_max") == 0)
		/* note that kernel_max is maximum index [NR_CPUS-1] */
		cxt->maxcpus += 1;

	else if (!cxt->noalive)
		/* the root is '/' so we are working with data from the current kernel */
		cxt->maxcpus = get_max_number_of_cpus();

	if (cxt->maxcpus <= 0)
		/* error or we are reading some /sys snapshot instead of the
		 * real /sys, let's use any crazy number... */
		cxt->maxcpus = 2048;

	/* size in bytes needed for a dynamically allocated CPU set */
	cxt->setsize = CPU_ALLOC_SIZE(cxt->maxcpus);

	/* create CPUs from possible mask */
	if (ul_path_readf_cpulist(cxt->syscpu, &cpuset, cxt->maxcpus, "possible") == 0) {
		lscpu_create_cpus(cxt, cpuset, cxt->setsize);
		cpuset_free(cpuset);
		cpuset = NULL;
	} else
		err(EXIT_FAILURE, _("failed to determine number of CPUs: %s"),
				_PATH_SYS_CPU "/possible");


	/* get mask for present CPUs */
	if (ul_path_readf_cpulist(cxt->syscpu, &cxt->present, cxt->maxcpus, "present") == 0)
		cxt->npresents = CPU_COUNT_S(cxt->setsize, cxt->present);

	/* get mask for online CPUs */
	if (ul_path_readf_cpulist(cxt->syscpu, &cxt->online, cxt->maxcpus, "online") == 0)
		cxt->nonlines = CPU_COUNT_S(cxt->setsize, cxt->online);

	return 0;
}
704
705 #if defined(HAVE_LIBRTAS)
706 # define PROCESSOR_MODULE_INFO 43
/* Decode a big-endian 16-bit value stored at buf[offset].
 * BUGFIX: the bytes must be widened as unsigned char -- on platforms where
 * plain char is signed, values >= 0x80 sign-extended and corrupted the
 * result (e.g. {0x80,0x01} became a negative number instead of 0x8001). */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
711 #endif
712
/* some extra information for the default CPU type */
int lscpu_read_archext(struct lscpu_cxt *cxt)
{
	FILE *f;
	char buf[BUFSIZ];
	struct lscpu_cputype *ct;

	DBG(GATHER, ul_debugobj(cxt, "reading extra arch info"));

	assert(cxt);
	ct = lscpu_cputype_get_default(cxt);
	if (!ct)
		return -EINVAL;	/* nothing parsed from cpuinfo yet */

	/* get dispatching mode (s390 sysfs attribute; -1 = not available) */
	if (ul_path_read_s32(cxt->syscpu, &ct->dispatching, "dispatching") != 0)
		ct->dispatching = -1;

	/* get cpufreq boost mode (-1 = not available) */
	if (ul_path_read_s32(cxt->syscpu, &ct->freqboost, "cpufreq/boost") != 0)
		ct->freqboost = -1;

	/* machine type from /proc/sysinfo "Type:" line (s390) */
	if ((f = ul_path_fopen(cxt->procfs, "r", "sysinfo"))) {
		while (fgets(buf, sizeof(buf), f) != NULL) {
			if (lookup(buf, "Type", &ct->machinetype))
				break;
		}
		fclose(f);
	}

#if defined(HAVE_LIBRTAS)
	/* Get PowerPC specific info */
	if (!cxt->noalive) {
		int rc, len, ntypes;

		ct->physsockets = ct->physchips = ct->physcoresperchip = 0;

		rc = rtas_get_sysparm(PROCESSOR_MODULE_INFO, sizeof(buf), buf);
		if (rc < 0)
			goto nortas;

		/* buf layout: [len][ntypes][sockets][chips][cores-per-chip],
		 * all big-endian 16-bit fields */
		len = strbe16toh(buf, 0);
		if (len < 8)
			goto nortas;

		ntypes = strbe16toh(buf, 2);
		if (!ntypes)
			goto nortas;

		ct->physsockets = strbe16toh(buf, 4);
		ct->physchips = strbe16toh(buf, 6);
		ct->physcoresperchip = strbe16toh(buf, 8);
	}
nortas:
#endif
	return 0;
}
770
771 static int cmp_vulnerability_name(const void *a0, const void *b0)
772 {
773 const struct lscpu_vulnerability
774 *a = (const struct lscpu_vulnerability *) a0,
775 *b = (const struct lscpu_vulnerability *) b0;
776 return strcmp(a->name, b->name);
777 }
778
/* Read /sys/devices/system/cpu/vulnerabilities/* into cxt->vuls, sorted by
 * name. A missing directory is not an error; always returns 0. */
int lscpu_read_vulnerabilities(struct lscpu_cxt *cxt)
{
	struct dirent *d;
	DIR *dir;
	size_t n = 0;

	assert(cxt);

	DBG(GATHER, ul_debugobj(cxt, "reading vulnerabilities"));

	dir = ul_path_opendir(cxt->syscpu, "vulnerabilities");
	if (!dir)
		return 0;

	/* first pass: count entries to size the array */
	cxt->nvuls = n = 0;
	while (xreaddir(dir))
		n++;
	if (!n) {
		closedir(dir);
		return 0;
	}

	/* second pass: read each vulnerability file */
	rewinddir(dir);
	cxt->vuls = xcalloc(n, sizeof(struct lscpu_vulnerability));

	while (cxt->nvuls < n && (d = xreaddir(dir))) {
		char *str, *p;
		struct lscpu_vulnerability *vu;

#ifdef _DIRENT_HAVE_D_TYPE
		if (d->d_type == DT_DIR || d->d_type == DT_UNKNOWN)
			continue;
#endif
		if (ul_path_readf_string(cxt->syscpu, &str,
					"vulnerabilities/%s", d->d_name) <= 0)
			continue;

		vu = &cxt->vuls[cxt->nvuls++];

		/* Name: capitalize and replace '_' with spaces for display */
		vu->name = xstrdup(d->d_name);
		*vu->name = toupper(*vu->name);
		strrep(vu->name, '_', ' ');

		/* Description: reformat "Mitigation: ..." for display */
		vu->text = str;
		p = (char *) startswith(vu->text, "Mitigation");
		if (p) {
			*p = ';';
			strrem(vu->text, ':');
		}
	}
	closedir(dir);

	qsort(cxt->vuls, cxt->nvuls,
	      sizeof(struct lscpu_vulnerability), cmp_vulnerability_name);

	return 0;
}
838
/* Returns non-zero when the dirent looks like a NUMA node directory
 * ("node<N>" where <N> is all digits) */
static inline int is_node_dirent(struct dirent *d)
{
	return
		d &&
#ifdef _DIRENT_HAVE_D_TYPE
		/* DT_UNKNOWN must pass: some filesystems don't fill d_type */
		(d->d_type == DT_DIR || d->d_type == DT_UNKNOWN) &&
#endif
		strncmp(d->d_name, "node", 4) == 0 &&
		isdigit_string(d->d_name + 4);
}
849
/* qsort() comparator: orders node numbers ascending.
 * Uses sign comparison instead of subtraction -- "*a - *b" overflows int
 * for operands of opposite sign near INT_MIN/INT_MAX (undefined behavior). */
static int nodecmp(const void *ap, const void *bp)
{
	int a = *(const int *) ap;
	int b = *(const int *) bp;

	return (a > b) - (a < b);
}
855
/* Read NUMA node topology from /sys/devices/system/node: node numbers
 * (sorted) and the per-node CPU masks. Always returns 0; a missing node
 * directory simply leaves cxt->nnodes at 0. */
int lscpu_read_numas(struct lscpu_cxt *cxt)
{
	size_t i = 0;
	DIR *dir;
	struct dirent *d;
	struct path_cxt *sys;

	assert(!cxt->nnodes);	/* must not be called twice */


	sys = ul_new_path(_PATH_SYS_NODE);
	if (!sys)
		err(EXIT_FAILURE, _("failed to initialize %s handler"), _PATH_SYS_NODE);

	ul_path_set_prefix(sys, cxt->prefix);

	dir = ul_path_opendir(sys, NULL);
	if (!dir)
		goto done;

	/* first pass: count node<N> directories */
	while ((d = readdir(dir))) {
		if (is_node_dirent(d))
			cxt->nnodes++;
	}

	if (!cxt->nnodes) {
		closedir(dir);
		goto done;
	}

	cxt->nodemaps = xcalloc(cxt->nnodes, sizeof(cpu_set_t *));
	cxt->idx2nodenum = xmalloc(cxt->nnodes * sizeof(int));

	/* second pass: collect the node numbers */
	rewinddir(dir);
	for (i = 0; (d = readdir(dir)) && i < cxt->nnodes;) {
		if (is_node_dirent(d))
			cxt->idx2nodenum[i++] = strtol_or_err(((d->d_name) + 4),
						_("Failed to extract the node number"));
	}
	closedir(dir);
	/* readdir() order is arbitrary; sort numerically */
	qsort(cxt->idx2nodenum, cxt->nnodes, sizeof(int), nodecmp);

	/* information about how nodes share different CPUs */
	for (i = 0; i < cxt->nnodes; i++)
		ul_path_readf_cpuset(sys, &cxt->nodemaps[i], cxt->maxcpus,
				"node%d/cpumap", cxt->idx2nodenum[i]);
done:
	DBG(GATHER, ul_debugobj(cxt, "read %zu numas", cxt->nnodes));

	ul_unref_path(sys);
	return 0;
}
907 }