2 #include <sys/utsname.h>
3 #include <sys/personality.h>
5 #if defined(HAVE_LIBRTAS)
11 #include "fileutils.h"
/*
 * lookup() -- match a "<pattern>\t: <value>" line (e.g. from /proc/sysinfo)
 * against @pattern and, on match, store a copy of the value via @value.
 * NOTE(review): this chunk is lossy -- the embedded original line numbers
 * jump (15, 17-18, 20, 22-24, 26-27, 29-33, 35-41, 44+ are missing), so the
 * opening brace, several statements and the return paths are not visible
 * here; the fragments below are kept verbatim.
 */
14 /* Lookup a pattern and get the value for format "<pattern> : <key>"
16 int lookup(char *line
, char *pattern
, char **value
)
19 int len
= strlen(pattern
);
21 /* don't re-fill already found tags, first one wins */
25 if (strncmp(line
, pattern
, len
))
/* skip over the matched pattern name, then any whitespace after it */
28 for (p
= line
+ len
; isspace(*p
); p
++);
/* step past the separator (presumably ':') and the whitespace before the value */
34 for (++p
; isspace(*p
); p
++);
/* trim trailing whitespace by walking back from the end of the line */
42 len
= strlen(line
) - 1;
43 for (p
= line
+ len
; isspace(*(p
-1)); p
--);
/*
 * Allocate a new, zero-initialized lscpu_cputype.
 * xcalloc() aborts on OOM, so the returned pointer is always valid.
 * NOTE(review): interior lines are missing from this chunk (original 51,
 * 53, 55-58, 60-61) -- presumably the refcount initialization and the
 * return statement; verify against the complete file.
 */
50 struct lscpu_cputype
*lscpu_new_cputype(void)
52 struct lscpu_cputype
*ct
;
54 ct
= xcalloc(1, sizeof(struct lscpu_cputype
));
59 DBG(TYPE
, ul_debugobj(ct
, "alloc"));
/*
 * Take a reference on @ct.
 * NOTE(review): the actual ct->refcount increment is on a line missing
 * from this chunk (original 64-66); only the debug trace is visible.
 */
63 void lscpu_ref_cputype(struct lscpu_cputype
*ct
)
67 DBG(TYPE
, ul_debugobj(ct
, ">>> ref %d", ct
->refcount
));
/*
 * Drop a reference on @ct; when the counter reaches zero, release the
 * topology and the owned strings. free(NULL) is a no-op, so unset fields
 * are safe to pass.
 * NOTE(review): several free() calls and (presumably) the final free(ct)
 * are on lines missing from this chunk (original 72-75, 77, 81, 84-86,
 * 90-92, 95, 97+).
 */
71 void lscpu_unref_cputype(struct lscpu_cputype
*ct
)
76 /*DBG(TYPE, ul_debugobj(ct, ">>> unref %d", ct->refcount - 1));*/
78 if (--ct
->refcount
<= 0) {
79 DBG(TYPE
, ul_debugobj(ct
, " freeing %s/%s", ct
->vendor
, ct
->model
));
80 lscpu_cputype_free_topology(ct
);
82 free(ct
->bios_vendor
);
83 free(ct
->machinetype
); /* s390 */
87 free(ct
->bios_modelname
);
88 free(ct
->bios_family
);
89 free(ct
->revision
); /* alternative for model (ppc) */
93 free(ct
->mtid
); /* maximum thread id (s390) */
94 free(ct
->addrsz
); /* address sizes */
96 free(ct
->dynamic_mhz
);
101 struct lscpu_cputype
*lscpu_cputype_get_default(struct lscpu_cxt
*cxt
)
103 return cxt
->cputypes
? cxt
->cputypes
[0] : NULL
;
/*
 * Compare two possibly-NULL strings for equality: two NULLs match, a NULL
 * never matches a non-NULL string. Arguments are fully parenthesized so
 * the macro expands safely for non-trivial expressions; each argument is
 * still evaluated more than once, so avoid side effects in callers.
 */
#define match(astr, bstr) \
	((!(astr) && !(bstr)) || ((astr) && (bstr) && strcmp((astr), (bstr)) == 0))
/*
 * Append @ct to the cxt->cputypes array (grown with xreallocarray, which
 * aborts on OOM) and take a reference on it.
 * NOTE(review): lines missing from this chunk (original 110, 115, 117-118)
 * presumably contain the opening brace, the ncputypes increment and the
 * return of @ct; verify against the complete file.
 */
109 struct lscpu_cputype
*lscpu_add_cputype(struct lscpu_cxt
*cxt
, struct lscpu_cputype
*ct
)
111 DBG(TYPE
, ul_debugobj(ct
, "add new"));
112 cxt
->cputypes
= xreallocarray(cxt
->cputypes
, cxt
->ncputypes
+ 1,
113 sizeof(struct lscpu_cputype
*));
114 cxt
->cputypes
[cxt
->ncputypes
] = ct
;
116 lscpu_ref_cputype(ct
);
/*
 * Debug helper: dump the string fields of every gathered CPU type to @f.
 * NOTE(review): the fields are heap strings that may be NULL; printing
 * NULL with "%s" is undefined by the C standard (glibc prints "(null)") --
 * confirm this is debug-only output.
 * NOTE(review): loop body braces/closers are on lines missing from this
 * chunk (original 121-123, 126, 136-139).
 */
120 static void fprintf_cputypes(FILE *f
, struct lscpu_cxt
*cxt
)
124 for (i
= 0; i
< cxt
->ncputypes
; i
++) {
125 struct lscpu_cputype
*ct
= cxt
->cputypes
[i
];
127 fprintf(f
, "\n vendor: %s\n", ct
->vendor
);
128 fprintf(f
, " machinetype: %s\n", ct
->machinetype
);
129 fprintf(f
, " family: %s\n", ct
->family
);
130 fprintf(f
, " model: %s\n", ct
->model
);
131 fprintf(f
, " modelname: %s\n", ct
->modelname
);
132 fprintf(f
, " revision: %s\n", ct
->revision
);
133 fprintf(f
, " stepping: %s\n", ct
->stepping
);
134 fprintf(f
, " mtid: %s\n", ct
->mtid
);
135 fprintf(f
, " addrsz: %s\n", ct
->addrsz
);
140 CPUINFO_LINE_UNKNOWN
, /* unknown line */
141 CPUINFO_LINE_CPUTYPE
, /* line found in type_patterns[] */
142 CPUINFO_LINE_CPU
, /* line found in cpu_patterns[] */
143 CPUINFO_LINE_CACHE
/* line found in cache_pattern[] */
146 /* Describes /proc/cpuinfo fields */
147 struct cpuinfo_pattern
{
148 int id
; /* field ID */
149 int domain
; /* CPUINFO_LINE_* */
150 const char *pattern
; /* field name as used in /proc/cpuinfo */
151 size_t offset
; /* offset in lscpu_cputype or lscpu_cpu struct */
/* NOTE(review): the closing "};" is on a line missing from this chunk. */
154 /* field identifiers (field name may be different on different archs) */
157 PAT_BOGOMIPS
, /* global */
158 PAT_BOGOMIPS_CPU
, /* per-cpu */
182 * /proc/cpuinfo to lscpu_cputype conversion
184 #define DEF_PAT_CPUTYPE(_str, _id, _member) \
187 .domain = CPUINFO_LINE_CPUTYPE, \
189 .offset = offsetof(struct lscpu_cputype, _member), \
192 static const struct cpuinfo_pattern type_patterns
[] =
194 /* Sort by fields name! */
195 DEF_PAT_CPUTYPE( "ASEs implemented", PAT_FLAGS
, flags
), /* mips */
196 DEF_PAT_CPUTYPE( "Address Sizes", PAT_ADDRESS_SIZES
, addrsz
),/* loongarch */
197 DEF_PAT_CPUTYPE( "BogoMIPS", PAT_BOGOMIPS
, bogomips
), /* aarch64 */
198 DEF_PAT_CPUTYPE( "CPU Family", PAT_FAMILY
, family
), /* loongarch */
199 DEF_PAT_CPUTYPE( "CPU Revision", PAT_REVISION
, revision
), /* loongarch */
200 DEF_PAT_CPUTYPE( "CPU implementer", PAT_IMPLEMENTER
,vendor
), /* ARM and aarch64 */
201 DEF_PAT_CPUTYPE( "CPU part", PAT_PART
, model
), /* ARM and aarch64 */
202 DEF_PAT_CPUTYPE( "CPU revision", PAT_REVISION
, revision
), /* aarch64 */
203 DEF_PAT_CPUTYPE( "CPU variant", PAT_VARIANT
, stepping
), /* aarch64 */
204 DEF_PAT_CPUTYPE( "Features", PAT_FEATURES
, flags
), /* aarch64 */
205 DEF_PAT_CPUTYPE( "ISA", PAT_ISA
, isa
), /* loongarch */
206 DEF_PAT_CPUTYPE( "Model Name", PAT_MODEL_NAME
, modelname
), /* loongarch */
207 DEF_PAT_CPUTYPE( "address sizes", PAT_ADDRESS_SIZES
, addrsz
),/* x86 */
208 DEF_PAT_CPUTYPE( "bogomips per cpu", PAT_BOGOMIPS
, bogomips
), /* s390 */
209 DEF_PAT_CPUTYPE( "cpu", PAT_CPU
, modelname
), /* ppc, sparc */
210 DEF_PAT_CPUTYPE( "cpu family", PAT_FAMILY
, family
),
211 DEF_PAT_CPUTYPE( "cpu model", PAT_MODEL
, model
), /* mips */
212 DEF_PAT_CPUTYPE( "family", PAT_FAMILY
, family
),
213 DEF_PAT_CPUTYPE( "features", PAT_FEATURES
, flags
), /* s390 */
214 DEF_PAT_CPUTYPE( "flags", PAT_FLAGS
, flags
), /* x86 */
215 DEF_PAT_CPUTYPE( "max thread id", PAT_MAX_THREAD_ID
, mtid
), /* s390 */
216 DEF_PAT_CPUTYPE( "model", PAT_MODEL
, model
),
217 DEF_PAT_CPUTYPE( "model name", PAT_MODEL_NAME
, modelname
),
218 DEF_PAT_CPUTYPE( "revision", PAT_REVISION
, revision
),
219 DEF_PAT_CPUTYPE( "stepping", PAT_STEPPING
, stepping
),
220 DEF_PAT_CPUTYPE( "type", PAT_TYPE
, flags
), /* sparc64 */
221 DEF_PAT_CPUTYPE( "vendor", PAT_VENDOR
, vendor
),
222 DEF_PAT_CPUTYPE( "vendor_id", PAT_VENDOR
, vendor
), /* s390 */
226 * /proc/cpuinfo to lscpu_cpu conversion
228 #define DEF_PAT_CPU(_str, _id, _member) \
231 .domain = CPUINFO_LINE_CPU, \
233 .offset = offsetof(struct lscpu_cpu, _member), \
236 static const struct cpuinfo_pattern cpu_patterns
[] =
238 /* Sort by fields name! */
239 DEF_PAT_CPU( "CPU MHz", PAT_MHZ
, mhz
), /* loongarch */
240 DEF_PAT_CPU( "bogomips", PAT_BOGOMIPS_CPU
, bogomips
),
241 DEF_PAT_CPU( "cpu MHz", PAT_MHZ
, mhz
),
242 DEF_PAT_CPU( "cpu MHz dynamic", PAT_MHZ_DYNAMIC
, dynamic_mhz
), /* s390 */
243 DEF_PAT_CPU( "cpu MHz static", PAT_MHZ_STATIC
, static_mhz
), /* s390 */
244 DEF_PAT_CPU( "cpu number", PAT_PROCESSOR
, logical_id
), /* s390 */
245 DEF_PAT_CPU( "processor", PAT_PROCESSOR
, logical_id
),
250 * /proc/cpuinfo to lscpu_cache conversion
252 #define DEF_PAT_CACHE(_str, _id) \
255 .domain = CPUINFO_LINE_CACHE, \
259 static const struct cpuinfo_pattern cache_patterns
[] =
261 /* Sort by fields name! */
262 DEF_PAT_CACHE("cache", PAT_CACHE
),
265 #define CPUTYPE_PATTERN_BUFSZ 32
267 static int cmp_pattern(const void *a0
, const void *b0
)
269 const struct cpuinfo_pattern
270 *a
= (const struct cpuinfo_pattern
*) a0
,
271 *b
= (const struct cpuinfo_pattern
*) b0
;
272 return strcmp(a
->pattern
, b
->pattern
);
/*
 * State carried across /proc/cpuinfo lines while parsing: the context,
 * the CPU and CPU-type currently being filled in.
 * NOTE(review): the closing "};" is on a line missing from this chunk.
 */
275 struct cpuinfo_parser
{
276 struct lscpu_cxt
*cxt
; /* the global lscpu context being populated */
277 struct lscpu_cpu
*curr_cpu
; /* CPU whose stanza is currently being parsed */
278 struct lscpu_cputype
*curr_type
; /* CPU type currently being filled in */
279 unsigned int curr_type_added
: 1; /* flag: curr_type already added to cxt */
/*
 * Return nonzero when the field at @offset in @ct is already set and
 * differs from @value -- i.e. this /proc/cpuinfo line starts describing a
 * new CPU type. Only vendor/model/modelname/stepping participate.
 * NOTE(review): the "switch (offset)" header, default case and closing
 * braces are on lines missing from this chunk (original 283-284, 293+).
 */
282 static int is_different_cputype(struct lscpu_cputype
*ct
, size_t offset
, const char *value
)
285 case offsetof(struct lscpu_cputype
, vendor
):
286 return ct
->vendor
&& value
&& strcmp(ct
->vendor
, value
) != 0;
287 case offsetof(struct lscpu_cputype
, model
):
288 return ct
->model
&& value
&& strcmp(ct
->model
, value
) != 0;
289 case offsetof(struct lscpu_cputype
, modelname
):
290 return ct
->modelname
&& value
&& strcmp(ct
->modelname
, value
) != 0;
291 case offsetof(struct lscpu_cputype
, stepping
):
292 return ct
->stepping
&& value
&& strcmp(ct
->stepping
, value
) != 0;
/*
 * key_cleanup() -- strip a trailing decimal number from @str (e.g.
 * "processor 5" -> "processor", "cache1" -> "cache") and return the
 * number via @keynum; trailing whitespace is trimmed before and after.
 * NOTE(review): this chunk is lossy -- the original doc comment below is
 * unterminated here (its closing line is missing) and interior lines
 * (original 299, 301, 303-307, 310-313, 315-317, 320-323, 325+) are
 * absent; presumably "errno = 0;" precedes the strtol() call -- verify.
 * NOTE(review): isdigit(str[i - 1]) passes a plain char; for bytes >= 0x80
 * this is UB per ISO C -- should be cast to unsigned char; confirm against
 * the complete file.
 */
297 /* canonicalize @str -- remove number at the end return the
298 * number by @keynum. This is usable for example for "processor 5" or "cache1"
300 static char *key_cleanup(char *str
, int *keynum
)
302 size_t sz
= rtrim_whitespace((unsigned char *)str
);
308 for (i
= sz
; i
> 0; i
--) {
309 if (!isdigit(str
[i
- 1]))
314 char *end
= NULL
, *p
= str
+ i
;
318 n
= strtol(p
, &end
, 10);
319 if (errno
|| !end
|| end
== p
)
324 rtrim_whitespace((unsigned char *)str
);
/*
 * Parse one "<name>\t: <value>" /proc/cpuinfo line: split off the field
 * name, canonicalize it with key_cleanup() (also extracting any trailing
 * number into @keynum), then bsearch() the name in the three sorted
 * pattern tables -- type_patterns, cpu_patterns, cache_patterns -- and
 * return the matching descriptor; @value receives the trimmed value text.
 * NOTE(review): lossy chunk -- the ':' split, the cmp_pattern comparator
 * arguments to bsearch(), the goto/return paths and braces are on missing
 * lines (original 330, 332, 334, 336-338, 340-346, 349-352, 354-356, 358,
 * 362-365, 369-372, 376-380, 382+).
 */
329 static const struct cpuinfo_pattern
*cpuinfo_parse_line(char *str
, char **value
, int *keynum
)
331 struct cpuinfo_pattern key
= { .id
= 0 }, *pat
;
333 char buf
[CPUTYPE_PATTERN_BUFSZ
] = { 0 };
335 DBG(GATHER
, ul_debug("parse \"%s\"", str
));
339 p
= (char *) skip_blank(str
);
347 /* prepare name of the field */
348 xstrncpy(buf
, p
, sizeof(buf
));
/* v points past the separator; skip leading whitespace of the value */
353 v
= (char *) skip_space(v
);
357 key
.pattern
= key_cleanup(buf
, keynum
);
/* the three tables are kept sorted by field name for bsearch() */
359 if ((pat
= bsearch(&key
, type_patterns
,
360 ARRAY_SIZE(type_patterns
),
361 sizeof(struct cpuinfo_pattern
),
366 if ((pat
= bsearch(&key
, cpu_patterns
,
367 ARRAY_SIZE(cpu_patterns
),
368 sizeof(struct cpuinfo_pattern
),
373 if ((pat
= bsearch(&key
, cache_patterns
,
374 ARRAY_SIZE(cache_patterns
),
375 sizeof(struct cpuinfo_pattern
),
381 rtrim_whitespace((unsigned char *) v
);
/*
 * NOTE(review): the original header comment below is unterminated in this
 * lossy chunk (its closing line is missing), and many interior lines
 * (braces, "return" statements, the necaches increment, etc.) are absent.
 * NOTE(review): genuine defect visible even in the fragments --
 * strstr(data, "scope=") + 6 and strstr(data, "type=") + 5 apply the
 * offset BEFORE the NULL check, so if the token is absent strstr()
 * returns NULL and the pointer arithmetic is undefined behavior; the
 * subsequent "if (!p ...)" can never be true. The checks should be done
 * on the raw strstr() result before adding the offset.
 */
386 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
387 * part of the cache topology information within the sysfs filesystem. This is
388 * true for all shared caches on e.g. s390. When there are layers of
389 * hypervisors in between it is not knows which CPUs share which caches.
390 * Therefore information about shared caches is only available in
391 * /proc/cpuinfo. Format is:
393 * cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>
395 * the cache<nr> part is parsed in cpuinfo_parse_line, in this function parses part after ":".
397 static int cpuinfo_parse_cache(struct lscpu_cxt
*cxt
, int keynum
, char *data
)
399 struct lscpu_cache
*cache
;
403 unsigned int line_size
, associativity
;
405 DBG(GATHER
, ul_debugobj(cxt
, " parse cpuinfo cache '%s'", data
));
407 p
= strstr(data
, "scope=") + 6;
408 /* Skip private caches, also present in sysfs */
409 if (!p
|| strncmp(p
, "Private", 7) == 0)
411 p
= strstr(data
, "level=");
412 if (!p
|| sscanf(p
, "level=%d", &level
) != 1)
/* NOTE(review): same strstr()+offset-before-NULL-check hazard as above */
414 p
= strstr(data
, "type=") + 5;
418 if (strncmp(p
, "Data", 4) == 0)
420 else if (strncmp(p
, "Instruction", 11) == 0)
422 else if (strncmp(p
, "Unified", 7) == 0)
424 p
= strstr(data
, "size=");
425 if (!p
|| sscanf(p
, "size=%lld", &size
) != 1)
428 p
= strstr(data
, "line_size=");
429 if (!p
|| sscanf(p
, "line_size=%u", &line_size
) != 1)
432 p
= strstr(data
, "associativity=");
433 if (!p
|| sscanf(p
, "associativity=%u", &associativity
) != 1)
/* grow the extra-caches array; the new slot is ecaches[necaches - 1],
 * so necaches is presumably incremented on a line missing from this
 * chunk (original 434-436) -- verify against the complete file */
437 cxt
->ecaches
= xreallocarray(cxt
->ecaches
,
438 cxt
->necaches
, sizeof(struct lscpu_cache
));
439 cache
= &cxt
->ecaches
[cxt
->necaches
- 1];
440 memset(cache
, 0 , sizeof(*cache
));
/* name is e.g. "L2d"/"L2i" for split caches, plain "L2" otherwise */
442 if (type
== 'i' || type
== 'd')
443 xasprintf(&cache
->name
, "L%d%c", level
, type
);
445 xasprintf(&cache
->name
, "L%d", level
);
448 cache
->level
= level
;
449 cache
->size
= size
* 1024;
450 cache
->ways_of_associativity
= associativity
;
451 cache
->coherency_line_size
= line_size
;
452 /* Number of sets for s390. For safety, just check divide by zero */
453 cache
->number_of_sets
= line_size
? (cache
->size
/ line_size
): 0;
454 cache
->number_of_sets
= associativity
? (cache
->number_of_sets
/ associativity
) : 0;
456 cache
->type
= type
== 'i' ? xstrdup("Instruction") :
457 type
== 'd' ? xstrdup("Data") :
458 type
== 'u' ? xstrdup("Unified") : NULL
;
/*
 * Read and parse /proc/cpuinfo (via cxt->procfs, so snapshots work too):
 * for every "name : value" line look up the matching pattern descriptor
 * and route it to the current CPU, the current CPU type, or the extra
 * cache parser. A "processor"/"cpu number" line switches the current CPU;
 * a changed vendor/model/modelname/stepping starts a new CPU type.
 * Finally, CPUs left without a type get the default type.
 * NOTE(review): this chunk is lossy -- braces, several declarations
 * (buf, fp, keynum, i, id ...), the do/while loop construct, "continue"/
 * "break" statements, fclose() and the return are on missing lines; the
 * fragments below are kept verbatim.
 */
462 int lscpu_read_cpuinfo(struct lscpu_cxt
*cxt
)
465 /* Used to be BUFSIZ which is small on some platforms e.g, musl,
466 * therefore hardcode to 4K */
469 struct lscpu_cputype
*ct
;
470 struct cpuinfo_parser _pr
= { .cxt
= cxt
}, *pr
= &_pr
;
472 assert(cxt
->npossibles
); /* lscpu_create_cpus() required */
475 DBG(GATHER
, ul_debugobj(cxt
, "reading cpuinfo"));
477 fp
= ul_path_fopen(cxt
->procfs
, "r", "cpuinfo");
479 err(EXIT_FAILURE
, _("cannot open %s"), "/proc/cpuinfo");
483 char *p
= NULL
, *value
= NULL
;
484 const struct cpuinfo_pattern
*pattern
;
486 if (fgets(buf
, sizeof(buf
), fp
) != NULL
)
487 p
= (char *) skip_space(buf
);
489 if (p
== NULL
|| (*buf
&& !*p
)) {
490 /* Blank line separates information */
492 break; /* fgets() returns nothing; EOF */
496 rtrim_whitespace((unsigned char *) buf
);
499 pattern
= cpuinfo_parse_line(p
, &value
, &keynum
);
501 DBG(GATHER
, ul_debug("'%s' not found", buf
));
506 switch (pattern
->domain
) {
507 case CPUINFO_LINE_CPU
:
508 if (pattern
->id
== PAT_PROCESSOR
) {
/* a new "processor: N" stanza begins: finish the previous CPU,
 * then make CPU N current */
516 if (ul_strtou32(value
, &n
, 10) == 0)
520 if (pr
->curr_cpu
&& pr
->curr_type
)
521 lscpu_cpu_set_type(pr
->curr_cpu
, pr
->curr_type
);
523 lscpu_unref_cpu(pr
->curr_cpu
);
524 pr
->curr_cpu
= lscpu_get_cpu(cxt
, id
);
527 DBG(GATHER
, ul_debug("*** cpu ID '%d' undefined", id
));
529 DBG(GATHER
, ul_debug(" switch to CPU %d", id
));
530 lscpu_ref_cpu(pr
->curr_cpu
);
534 DBG(GATHER
, ul_debug("*** cpu data before cpu ID"));
536 strdup_to_offset(pr
->curr_cpu
, pattern
->offset
, value
);
/* s390 reports some per-CPU values that are global for the type:
 * mirror the first occurrence into the current type as well */
538 if (pattern
->id
== PAT_MHZ_DYNAMIC
&& pr
->curr_type
&& !pr
->curr_type
->dynamic_mhz
)
539 pr
->curr_type
->dynamic_mhz
= xstrdup(value
);
540 if (pattern
->id
== PAT_MHZ_STATIC
&& pr
->curr_type
&& !pr
->curr_type
->static_mhz
)
541 pr
->curr_type
->static_mhz
= xstrdup(value
);
542 if (pattern
->id
== PAT_BOGOMIPS_CPU
&& pr
->curr_type
&& !pr
->curr_type
->bogomips
)
543 pr
->curr_type
->bogomips
= xstrdup(value
);
544 if (pattern
->id
== PAT_MHZ
&& pr
->curr_cpu
&& value
) {
546 pr
->curr_cpu
->mhz_cur_freq
= (float) c_strtod(value
, NULL
);
548 pr
->curr_cpu
->mhz_cur_freq
= 0;
551 case CPUINFO_LINE_CPUTYPE
:
/* a conflicting type field means a new CPU type starts here */
552 if (pr
->curr_type
&& is_different_cputype(pr
->curr_type
, pattern
->offset
, value
)) {
553 lscpu_unref_cputype(pr
->curr_type
);
554 pr
->curr_type
= NULL
;
556 if (!pr
->curr_type
) {
557 pr
->curr_type
= lscpu_new_cputype();
558 lscpu_add_cputype(cxt
, pr
->curr_type
);
561 strdup_to_offset(pr
->curr_type
, pattern
->offset
, value
);
563 case CPUINFO_LINE_CACHE
:
564 if (pattern
->id
!= PAT_CACHE
)
566 cpuinfo_parse_cache(cxt
, keynum
, value
);
571 DBG(GATHER
, fprintf_cputypes(stderr
, cxt
));
/* the last stanza has no following "processor:" line; close it here */
573 if (pr
->curr_cpu
&& !pr
->curr_cpu
->type
)
574 lscpu_cpu_set_type(pr
->curr_cpu
, pr
->curr_type
);
576 lscpu_unref_cputype(pr
->curr_type
);
577 lscpu_unref_cpu(pr
->curr_cpu
);
580 lscpu_sort_caches(cxt
->ecaches
, cxt
->necaches
);
582 /* Set the default type to CPUs which are missing (or not parsed)
584 ct
= lscpu_cputype_get_default(cxt
);
585 for (i
= 0; ct
&& i
< cxt
->npossibles
; i
++) {
586 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
588 if (cpu
&& !cpu
->type
)
589 lscpu_cpu_set_type(cpu
, ct
);
/*
 * Determine the architecture name (from uname) and its 32/64-bit support:
 * compile-time defaults per platform, refined at runtime from the default
 * CPU type's flags ("lm", "zarch", "sun4v"/"sun4u") or ISA string
 * (loongarch32/64), and from the "ppc"/"ppc64" name. Returns a newly
 * allocated lscpu_arch.
 * NOTE(review): lossy chunk -- braces, #else/#endif lines, the bit32
 * assignments and the return statement are on missing lines; kept
 * verbatim below.
 */
595 struct lscpu_arch
*lscpu_read_architecture(struct lscpu_cxt
*cxt
)
597 struct utsname utsbuf
;
598 struct lscpu_arch
*ar
;
599 struct lscpu_cputype
*ct
;
603 DBG(GATHER
, ul_debug("reading architecture"));
605 if (uname(&utsbuf
) == -1)
606 err(EXIT_FAILURE
, _("error: uname failed"));
608 ar
= xcalloc(1, sizeof(*cxt
->arch
));
609 ar
->name
= xstrdup(utsbuf
.machine
);
612 /* reading info from any /{sys,proc} dump, don't mix it with
613 * information about our real CPU */
616 #if defined(__alpha__) || defined(__ia64__)
617 ar
->bit64
= 1; /* 64bit platforms only */
619 /* platforms with 64bit flag in /proc/cpuinfo, define
620 * 32bit default here */
621 #if defined(__i386__) || defined(__x86_64__) || \
622 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
626 #if defined(__aarch64__)
628 /* personality() is the most reliable way (since 4.7)
629 * to determine aarch32 support */
630 int pers
= personality(PER_LINUX32
);
640 ct
= lscpu_cputype_get_default(cxt
);
641 if (ct
&& ct
->flags
) {
/* pad with spaces so substring matches hit whole flag words only */
644 snprintf(buf
, sizeof(buf
), " %s ", ct
->flags
);
645 if (strstr(buf
, " lm "))
646 ar
->bit32
= 1, ar
->bit64
= 1; /* x86_64 */
647 if (strstr(buf
, " zarch "))
648 ar
->bit32
= 1, ar
->bit64
= 1; /* s390x */
649 if (strstr(buf
, " sun4v ") || strstr(buf
, " sun4u "))
650 ar
->bit32
= 1, ar
->bit64
= 1; /* sparc64 */
656 snprintf(buf
, sizeof(buf
), " %s ", ct
->isa
);
657 if (strstr(buf
, " loongarch32 "))
659 if (strstr(buf
, " loongarch64 "))
663 if (ar
->name
&& !cxt
->noalive
) {
664 if (strcmp(ar
->name
, "ppc64") == 0)
665 ar
->bit32
= 1, ar
->bit64
= 1;
666 else if (strcmp(ar
->name
, "ppc") == 0)
/* NOTE(review): the third format argument tests ar->bit64 for the
 * "32-bit" label -- looks like a copy-paste of the previous argument;
 * presumably ar->bit32 was intended. Verify and fix upstream. */
670 DBG(GATHER
, ul_debugobj(ar
, "arch: name=%s %s %s",
672 ar
->bit64
? "64-bit" : "",
673 ar
->bit64
? "32-bit" : ""));
/*
 * Release an lscpu_arch allocated by lscpu_read_architecture().
 * NOTE(review): the function body (original lines 678+) is missing from
 * this chunk; presumably it frees ar->name and ar itself -- verify.
 */
677 void lscpu_free_architecture(struct lscpu_arch
*ar
)
/*
 * Determine maxcpus (from sysfs kernel_max, or the live kernel, or a
 * fallback constant for snapshots) and read the possible/present/online
 * CPU masks from sysfs; CPUs are instantiated from the "possible" mask.
 * NOTE(review): lossy chunk -- braces, the fallback maxcpus assignment,
 * the CPU_FREE()/cleanup of @cpuset and the return are on missing lines
 * (original 686, 688-689, 691, 694-695, 699, 703-704, 706, 710-712,
 * 715-716, 720, 724+).
 */
685 int lscpu_read_cpulists(struct lscpu_cxt
*cxt
)
687 cpu_set_t
*cpuset
= NULL
;
690 DBG(GATHER
, ul_debugobj(cxt
, "reading cpulists"));
692 if (ul_path_read_s32(cxt
->syscpu
, &cxt
->maxcpus
, "kernel_max") == 0)
693 /* note that kernel_max is maximum index [NR_CPUS-1] */
696 else if (!cxt
->noalive
)
697 /* the root is '/' so we are working with data from the current kernel */
698 cxt
->maxcpus
= get_max_number_of_cpus();
700 if (cxt
->maxcpus
<= 0)
701 /* error or we are reading some /sys snapshot instead of the
702 * real /sys, let's use any crazy number... */
705 cxt
->setsize
= CPU_ALLOC_SIZE(cxt
->maxcpus
);
707 /* create CPUs from possible mask */
708 if (ul_path_readf_cpulist(cxt
->syscpu
, &cpuset
, cxt
->maxcpus
, "possible") == 0) {
709 lscpu_create_cpus(cxt
, cpuset
, cxt
->setsize
);
713 err(EXIT_FAILURE
, _("failed to determine number of CPUs: %s"),
714 _PATH_SYS_CPU
"/possible");
717 /* get mask for present CPUs */
718 if (ul_path_readf_cpulist(cxt
->syscpu
, &cxt
->present
, cxt
->maxcpus
, "present") == 0)
719 cxt
->npresents
= CPU_COUNT_S(cxt
->setsize
, cxt
->present
);
721 /* get mask for online CPUs */
722 if (ul_path_readf_cpulist(cxt
->syscpu
, &cxt
->online
, cxt
->maxcpus
, "online") == 0)
723 cxt
->nonlines
= CPU_COUNT_S(cxt
->setsize
, cxt
->online
);
728 #if defined(HAVE_LIBRTAS)
729 # define PROCESSOR_MODULE_INFO 43
/*
 * Decode the big-endian 16-bit value stored at buf[offset], buf[offset+1].
 *
 * The bytes are read as unsigned char: on platforms where plain char is
 * signed, a byte >= 0x80 would otherwise sign-extend and corrupt (or even
 * negate) the result.
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
/*
 * Gather extra, mostly arch-specific data for the default CPU type:
 * s390 dispatching mode and cpufreq boost from sysfs, the machine type
 * from /proc/sysinfo, and (with librtas) PowerPC socket/chip/core counts
 * from the PROCESSOR_MODULE_INFO system parameter.
 * NOTE(review): lossy chunk -- declarations (f, buf, rc, len, ntypes),
 * braces, early returns, error handling of rtas_get_sysparm() and the
 * freqboost fallback assignment are on missing lines; kept verbatim.
 */
736 /* some extra information for the default CPU type */
737 int lscpu_read_archext(struct lscpu_cxt
*cxt
)
741 struct lscpu_cputype
*ct
;
743 DBG(GATHER
, ul_debugobj(cxt
, "reading extra arch info"));
746 ct
= lscpu_cputype_get_default(cxt
);
750 /* get dispatching mode */
751 if (ul_path_read_s32(cxt
->syscpu
, &ct
->dispatching
, "dispatching") != 0)
752 ct
->dispatching
= -1;
754 /* get cpufreq boost mode */
755 if (ul_path_read_s32(cxt
->syscpu
, &ct
->freqboost
, "cpufreq/boost") != 0)
/* s390: machine type is only available in /proc/sysinfo */
758 if ((f
= ul_path_fopen(cxt
->procfs
, "r", "sysinfo"))) {
759 while (fgets(buf
, sizeof(buf
), f
) != NULL
)
760 if (lookup(buf
, "Type", &ct
->machinetype
))
766 #if defined(HAVE_LIBRTAS)
767 /* Get PowerPC specific info */
771 ct
->physsockets
= ct
->physchips
= ct
->physcoresperchip
= 0;
773 rc
= rtas_get_sysparm(PROCESSOR_MODULE_INFO
, sizeof(buf
), buf
);
/* reply layout: [0..1] payload length, [2..3] number of module types,
 * then per-type sockets/chips/cores as big-endian 16-bit values */
777 len
= strbe16toh(buf
, 0);
781 ntypes
= strbe16toh(buf
, 2);
785 ct
->physsockets
= strbe16toh(buf
, 4);
786 ct
->physchips
= strbe16toh(buf
, 6);
787 ct
->physcoresperchip
= strbe16toh(buf
, 8);
794 static int cmp_vulnerability_name(const void *a0
, const void *b0
)
796 const struct lscpu_vulnerability
797 *a
= (const struct lscpu_vulnerability
*) a0
,
798 *b
= (const struct lscpu_vulnerability
*) b0
;
799 return strcmp(a
->name
, b
->name
);
/*
 * Read /sys/.../cpu/vulnerabilities: count the directory entries, allocate
 * cxt->vuls, then fill one entry per file -- the name is the file name
 * with the first letter capitalized and '_' replaced by ' ', the text is
 * the file content (with a leading "Mitigation:" normalized). The table
 * is finally sorted by name.
 * NOTE(review): lossy chunk -- declarations (d, dir, str, p, n), braces,
 * rewinddir()/closedir(), "continue" statements, the text assignment and
 * the return are on missing lines; kept verbatim.
 * NOTE(review): toupper(*vu->name) passes a plain char; for non-ASCII
 * bytes this is UB per ISO C -- an (unsigned char) cast would be safer;
 * confirm against the complete file.
 */
802 int lscpu_read_vulnerabilities(struct lscpu_cxt
*cxt
)
810 DBG(GATHER
, ul_debugobj(cxt
, "reading vulnerabilities"));
812 dir
= ul_path_opendir(cxt
->syscpu
, "vulnerabilities");
/* first pass: count entries to size the allocation */
817 while (xreaddir(dir
))
825 cxt
->vuls
= xcalloc(n
, sizeof(struct lscpu_vulnerability
));
827 while (cxt
->nvuls
< n
&& (d
= xreaddir(dir
))) {
829 struct lscpu_vulnerability
*vu
;
831 #ifdef _DIRENT_HAVE_D_TYPE
832 if (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
)
835 if (ul_path_readf_string(cxt
->syscpu
, &str
,
836 "vulnerabilities/%s", d
->d_name
) <= 0)
839 vu
= &cxt
->vuls
[cxt
->nvuls
++];
842 vu
->name
= xstrdup(d
->d_name
);
843 *vu
->name
= toupper(*vu
->name
);
844 strrep(vu
->name
, '_', ' ');
848 p
= (char *) startswith(vu
->text
, "Mitigation");
851 strrem(vu
->text
, ':');
856 qsort(cxt
->vuls
, cxt
->nvuls
,
857 sizeof(struct lscpu_vulnerability
), cmp_vulnerability_name
);
/*
 * Return nonzero when dirent @d names a NUMA node directory, i.e. a
 * directory entry called "node<digits>".
 * NOTE(review): the "return" keyword / opening brace, the #else/#endif of
 * the _DIRENT_HAVE_D_TYPE conditional and the closing brace are on lines
 * missing from this chunk (original 863-865, 868, 871+).
 */
862 static inline int is_node_dirent(struct dirent
*d
)
866 #ifdef _DIRENT_HAVE_D_TYPE
867 (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
) &&
869 strncmp(d
->d_name
, "node", 4) == 0 &&
870 isdigit_string(d
->d_name
+ 4);
/*
 * qsort() comparator for NUMA node numbers (plain ints).
 * NOTE(review): the actual comparison/return (original line 876+) is
 * missing from this chunk; presumably "return *a - *b;" -- verify.
 */
873 static int nodecmp(const void *ap
, const void *bp
)
875 int *a
= (int *) ap
, *b
= (int *) bp
;
/*
 * Enumerate NUMA nodes under _PATH_SYS_NODE (honouring cxt->prefix for
 * snapshots): count "node<N>" directories, record their numbers in
 * idx2nodenum (sorted ascending), and read each node's CPU mask into
 * nodemaps[]. Must be called before nnodes is set (asserted).
 * NOTE(review): lossy chunk -- declarations (dir, d, i), braces, the
 * nnodes increment, rewinddir()/closedir(), ul_unref_path() and the
 * return are on missing lines; kept verbatim.
 */
879 int lscpu_read_numas(struct lscpu_cxt
*cxt
)
884 struct path_cxt
*sys
;
886 assert(!cxt
->nnodes
);
889 sys
= ul_new_path(_PATH_SYS_NODE
);
891 err(EXIT_FAILURE
, _("failed to initialize %s handler"), _PATH_SYS_NODE
);
893 ul_path_set_prefix(sys
, cxt
->prefix
);
895 dir
= ul_path_opendir(sys
, NULL
);
/* first pass: count node directories */
899 while ((d
= readdir(dir
))) {
900 if (is_node_dirent(d
))
909 cxt
->nodemaps
= xcalloc(cxt
->nnodes
, sizeof(cpu_set_t
*));
910 cxt
->idx2nodenum
= xmalloc(cxt
->nnodes
* sizeof(int));
/* second pass: extract the numeric suffix of each "node<N>" entry */
913 for (i
= 0; (d
= readdir(dir
)) && i
< cxt
->nnodes
;) {
914 if (is_node_dirent(d
))
915 cxt
->idx2nodenum
[i
++] = strtol_or_err(((d
->d_name
) + 4),
916 _("Failed to extract the node number"));
919 qsort(cxt
->idx2nodenum
, cxt
->nnodes
, sizeof(int), nodecmp
);
921 /* information about how nodes share different CPUs */
922 for (i
= 0; i
< cxt
->nnodes
; i
++)
923 ul_path_readf_cpuset(sys
, &cxt
->nodemaps
[i
], cxt
->maxcpus
,
924 "node%d/cpumap", cxt
->idx2nodenum
[i
]);
926 DBG(GATHER
, ul_debugobj(cxt
, "read %zu numas", cxt
->nnodes
));