2 #include <sys/utsname.h>
3 #include <sys/personality.h>
10 /* Lookup a pattern and get the value for format "<pattern> : <key>"
/*
 * Parse one "<pattern>\t: <value>" line (as in /proc/cpuinfo or
 * /proc/sysinfo).  The first match wins: an already-filled *value is
 * not overwritten.  Whitespace around the value is skipped/trimmed.
 * NOTE(review): several original lines are elided in this view (the
 * early-return paths and the value duplication) -- verify against the
 * complete source before relying on the details.
 */
12 int lookup(char *line
, char *pattern
, char **value
)
/* length of the key we are matching against the line start */
15 int len
= strlen(pattern
);
17 /* don't re-fill already found tags, first one wins */
21 if (strncmp(line
, pattern
, len
))
/* skip whitespace between the key and the ':' separator */
24 for (p
= line
+ len
; isspace(*p
); p
++);
/* step past the separator, then skip whitespace before the value */
30 for (++p
; isspace(*p
); p
++);
/* right-trim: back up over trailing whitespace at the line end */
38 len
= strlen(line
) - 1;
39 for (p
= line
+ len
; isspace(*(p
-1)); p
--);
/*
 * Allocate a new zero-initialized CPU-type descriptor.  xcalloc() never
 * returns NULL (it aborts on OOM), so the result needs no NULL check.
 * NOTE(review): lines elided from this view presumably initialize the
 * refcount and return @ct -- confirm in the complete source.
 */
46 struct lscpu_cputype
*lscpu_new_cputype(void)
48 struct lscpu_cputype
*ct
;
50 ct
= xcalloc(1, sizeof(struct lscpu_cputype
));
55 DBG(TYPE
, ul_debugobj(ct
, "alloc"));
/*
 * Take a reference on @ct.  The debug message prints ct->refcount.
 * NOTE(review): the increment itself is on a line elided from this view.
 */
59 void lscpu_ref_cputype(struct lscpu_cputype
*ct
)
63 DBG(TYPE
, ul_debugobj(ct
, ">>> ref %d", ct
->refcount
))
;
/*
 * Drop one reference from @ct; when the counter reaches zero, free the
 * topology data and all owned strings.  NOTE(review): additional free()
 * calls and free(ct) itself are on lines elided from this view.
 */
67 void lscpu_unref_cputype(struct lscpu_cputype
*ct
)
72 /*DBG(TYPE, ul_debugobj(ct, ">>> unref %d", ct->refcount - 1));*/
74 if (--ct
->refcount
<= 0) {
75 DBG(TYPE
, ul_debugobj(ct
, " freeing %s/%s", ct
->vendor
, ct
->model
));
76 lscpu_cputype_free_topology(ct
);
78 free(ct
->bios_vendor
);
79 free(ct
->machinetype
); /* s390 */
83 free(ct
->bios_modelname
);
84 free(ct
->bios_family
);
85 free(ct
->revision
); /* alternative for model (ppc) */
89 free(ct
->mtid
); /* maximum thread id (s390) */
90 free(ct
->addrsz
); /* address sizes */
92 free(ct
->dynamic_mhz
);
/*
 * Return the first (default) CPU type, or NULL when no type has been
 * detected/added yet.
 */
97 struct lscpu_cputype
*lscpu_cputype_get_default(struct lscpu_cxt
*cxt
)
99 return cxt
->cputypes
? cxt
->cputypes
[0] : NULL
;
/* True when both strings are NULL, or both are non-NULL and equal. */
102 #define match(astr, bstr) \
103 ((!astr && !bstr) || (astr && bstr && strcmp(astr, bstr) == 0))
/*
 * Append @ct to the cxt->cputypes array (grown by one pointer) and take
 * a reference on it.  NOTE(review): the ncputypes increment and the
 * return statement are on lines elided from this view.
 */
105 struct lscpu_cputype
*lscpu_add_cputype(struct lscpu_cxt
*cxt
, struct lscpu_cputype
*ct
)
107 DBG(TYPE
, ul_debugobj(ct
, "add new"));
108 cxt
->cputypes
= xrealloc(cxt
->cputypes
, (cxt
->ncputypes
+ 1)
109 * sizeof(struct lscpu_cputype
*));
110 cxt
->cputypes
[cxt
->ncputypes
] = ct
;
112 lscpu_ref_cputype(ct
);
/*
 * Debug helper: dump selected string fields of every known CPU type to
 * stream @f (used from DBG() in lscpu_read_cpuinfo()).
 */
116 static void fprintf_cputypes(FILE *f
, struct lscpu_cxt
*cxt
)
120 for (i
= 0; i
< cxt
->ncputypes
; i
++) {
121 struct lscpu_cputype
*ct
= cxt
->cputypes
[i
];
123 fprintf(f
, "\n vendor: %s\n", ct
->vendor
);
124 fprintf(f
, " machinetype: %s\n", ct
->machinetype
);
125 fprintf(f
, " family: %s\n", ct
->family
);
126 fprintf(f
, " model: %s\n", ct
->model
);
127 fprintf(f
, " modelname: %s\n", ct
->modelname
);
128 fprintf(f
, " revision: %s\n", ct
->revision
);
129 fprintf(f
, " stepping: %s\n", ct
->stepping
);
130 fprintf(f
, " mtid: %s\n", ct
->mtid
);
131 fprintf(f
, " addrsz: %s\n", ct
->addrsz
);
/*
 * Domain classification for parsed /proc/cpuinfo lines: each recognized
 * field belongs to the CPU-type tables, the per-CPU tables, or the
 * cache tables below.  (Enum header is on a line elided from this view.)
 */
136 CPUINFO_LINE_UNKNOWN
, /* unknown line */
137 CPUINFO_LINE_CPUTYPE
, /* line found in type_patterns[] */
138 CPUINFO_LINE_CPU
, /* line found in cpu_patterns[] */
139 CPUINFO_LINE_CACHE
/* line found in cache_pattern[] */
142 /* Describes /proc/cpuinfo fields */
143 struct cpuinfo_pattern
{
144 int id
; /* field ID */
145 int domain
; /* CPUINFO_LINE_* */
146 const char *pattern
; /* field name as used in /proc/cpuinfo */
147 size_t offset
; /* offset in lscpu_cputype or lscpu_cpu struct */
150 /* field identifiers (field name may be different on different archs) */
153 PAT_BOGOMIPS
, /* global */
154 PAT_BOGOMIPS_CPU
, /* per-cpu */
/*
 * CPU-type field table.  Looked up with bsearch()+cmp_pattern(), so the
 * entries MUST stay sorted by the pattern string (strcmp order).
 */
177 * /proc/cpuinfo to lscpu_cputype conversion
179 #define DEF_PAT_CPUTYPE(_str, _id, _member) \
182 .domain = CPUINFO_LINE_CPUTYPE, \
184 .offset = offsetof(struct lscpu_cputype, _member), \
187 static const struct cpuinfo_pattern type_patterns
[] =
189 /* Sort by fields name! */
190 DEF_PAT_CPUTYPE( "ASEs implemented", PAT_FLAGS
, flags
), /* mips */
191 DEF_PAT_CPUTYPE( "BogoMIPS", PAT_BOGOMIPS
, bogomips
), /* aarch64 */
192 DEF_PAT_CPUTYPE( "CPU implementer", PAT_IMPLEMENTER
,vendor
), /* ARM and aarch64 */
193 DEF_PAT_CPUTYPE( "CPU part", PAT_PART
, model
), /* ARM and aarch64 */
194 DEF_PAT_CPUTYPE( "CPU revision", PAT_REVISION
, revision
), /* aarch64 */
195 DEF_PAT_CPUTYPE( "CPU variant", PAT_VARIANT
, stepping
), /* aarch64 */
196 DEF_PAT_CPUTYPE( "Features", PAT_FEATURES
, flags
), /* aarch64 */
197 DEF_PAT_CPUTYPE( "address sizes", PAT_ADDRESS_SIZES
, addrsz
),/* x86 */
198 DEF_PAT_CPUTYPE( "bogomips per cpu", PAT_BOGOMIPS
, bogomips
), /* s390 */
199 DEF_PAT_CPUTYPE( "cpu", PAT_CPU
, modelname
), /* ppc, sparc */
200 DEF_PAT_CPUTYPE( "cpu family", PAT_FAMILY
, family
),
201 DEF_PAT_CPUTYPE( "cpu model", PAT_MODEL
, model
), /* mips */
202 DEF_PAT_CPUTYPE( "family", PAT_FAMILY
, family
),
203 DEF_PAT_CPUTYPE( "features", PAT_FEATURES
, flags
), /* s390 */
204 DEF_PAT_CPUTYPE( "flags", PAT_FLAGS
, flags
), /* x86 */
205 DEF_PAT_CPUTYPE( "max thread id", PAT_MAX_THREAD_ID
, mtid
), /* s390 */
206 DEF_PAT_CPUTYPE( "model", PAT_MODEL
, model
),
207 DEF_PAT_CPUTYPE( "model name", PAT_MODEL_NAME
, modelname
),
208 DEF_PAT_CPUTYPE( "revision", PAT_REVISION
, revision
),
209 DEF_PAT_CPUTYPE( "stepping", PAT_STEPPING
, stepping
),
210 DEF_PAT_CPUTYPE( "type", PAT_TYPE
, flags
), /* sparc64 */
211 DEF_PAT_CPUTYPE( "vendor", PAT_VENDOR
, vendor
),
212 DEF_PAT_CPUTYPE( "vendor_id", PAT_VENDOR
, vendor
), /* s390 */
/*
 * Per-CPU field table.  Also consumed by bsearch(); keep sorted by the
 * pattern string (strcmp order).
 */
216 * /proc/cpuinfo to lscpu_cpu conversion
218 #define DEF_PAT_CPU(_str, _id, _member) \
221 .domain = CPUINFO_LINE_CPU, \
223 .offset = offsetof(struct lscpu_cpu, _member), \
226 static const struct cpuinfo_pattern cpu_patterns
[] =
228 /* Sort by fields name! */
229 DEF_PAT_CPU( "bogomips", PAT_BOGOMIPS_CPU
, bogomips
),
230 DEF_PAT_CPU( "cpu MHz", PAT_MHZ
, mhz
),
231 DEF_PAT_CPU( "cpu MHz dynamic", PAT_MHZ_DYNAMIC
, dynamic_mhz
), /* s390 */
232 DEF_PAT_CPU( "cpu MHz static", PAT_MHZ_STATIC
, static_mhz
), /* s390 */
233 DEF_PAT_CPU( "cpu number", PAT_PROCESSOR
, logical_id
), /* s390 */
234 DEF_PAT_CPU( "processor", PAT_PROCESSOR
, logical_id
),
/*
 * Cache field table (single "cache<nr>" entry); the value is parsed by
 * cpuinfo_parse_cache().  CPUTYPE_PATTERN_BUFSZ bounds the key buffer
 * used in cpuinfo_parse_line().
 */
239 * /proc/cpuinfo to lscpu_cache conversion
241 #define DEF_PAT_CACHE(_str, _id) \
244 .domain = CPUINFO_LINE_CACHE, \
248 static const struct cpuinfo_pattern cache_patterns
[] =
250 /* Sort by fields name! */
251 DEF_PAT_CACHE("cache", PAT_CACHE
),
254 #define CPUTYPE_PATTERN_BUFSZ 32
256 static int cmp_pattern(const void *a0
, const void *b0
)
258 const struct cpuinfo_pattern
259 *a
= (const struct cpuinfo_pattern
*) a0
,
260 *b
= (const struct cpuinfo_pattern
*) b0
;
261 return strcmp(a
->pattern
, b
->pattern
);
/* Parsing state carried across lines while reading /proc/cpuinfo. */
264 struct cpuinfo_parser
{
265 struct lscpu_cxt
*cxt
; /* global lscpu context */
266 struct lscpu_cpu
*curr_cpu
; /* CPU whose stanza is being parsed */
267 struct lscpu_cputype
*curr_type
; /* CPU type being filled */
268 unsigned int curr_type_added
: 1; /* presumably: curr_type already in cxt->cputypes -- usage not visible here */
/*
 * Return non-zero when @value differs from the string already stored at
 * @offset in @ct (vendor/model/modelname/stepping) -- used to detect
 * that a /proc/cpuinfo stanza describes a new CPU type.
 * NOTE(review): the switch(offset) header and the default return are on
 * lines elided from this view.
 */
271 static int is_different_cputype(struct lscpu_cputype
*ct
, size_t offset
, const char *value
)
274 case offsetof(struct lscpu_cputype
, vendor
):
275 return ct
->vendor
&& value
&& strcmp(ct
->vendor
, value
) != 0;
276 case offsetof(struct lscpu_cputype
, model
):
277 return ct
->model
&& value
&& strcmp(ct
->model
, value
) != 0;
278 case offsetof(struct lscpu_cputype
, modelname
):
279 return ct
->modelname
&& value
&& strcmp(ct
->modelname
, value
) != 0;
280 case offsetof(struct lscpu_cputype
, stepping
):
281 return ct
->stepping
&& value
&& strcmp(ct
->stepping
, value
) != 0;
286 /* canonicalize @str -- remove the trailing number and return that
287 * number via @keynum. This is usable for example for "processor 5" or "cache1"
289 static char *key_cleanup(char *str
, int *keynum
)
291 size_t sz
= rtrim_whitespace((unsigned char *)str
);
/* walk backwards over the trailing digits */
297 for (i
= sz
; i
> 0; i
--) {
298 if (!isdigit(str
[i
- 1]))
303 char *end
= NULL
, *p
= str
+ i
;
/* NOTE(review): errno is checked below; its reset to 0 before strtol()
 * is presumably on an elided line -- confirm in the complete source */
307 n
= strtol(p
, &end
, 10);
308 if (errno
|| !end
|| end
== p
)
/* trim again after the number has been cut off the key */
313 rtrim_whitespace((unsigned char *)str
);
/*
 * Split a /proc/cpuinfo line into key and value; canonicalize the key
 * (key_cleanup() strips a trailing number into *keynum) and look it up
 * in the three sorted pattern tables -- CPU type, per-CPU, cache.
 * Sets *value to the right-trimmed value.  NOTE(review): the separator
 * search, the "found:" path and the return statements are on elided
 * lines -- presumably returns the matched pattern or NULL.
 */
318 static const struct cpuinfo_pattern
*cpuinfo_parse_line(char *str
, char **value
, int *keynum
)
320 struct cpuinfo_pattern key
, *pat
;
322 char buf
[CPUTYPE_PATTERN_BUFSZ
] = { 0 };
324 DBG(GATHER
, ul_debug("parse \"%s\"", str
));
328 p
= (char *) skip_blank(str
);
336 /* prepare name of the field */
337 xstrncpy(buf
, p
, sizeof(buf
));
342 v
= (char *) skip_space(v
);
346 key
.pattern
= key_cleanup(buf
, keynum
);
/* search the tables in order: CPU type, per-CPU, then cache */
348 if ((pat
= bsearch(&key
, type_patterns
,
349 ARRAY_SIZE(type_patterns
),
350 sizeof(struct cpuinfo_pattern
),
355 if ((pat
= bsearch(&key
, cpu_patterns
,
356 ARRAY_SIZE(cpu_patterns
),
357 sizeof(struct cpuinfo_pattern
),
362 if ((pat
= bsearch(&key
, cache_patterns
,
363 ARRAY_SIZE(cache_patterns
),
364 sizeof(struct cpuinfo_pattern
),
370 rtrim_whitespace((unsigned char *) v
);
375 /* Parse extra cache lines contained within /proc/cpuinfo but which are not
376 * part of the cache topology information within the sysfs filesystem. This is
377 * true for all shared caches on e.g. s390. When there are layers of
378 * hypervisors in between it is not knows which CPUs share which caches.
379 * Therefore information about shared caches is only available in
380 * /proc/cpuinfo. Format is:
382 * cache<nr> : level=<lvl> type=<type> scope=<scope> size=<size> line_size=<lsz> associativity=<as>
384 * the cache<nr> part is parsed in cpuinfo_parse_line(); this function parses the part after ":".
/*
 * Parse one s390-style "cache<nr> : level=.. type=.. scope=.. size=..
 * line_size=.. associativity=.." value and append a new entry to
 * cxt->ecaches.  Private-scope caches are skipped (already in sysfs).
 * NOTE(review): "p = strstr(data, "scope=") + 6" adds 6 BEFORE the NULL
 * check, so when the token is absent strstr() returns NULL, the
 * arithmetic is undefined behavior and the "!p" test can never fire;
 * the same applies to "type=" + 5 below.  Worth fixing.
 */
386 static int cpuinfo_parse_cache(struct lscpu_cxt
*cxt
, int keynum
, char *data
)
388 struct lscpu_cache
*cache
;
392 unsigned int line_size
, associativity
;
394 DBG(GATHER
, ul_debugobj(cxt
, " parse cpuinfo cache '%s'", data
));
396 p
= strstr(data
, "scope=") + 6;
397 /* Skip private caches, also present in sysfs */
398 if (!p
|| strncmp(p
, "Private", 7) == 0)
400 p
= strstr(data
, "level=");
401 if (!p
|| sscanf(p
, "level=%d", &level
) != 1)
403 p
= strstr(data
, "type=") + 5;
407 if (strncmp(p
, "Data", 4) == 0)
409 else if (strncmp(p
, "Instruction", 11) == 0)
411 else if (strncmp(p
, "Unified", 7) == 0)
413 p
= strstr(data
, "size=");
414 if (!p
|| sscanf(p
, "size=%lld", &size
) != 1)
417 p
= strstr(data
, "line_size=");
418 if (!p
|| sscanf(p
, "line_size=%u", &line_size
) != 1)
421 p
= strstr(data
, "associativity=");
422 if (!p
|| sscanf(p
, "associativity=%u", &associativity
) != 1)
/* grow the extra-caches array; necaches was presumably incremented on
 * an elided line, hence the [necaches - 1] slot below */
426 cxt
->ecaches
= xrealloc(cxt
->ecaches
,
427 cxt
->necaches
* sizeof(struct lscpu_cache
));
428 cache
= &cxt
->ecaches
[cxt
->necaches
- 1];
429 memset(cache
, 0 , sizeof(*cache
));
431 if (type
== 'i' || type
== 'd')
432 xasprintf(&cache
->name
, "L%d%c", level
, type
);
434 xasprintf(&cache
->name
, "L%d", level
);
437 cache
->level
= level
;
438 cache
->size
= size
* 1024;
439 cache
->ways_of_associativity
= associativity
;
440 cache
->coherency_line_size
= line_size
;
441 /* Number of sets for s390. For safety, just check divide by zero */
442 cache
->number_of_sets
= line_size
? (cache
->size
/ line_size
): 0;
443 cache
->number_of_sets
= associativity
? (cache
->number_of_sets
/ associativity
) : 0;
445 cache
->type
= type
== 'i' ? xstrdup("Instruction") :
446 type
== 'd' ? xstrdup("Data") :
447 type
== 'u' ? xstrdup("Unified") : NULL
;
/*
 * Read and parse /proc/cpuinfo: classify each line via
 * cpuinfo_parse_line() and dispatch on its domain -- per-CPU fields,
 * CPU-type fields, or extra cache lines.  A blank line separates
 * stanzas.  Afterwards, CPUs without a parsed type get the default
 * type.  Requires lscpu_create_cpus() to have run (see assert below).
 * NOTE(review): the read loop header, several braces/returns and some
 * branches are on lines elided from this view.
 */
451 int lscpu_read_cpuinfo(struct lscpu_cxt
*cxt
)
456 struct lscpu_cputype
*ct
;
457 struct cpuinfo_parser _pr
= { .cxt
= cxt
}, *pr
= &_pr
;
459 assert(cxt
->npossibles
); /* lscpu_create_cpus() required */
462 DBG(GATHER
, ul_debugobj(cxt
, "reading cpuinfo"));
464 fp
= ul_path_fopen(cxt
->procfs
, "r", "cpuinfo");
466 err(EXIT_FAILURE
, _("cannot open %s"), "/proc/cpuinfo");
470 char *p
= NULL
, *value
= NULL
;
471 const struct cpuinfo_pattern
*pattern
;
473 if (fgets(buf
, sizeof(buf
), fp
) != NULL
)
474 p
= (char *) skip_space(buf
);
476 if (p
== NULL
|| (*buf
&& !*p
)) {
477 /* Blank line separates information */
479 break; /* fgets() returns nothing; EOF */
483 rtrim_whitespace((unsigned char *) buf
);
486 pattern
= cpuinfo_parse_line(p
, &value
, &keynum
);
488 DBG(GATHER
, ul_debug("'%s' not found", buf
));
493 switch (pattern
->domain
) {
494 case CPUINFO_LINE_CPU
:
495 if (pattern
->id
== PAT_PROCESSOR
) {
/* "processor: <id>" starts a new per-CPU stanza: bind the previous
 * CPU to the current type and switch curr_cpu */
503 if (ul_strtou32(value
, &n
, 10) == 0)
507 if (pr
->curr_cpu
&& pr
->curr_type
)
508 lscpu_cpu_set_type(pr
->curr_cpu
, pr
->curr_type
);
510 lscpu_unref_cpu(pr
->curr_cpu
);
511 pr
->curr_cpu
= lscpu_get_cpu(cxt
, id
);
514 DBG(GATHER
, ul_debug("*** cpu ID '%d' undefined", id
));
516 DBG(GATHER
, ul_debug(" switch to CPU %d", id
));
517 lscpu_ref_cpu(pr
->curr_cpu
);
521 DBG(GATHER
, ul_debug("*** cpu data before cpu ID"));
523 strdup_to_offset(pr
->curr_cpu
, pattern
->offset
, value
);
/* mirror some per-CPU values into the type, first value wins */
525 if (pattern
->id
== PAT_MHZ_DYNAMIC
&& pr
->curr_type
&& !pr
->curr_type
->dynamic_mhz
)
526 pr
->curr_type
->dynamic_mhz
= xstrdup(value
);
527 if (pattern
->id
== PAT_MHZ_STATIC
&& pr
->curr_type
&& !pr
->curr_type
->static_mhz
)
528 pr
->curr_type
->static_mhz
= xstrdup(value
);
529 if (pattern
->id
== PAT_BOGOMIPS_CPU
&& pr
->curr_type
&& !pr
->curr_type
->bogomips
)
530 pr
->curr_type
->bogomips
= xstrdup(value
);
531 if (pattern
->id
== PAT_MHZ
&& pr
->curr_cpu
&& value
) {
533 pr
->curr_cpu
->mhz_cur_freq
= (float) c_strtod(value
, NULL
);
535 pr
->curr_cpu
->mhz_cur_freq
= 0;
538 case CPUINFO_LINE_CPUTYPE
:
/* a changed vendor/model/... means a new CPU type begins */
539 if (pr
->curr_type
&& is_different_cputype(pr
->curr_type
, pattern
->offset
, value
)) {
540 lscpu_unref_cputype(pr
->curr_type
);
541 pr
->curr_type
= NULL
;
543 if (!pr
->curr_type
) {
544 pr
->curr_type
= lscpu_new_cputype();
545 lscpu_add_cputype(cxt
, pr
->curr_type
);
548 strdup_to_offset(pr
->curr_type
, pattern
->offset
, value
);
550 case CPUINFO_LINE_CACHE
:
551 if (pattern
->id
!= PAT_CACHE
)
553 cpuinfo_parse_cache(cxt
, keynum
, value
);
558 DBG(GATHER
, fprintf_cputypes(stderr
, cxt
));
560 if (pr
->curr_cpu
&& !pr
->curr_cpu
->type
)
561 lscpu_cpu_set_type(pr
->curr_cpu
, pr
->curr_type
);
563 lscpu_unref_cputype(pr
->curr_type
);
564 lscpu_unref_cpu(pr
->curr_cpu
);
567 lscpu_sort_caches(cxt
->ecaches
, cxt
->necaches
);
569 /* Set the default type to CPUs which are missing (or not parsed)
571 ct
= lscpu_cputype_get_default(cxt
);
572 for (i
= 0; i
< cxt
->npossibles
; i
++) {
573 struct lscpu_cpu
*cpu
= cxt
->cpus
[i
];
575 if (cpu
&& !cpu
->type
)
576 lscpu_cpu_set_type(cpu
, ct
);
/*
 * Determine the architecture name (uname machine) and the 32/64-bit
 * capabilities, using compile-time platform knowledge, CPU flags of the
 * default type ("lm", "zarch", "sun4v"/"sun4u") and arch-name special
 * cases (ppc/ppc64).  Aborts via err() when uname() fails.
 * NOTE(review): several lines (bit32 defaults, aarch64 personality
 * restore, return) are elided from this view.
 */
582 struct lscpu_arch
*lscpu_read_architecture(struct lscpu_cxt
*cxt
)
584 struct utsname utsbuf
;
585 struct lscpu_arch
*ar
;
586 struct lscpu_cputype
*ct
;
590 DBG(GATHER
, ul_debug("reading architecture"));
592 if (uname(&utsbuf
) == -1)
593 err(EXIT_FAILURE
, _("error: uname failed"));
595 ar
= xcalloc(1, sizeof(*cxt
->arch
));
596 ar
->name
= xstrdup(utsbuf
.machine
);
599 /* reading info from any /{sys,proc} dump, don't mix it with
600 * information about our real CPU */
603 #if defined(__alpha__) || defined(__ia64__)
604 ar
->bit64
= 1; /* 64bit platforms only */
606 /* platforms with 64bit flag in /proc/cpuinfo, define
607 * 32bit default here */
608 #if defined(__i386__) || defined(__x86_64__) || \
609 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
613 #if defined(__aarch64__)
615 /* personality() is the most reliable way (since 4.7)
616 * to determine aarch32 support */
617 int pers
= personality(PER_LINUX32
);
627 ct
= lscpu_cputype_get_default(cxt
);
628 if (ct
&& ct
->flags
) {
/* pad with spaces so strstr() matches whole flag words only */
631 snprintf(buf
, sizeof(buf
), " %s ", ct
->flags
);
632 if (strstr(buf
, " lm "))
633 ar
->bit32
= 1, ar
->bit64
= 1; /* x86_64 */
634 if (strstr(buf
, " zarch "))
635 ar
->bit32
= 1, ar
->bit64
= 1; /* s390x */
636 if (strstr(buf
, " sun4v ") || strstr(buf
, " sun4u "))
637 ar
->bit32
= 1, ar
->bit64
= 1; /* sparc64 */
640 if (ar
->name
&& !cxt
->noalive
) {
641 if (strcmp(ar
->name
, "ppc64") == 0)
642 ar
->bit32
= 1, ar
->bit64
= 1;
643 else if (strcmp(ar
->name
, "ppc") == 0)
647 DBG(GATHER
, ul_debugobj(ar
, "arch: name=%s %s %s",
649 ar
->bit64
? "64-bit" : "",
/* NOTE(review): this tests bit64 but prints "32-bit" -- likely should
 * test ar->bit32; confirm against the complete source */
650 ar
->bit64
? "32-bit" : ""));
/*
 * Release a lscpu_arch object.  NOTE(review): the body is entirely
 * elided from this view (presumably frees ar->name and ar itself).
 */
654 void lscpu_free_architecture(struct lscpu_arch
*ar
)
/*
 * Determine maxcpus (sysfs kernel_max, or the live kernel limit, or a
 * fallback when reading a /sys snapshot), size the cpu_set_t buffers,
 * create CPU objects from the "possible" mask and read the "present"
 * and "online" masks with their counts.  Aborts via err() when the
 * "possible" mask cannot be read.
 */
662 int lscpu_read_cpulists(struct lscpu_cxt
*cxt
)
664 cpu_set_t
*cpuset
= NULL
;
667 DBG(GATHER
, ul_debugobj(cxt
, "reading cpulists"));
669 if (ul_path_read_s32(cxt
->syscpu
, &cxt
->maxcpus
, "kernel_max") == 0)
670 /* note that kernel_max is maximum index [NR_CPUS-1] */
673 else if (!cxt
->noalive
)
674 /* the root is '/' so we are working with data from the current kernel */
675 cxt
->maxcpus
= get_max_number_of_cpus();
677 if (cxt
->maxcpus
<= 0)
678 /* error or we are reading some /sys snapshot instead of the
679 * real /sys, let's use any crazy number... */
682 cxt
->setsize
= CPU_ALLOC_SIZE(cxt
->maxcpus
);
684 /* create CPUs from possible mask */
685 if (ul_path_readf_cpulist(cxt
->syscpu
, &cpuset
, cxt
->maxcpus
, "possible") == 0) {
686 lscpu_create_cpus(cxt
, cpuset
, cxt
->setsize
);
690 err(EXIT_FAILURE
, _("failed to determine number of CPUs: %s"),
691 _PATH_SYS_CPU
"/possible");
694 /* get mask for present CPUs */
695 if (ul_path_readf_cpulist(cxt
->syscpu
, &cxt
->present
, cxt
->maxcpus
, "present") == 0)
696 cxt
->npresents
= CPU_COUNT_S(cxt
->setsize
, cxt
->present
);
698 /* get mask for online CPUs */
699 if (ul_path_readf_cpulist(cxt
->syscpu
, &cxt
->online
, cxt
->maxcpus
, "online") == 0)
700 cxt
->nonlines
= CPU_COUNT_S(cxt
->setsize
, cxt
->online
);
705 #if defined(HAVE_LIBRTAS)
706 # define PROCESSOR_MODULE_INFO 43
/*
 * Decode a big-endian 16-bit value from @buf at byte @offset (used for
 * the RTAS PROCESSOR_MODULE_INFO blob).
 *
 * The bytes are read as unsigned char: with plain (signed) char, a byte
 * >= 0x80 would be sign-extended during integer promotion and the shift
 * would yield a negative/garbage result.
 */
static int strbe16toh(const char *buf, int offset)
{
	return ((unsigned char) buf[offset] << 8) + (unsigned char) buf[offset + 1];
}
/*
 * Fill extra, architecture-specific data into the default CPU type:
 * sysfs dispatching/cpufreq-boost modes, the s390 machine type from
 * /proc/sysinfo, and (with librtas) PowerPC socket/chip/core counts
 * from the PROCESSOR_MODULE_INFO system parameter.
 * NOTE(review): error/return paths and the rc/len/ntypes checks are on
 * lines elided from this view.
 */
713 /* some extra information for the default CPU type */
714 int lscpu_read_archext(struct lscpu_cxt
*cxt
)
718 struct lscpu_cputype
*ct
;
720 DBG(GATHER
, ul_debugobj(cxt
, "reading extra arch info"));
723 ct
= lscpu_cputype_get_default(cxt
);
727 /* get dispatching mode */
728 if (ul_path_read_s32(cxt
->syscpu
, &ct
->dispatching
, "dispatching") != 0)
729 ct
->dispatching
= -1;
731 /* get cpufreq boost mode */
732 if (ul_path_read_s32(cxt
->syscpu
, &ct
->freqboost
, "cpufreq/boost") != 0)
735 if ((f
= ul_path_fopen(cxt
->procfs
, "r", "sysinfo"))) {
736 while (fgets(buf
, sizeof(buf
), f
) != NULL
) {
737 if (lookup(buf
, "Type", &ct
->machinetype
))
743 #if defined(HAVE_LIBRTAS)
744 /* Get PowerPC specific info */
748 ct
->physsockets
= ct
->physchips
= ct
->physcoresperchip
= 0;
750 rc
= rtas_get_sysparm(PROCESSOR_MODULE_INFO
, sizeof(buf
), buf
);
754 len
= strbe16toh(buf
, 0);
758 ntypes
= strbe16toh(buf
, 2);
762 ct
->physsockets
= strbe16toh(buf
, 4);
763 ct
->physchips
= strbe16toh(buf
, 6);
764 ct
->physcoresperchip
= strbe16toh(buf
, 8);
771 static int cmp_vulnerability_name(const void *a0
, const void *b0
)
773 const struct lscpu_vulnerability
774 *a
= (const struct lscpu_vulnerability
*) a0
,
775 *b
= (const struct lscpu_vulnerability
*) b0
;
776 return strcmp(a
->name
, b
->name
);
/*
 * Read /sys/devices/system/cpu/vulnerabilities: count the entries,
 * allocate cxt->vuls, read each file into a vulnerability record with a
 * prettified name (capitalized, '_' -> ' ') and cleaned-up text, then
 * sort by name.  NOTE(review): the directory rewind between counting
 * and reading, and several cleanup lines, are elided from this view.
 */
779 int lscpu_read_vulnerabilities(struct lscpu_cxt
*cxt
)
787 DBG(GATHER
, ul_debugobj(cxt
, "reading vulnerabilities"));
789 dir
= ul_path_opendir(cxt
->syscpu
, "vulnerabilities");
794 while (xreaddir(dir
))
802 cxt
->vuls
= xcalloc(n
, sizeof(struct lscpu_vulnerability
));
804 while (cxt
->nvuls
< n
&& (d
= xreaddir(dir
))) {
806 struct lscpu_vulnerability
*vu
;
808 #ifdef _DIRENT_HAVE_D_TYPE
809 if (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
)
812 if (ul_path_readf_string(cxt
->syscpu
, &str
,
813 "vulnerabilities/%s", d
->d_name
) <= 0)
816 vu
= &cxt
->vuls
[cxt
->nvuls
++];
/* prettify: capitalize and replace underscores with spaces;
 * NOTE(review): toupper() on a plain char is implementation-defined for
 * negative values -- harmless for ASCII d_name, but worth a cast */
819 vu
->name
= xstrdup(d
->d_name
);
820 *vu
->name
= toupper(*vu
->name
);
821 strrep(vu
->name
, '_', ' ');
825 p
= (char *) startswith(vu
->text
, "Mitigation");
828 strrem(vu
->text
, ':');
833 qsort(cxt
->vuls
, cxt
->nvuls
,
834 sizeof(struct lscpu_vulnerability
), cmp_vulnerability_name
);
/*
 * True when @d names a NUMA node directory: "node<digits>" with a
 * directory (or unknown) d_type.  NOTE(review): the return keyword, the
 * NULL check on @d and the #endif are on lines elided from this view.
 */
839 static inline int is_node_dirent(struct dirent
*d
)
843 #ifdef _DIRENT_HAVE_D_TYPE
844 (d
->d_type
== DT_DIR
|| d
->d_type
== DT_UNKNOWN
) &&
846 strncmp(d
->d_name
, "node", 4) == 0 &&
847 isdigit_string(d
->d_name
+ 4);
/*
 * qsort() comparator for node numbers (ints).  NOTE(review): the return
 * expression is elided from this view -- presumably ascending order.
 */
850 static int nodecmp(const void *ap
, const void *bp
)
852 int *a
= (int *) ap
, *b
= (int *) bp
;
/*
 * Read NUMA topology from _PATH_SYS_NODE: count "node<N>" directories,
 * collect and sort their node numbers, then read each node's cpumap
 * into cxt->nodemaps.  Must not be called twice (assert on nnodes).
 * NOTE(review): the directory rewind between counting and reading, and
 * the function tail (cleanup/return), lie outside this view.
 */
856 int lscpu_read_numas(struct lscpu_cxt
*cxt
)
861 struct path_cxt
*sys
;
863 assert(!cxt
->nnodes
);
866 sys
= ul_new_path(_PATH_SYS_NODE
);
868 err(EXIT_FAILURE
, _("failed to initialize %s handler"), _PATH_SYS_NODE
);
870 ul_path_set_prefix(sys
, cxt
->prefix
);
872 dir
= ul_path_opendir(sys
, NULL
);
/* first pass: count node directories */
876 while ((d
= readdir(dir
))) {
877 if (is_node_dirent(d
))
886 cxt
->nodemaps
= xcalloc(cxt
->nnodes
, sizeof(cpu_set_t
*));
887 cxt
->idx2nodenum
= xmalloc(cxt
->nnodes
* sizeof(int));
/* second pass: extract the numeric suffix of each node directory */
890 for (i
= 0; (d
= readdir(dir
)) && i
< cxt
->nnodes
;) {
891 if (is_node_dirent(d
))
892 cxt
->idx2nodenum
[i
++] = strtol_or_err(((d
->d_name
) + 4),
893 _("Failed to extract the node number"));
896 qsort(cxt
->idx2nodenum
, cxt
->nnodes
, sizeof(int), nodecmp
);
898 /* information about how nodes share different CPUs */
899 for (i
= 0; i
< cxt
->nnodes
; i
++)
900 ul_path_readf_cpuset(sys
, &cxt
->nodemaps
[i
], cxt
->maxcpus
,
901 "node%d/cpumap", cxt
->idx2nodenum
[i
]);
903 DBG(GATHER
, ul_debugobj(cxt
, "read %zu numas", cxt
->nnodes
));