git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
libperf cpumap: Make index and nr types unsigned
authorIan Rogers <irogers@google.com>
Tue, 31 Mar 2026 18:29:48 +0000 (11:29 -0700)
committerNamhyung Kim <namhyung@kernel.org>
Wed, 1 Apr 2026 21:50:53 +0000 (14:50 -0700)
The index into the cpumap array and the number of entries within the
array can never be negative, so let's make them unsigned. This is
prompted by reports that gcc 13 with -O6 is giving
alloc-size-larger-than errors. The change makes the cpumap changes and
then updates the declaration of index variables throughout perf and
libperf to be unsigned. The two things are hard to separate as
compiler warnings about mixing signed and unsigned types break the
build.

Reported-by: Chingbin Li <liqb365@163.com>
Closes: https://lore.kernel.org/lkml/20260212025127.841090-1-liqb365@163.com/
Tested-by: Chingbin Li <liqb365@163.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
34 files changed:
tools/lib/perf/cpumap.c
tools/lib/perf/evsel.c
tools/lib/perf/include/internal/cpumap.h
tools/lib/perf/include/perf/cpumap.h
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/arm64/util/header.c
tools/perf/arch/x86/util/pmu.c
tools/perf/builtin-c2c.c
tools/perf/builtin-record.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/tests/bitmap.c
tools/perf/tests/cpumap.c
tools/perf/tests/mem2node.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/tests/topology.c
tools/perf/util/affinity.c
tools/perf/util/bpf_counter.c
tools/perf/util/bpf_counter_cgroup.c
tools/perf/util/bpf_kwork.c
tools/perf/util/bpf_kwork_top.c
tools/perf/util/bpf_off_cpu.c
tools/perf/util/bpf_trace_augment.c
tools/perf/util/cpumap.c
tools/perf/util/cputopo.c
tools/perf/util/env.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/stat-display.c
tools/perf/util/stat.c
tools/perf/util/svghelper.c
tools/perf/util/symbol.c
tools/perf/util/synthetic-events.c

index 4160e7d2e120fea30604ce4c9b6fd395f35bec53..e51b0490ad57da05ceced624260d0e73cd94abb1 100644 (file)
 
 #define MAX_NR_CPUS 4096
 
-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus)
 {
        RC_CHK_ACCESS(map)->nr = nr_cpus;
 }
 
-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus)
 {
        RC_STRUCT(perf_cpu_map) *cpus;
        struct perf_cpu_map *result;
@@ -78,7 +78,7 @@ void perf_cpu_map__put(struct perf_cpu_map *map)
 static struct perf_cpu_map *cpu_map__new_sysconf(void)
 {
        struct perf_cpu_map *cpus;
-       int nr_cpus, nr_cpus_conf;
+       long nr_cpus, nr_cpus_conf;
 
        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (nr_cpus < 0)
@@ -86,15 +86,13 @@ static struct perf_cpu_map *cpu_map__new_sysconf(void)
 
        nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
        if (nr_cpus != nr_cpus_conf) {
-               pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+               pr_warning("Number of online CPUs (%ld) differs from the number configured (%ld) the CPU map will only cover the first %ld CPUs.",
                        nr_cpus, nr_cpus_conf, nr_cpus);
        }
 
        cpus = perf_cpu_map__alloc(nr_cpus);
        if (cpus != NULL) {
-               int i;
-
-               for (i = 0; i < nr_cpus; ++i)
+               for (long i = 0; i < nr_cpus; ++i)
                        RC_CHK_ACCESS(cpus)->map[i].cpu = i;
        }
 
@@ -132,23 +130,23 @@ static int cmp_cpu(const void *a, const void *b)
        return cpu_a->cpu - cpu_b->cpu;
 }
 
-static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
 {
        return RC_CHK_ACCESS(cpus)->map[idx];
 }
 
-static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
+static struct perf_cpu_map *cpu_map__trim_new(unsigned int nr_cpus, const struct perf_cpu *tmp_cpus)
 {
        size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
        struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
-       int i, j;
 
        if (cpus != NULL) {
+               unsigned int j = 0;
+
                memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
                qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
                /* Remove dups */
-               j = 0;
-               for (i = 0; i < nr_cpus; i++) {
+               for (unsigned int i = 0; i < nr_cpus; i++) {
                        if (i == 0 ||
                            __perf_cpu_map__cpu(cpus, i).cpu !=
                            __perf_cpu_map__cpu(cpus, i - 1).cpu) {
@@ -167,9 +165,8 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
        struct perf_cpu_map *cpus = NULL;
        unsigned long start_cpu, end_cpu = 0;
        char *p = NULL;
-       int i, nr_cpus = 0;
+       unsigned int nr_cpus = 0, max_entries = 0;
        struct perf_cpu *tmp_cpus = NULL, *tmp;
-       int max_entries = 0;
 
        if (!cpu_list)
                return perf_cpu_map__new_online_cpus();
@@ -208,9 +205,10 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 
                for (; start_cpu <= end_cpu; start_cpu++) {
                        /* check for duplicates */
-                       for (i = 0; i < nr_cpus; i++)
+                       for (unsigned int i = 0; i < nr_cpus; i++) {
                                if (tmp_cpus[i].cpu == (int16_t)start_cpu)
                                        goto invalid;
+                       }
 
                        if (nr_cpus == max_entries) {
                                max_entries += max(end_cpu - start_cpu + 1, 16UL);
@@ -252,12 +250,12 @@ struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
        return cpus;
 }
 
-static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+static unsigned int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 {
        return RC_CHK_ACCESS(cpus)->nr;
 }
 
-struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
 {
        struct perf_cpu result = {
                .cpu = -1
@@ -269,7 +267,7 @@ struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
        return result;
 }
 
-int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 {
        return cpus ? __perf_cpu_map__nr(cpus) : 1;
 }
@@ -294,7 +292,7 @@ bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
 
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
 {
-       int low, high;
+       unsigned int low, high;
 
        if (!cpus)
                return -1;
@@ -324,7 +322,7 @@ bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
 
 bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
 {
-       int nr;
+       unsigned int nr;
 
        if (lhs == rhs)
                return true;
@@ -336,7 +334,7 @@ bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_m
        if (nr != __perf_cpu_map__nr(rhs))
                return false;
 
-       for (int idx = 0; idx < nr; idx++) {
+       for (unsigned int idx = 0; idx < nr; idx++) {
                if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
                        return false;
        }
@@ -353,7 +351,7 @@ struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
        struct perf_cpu cpu, result = {
                .cpu = -1
        };
-       int idx;
+       unsigned int idx;
 
        perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
                result = cpu;
@@ -384,7 +382,7 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
        if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
                return false;
 
-       for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+       for (unsigned int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
                if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
                        return false;
                if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
@@ -410,8 +408,7 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
 int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
 {
        struct perf_cpu *tmp_cpus;
-       int tmp_len;
-       int i, j, k;
+       unsigned int tmp_len, i, j, k;
        struct perf_cpu_map *merged;
 
        if (perf_cpu_map__is_subset(*orig, other))
@@ -455,7 +452,7 @@ int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
 struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
                                             struct perf_cpu_map *other)
 {
-       int i, j, k;
+       unsigned int i, j, k;
        struct perf_cpu_map *merged;
 
        if (perf_cpu_map__is_subset(other, orig))
index 13a307fc75ae865b757c968d90967611ba1deac3..f747c0bc692d8a61a22fe7b2884dedf729945723 100644 (file)
@@ -127,7 +127,8 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                     struct perf_thread_map *threads)
 {
        struct perf_cpu cpu;
-       int idx, thread, err = 0;
+       unsigned int idx;
+       int thread, err = 0;
 
        if (cpus == NULL) {
                static struct perf_cpu_map *empty_cpu_map;
@@ -460,7 +461,7 @@ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
 int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
 {
        struct perf_cpu cpu __maybe_unused;
-       int idx;
+       unsigned int idx;
        int err;
 
        perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
@@ -499,12 +500,13 @@ int perf_evsel__disable(struct perf_evsel *evsel)
 
 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
 {
-       int err = 0, i;
+       int err = 0;
 
-       for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
+       for (unsigned int i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) {
                err = perf_evsel__run_ioctl(evsel,
                                     PERF_EVENT_IOC_SET_FILTER,
                                     (void *)filter, i);
+       }
        return err;
 }
 
index e2be2d17c32b5bb6544b34b0f4df7db6ed10682b..c19678188b174bf792daa4fa6067df92069325b2 100644 (file)
 DECLARE_RC_STRUCT(perf_cpu_map) {
        refcount_t      refcnt;
        /** Length of the map array. */
-       int             nr;
+       unsigned int    nr;
        /** The CPU values. */
        struct perf_cpu map[];
 };
 
-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus);
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
 bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
 
-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus);
 
 static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
 {
index 58cc5c5fa47c5f2608a85719da4d35bb2078b00f..a1dd25db65b62f6bbbe65921c24dfb4c90083055 100644 (file)
@@ -49,7 +49,7 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
  * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
  *                     is invalid.
  */
-LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx);
 /**
  * perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a
  *                    cpu of -1 for an invalid index, this makes an empty map
@@ -57,7 +57,7 @@ LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, i
  *                    the result is the number CPUs in the map plus one if the
  *                    "any CPU"/dummy value is present.
  */
-LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+LIBPERF_API unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 /**
  * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
  */
index 4418d21708d66b567a07345a9f6f28454b2444ed..b7a839de870755f5951aa09e9efb119d688046ac 100644 (file)
@@ -197,7 +197,8 @@ static struct perf_pmu *cs_etm_get_pmu(struct auxtrace_record *itr)
 static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu,
                                  struct evsel *evsel)
 {
-       int idx, err = 0;
+       unsigned int idx;
+       int err = 0;
        struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
        struct perf_cpu_map *intersect_cpus;
        struct perf_cpu cpu;
@@ -546,7 +547,7 @@ static size_t
 cs_etm_info_priv_size(struct auxtrace_record *itr,
                      struct evlist *evlist)
 {
-       int idx;
+       unsigned int idx;
        int etmv3 = 0, etmv4 = 0, ete = 0;
        struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
        struct perf_cpu_map *intersect_cpus;
@@ -783,7 +784,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
                            struct perf_record_auxtrace_info *info,
                            size_t priv_size)
 {
-       int i;
+       unsigned int i;
        u32 offset;
        u64 nr_cpu, type;
        struct perf_cpu_map *cpu_map;
index 17ced7bbbddaf15d52050990327c7ca074c3b411..f00d72d087fccd8ceac43b02d1ac67f150b8ac84 100644 (file)
@@ -144,7 +144,8 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
                             struct perf_record_auxtrace_info *auxtrace_info,
                             size_t priv_size)
 {
-       int i, ret;
+       unsigned int i;
+       int ret;
        size_t offset;
        struct arm_spe_recording *sper =
                        container_of(itr, struct arm_spe_recording, itr);
index cbc0ba101636bf275101385c662f5f3881c8f7c4..95e71c4f6c78cfdd2327d81790e8a26a990a772f 100644 (file)
@@ -43,7 +43,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
 int get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
 {
        struct perf_cpu_map *cpus;
-       int idx;
+       unsigned int idx;
 
        if (cpu.cpu != -1)
                return _get_cpuid(buf, sz, cpu);
index 4ea4d022c9c35a5281248fb452e020b397d9918c..0661e0f0b02d03cd6f8992834a2168044564ce9a 100644 (file)
@@ -221,7 +221,8 @@ static void gnr_uncore_cha_imc_adjust_cpumask_for_snc(struct perf_pmu *pmu, bool
        static struct perf_cpu_map *cha_adjusted[MAX_SNCS];
        static struct perf_cpu_map *imc_adjusted[MAX_SNCS];
        struct perf_cpu_map **adjusted = cha ? cha_adjusted : imc_adjusted;
-       int idx, pmu_snc, cpu_adjust;
+       unsigned int idx;
+       int pmu_snc, cpu_adjust;
        struct perf_cpu cpu;
        bool alloc;
 
index d390ae4e3ec819c3d461aacf87a64264f3f1ed95..e60eea62c2fc76d66c07c5851672c13056fc1d7e 100644 (file)
@@ -2310,7 +2310,6 @@ static int setup_nodes(struct perf_session *session)
 {
        struct numa_node *n;
        unsigned long **nodes;
-       int node, idx;
        struct perf_cpu cpu;
        int *cpu2node;
        struct perf_env *env = perf_session__env(session);
@@ -2335,14 +2334,15 @@ static int setup_nodes(struct perf_session *session)
        if (!cpu2node)
                return -ENOMEM;
 
-       for (idx = 0; idx < c2c.cpus_cnt; idx++)
+       for (int idx = 0; idx < c2c.cpus_cnt; idx++)
                cpu2node[idx] = -1;
 
        c2c.cpu2node = cpu2node;
 
-       for (node = 0; node < c2c.nodes_cnt; node++) {
+       for (int node = 0; node < c2c.nodes_cnt; node++) {
                struct perf_cpu_map *map = n[node].map;
                unsigned long *set;
+               unsigned int idx;
 
                set = bitmap_zalloc(c2c.cpus_cnt);
                if (!set)
index 3276ffdc3141aa28d37b351022e63b22014a2b2d..e919d1f021c3cdbfbf780b3acac8c4fe8c561317 100644 (file)
@@ -3663,7 +3663,7 @@ struct option *record_options = __record_options;
 static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
 {
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
 
        if (cpu_map__is_dummy(cpus))
                return 0;
index b80c406d1fc1ba256081ffa4a20b7580c80db036..b005b23f9d8cd273dd3ef2620cad38a707d34f85 100644 (file)
@@ -2572,7 +2572,6 @@ static struct scripting_ops       *scripting_ops;
 static void __process_stat(struct evsel *counter, u64 tstamp)
 {
        int nthreads = perf_thread_map__nr(counter->core.threads);
-       int idx, thread;
        struct perf_cpu cpu;
        static int header_printed;
 
@@ -2582,7 +2581,9 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
                header_printed = 1;
        }
 
-       for (thread = 0; thread < nthreads; thread++) {
+       for (int thread = 0; thread < nthreads; thread++) {
+               unsigned int idx;
+
                perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
                        struct perf_counts_values *counts;
 
index c043a31a2ab08aab2e1b42068d38cc9416f63242..a24326c44297c534819283dad0cfa9eb2dc5d8a4 100644 (file)
@@ -410,7 +410,7 @@ static int read_tool_counters(void)
        struct evsel *counter;
 
        evlist__for_each_entry(evsel_list, counter) {
-               int idx;
+               unsigned int idx;
 
                if (!evsel__is_tool(counter))
                        continue;
index 98956e0e076559bebcfed91f61791b6eb1a65b63..e7adf60be721a564f61d54bd67813ad6c04bafab 100644 (file)
@@ -16,7 +16,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
        bm = bitmap_zalloc(nbits);
 
        if (map && bm) {
-               int i;
+               unsigned int i;
                struct perf_cpu cpu;
 
                perf_cpu_map__for_each_cpu(cpu, i, map)
index 2354246afc5a598dc3e0d431e3ccef145dd80ce2..b051dce2cd8616b2e87d3f5230d9f3964221100d 100644 (file)
@@ -156,7 +156,8 @@ static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subte
        return 0;
 }
 
-static int __test__cpu_map_merge(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_merge(const char *lhs, const char *rhs, unsigned int nr,
+                                const char *expected)
 {
        struct perf_cpu_map *a = perf_cpu_map__new(lhs);
        struct perf_cpu_map *b = perf_cpu_map__new(rhs);
@@ -204,7 +205,8 @@ static int test__cpu_map_merge(struct test_suite *test __maybe_unused,
        return ret;
 }
 
-static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_intersect(const char *lhs, const char *rhs, unsigned int nr,
+                                    const char *expected)
 {
        struct perf_cpu_map *a = perf_cpu_map__new(lhs);
        struct perf_cpu_map *b = perf_cpu_map__new(rhs);
index a0e88c49610746d4686002d0aaa17de741b178a9..7ce1ad7b6ce5950477dd377035e0cccaaf8eca85 100644 (file)
@@ -30,7 +30,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 
        if (map && bm) {
                struct perf_cpu cpu;
-               int i;
+               unsigned int i;
 
                perf_cpu_map__for_each_cpu(cpu, i, map)
                        __set_bit(cpu.cpu, bm);
index 3644d6f52c07d0ab494bfd29f30a009837cce861..0be43f8db3bda209d4f46c6e8e8e1ef88997580e 100644 (file)
@@ -22,7 +22,8 @@
 static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
                                                  int subtest __maybe_unused)
 {
-       int err = TEST_FAIL, fd, idx;
+       int err = TEST_FAIL, fd;
+       unsigned int idx;
        struct perf_cpu cpu;
        struct perf_cpu_map *cpus;
        struct evsel *evsel;
index a34a7ab19a80c6bdc877a9f1f24d035d862387af..75b748ddf8248091791a1ebcd95eb9a6d251a558 100644 (file)
@@ -69,7 +69,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
                .path = path,
                .mode = PERF_DATA_MODE_READ,
        };
-       int i;
+       unsigned int i;
        struct aggr_cpu_id id;
        struct perf_cpu cpu;
        struct perf_env *env;
@@ -116,7 +116,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 
        TEST_ASSERT_VAL("Session header CPU map not set", env->cpu);
 
-       for (i = 0; i < env->nr_cpus_avail; i++) {
+       for (i = 0; i < (unsigned int)env->nr_cpus_avail; i++) {
                cpu.cpu = i;
                if (!perf_cpu_map__has(map, cpu))
                        continue;
index 4fe85133429621d0954c1418c4575f5a28b27164..6c64b5f69a4e05e6896b0bb5f5bb1ed737d7dbba 100644 (file)
@@ -90,7 +90,7 @@ void cpu_map__set_affinity(const struct perf_cpu_map *cpumap)
        int cpu_set_size = get_cpu_set_size();
        unsigned long *cpuset = bitmap_zalloc(cpu_set_size * 8);
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
 
        if (!cpuset)
                return;
index a5882b5822057b528807c3666e3a8da7b43bd653..2ffd7aefb6eb3bf8dfc736019b562b0d56f57806 100644 (file)
@@ -294,7 +294,8 @@ static int bpf_program_profiler__read(struct evsel *evsel)
        struct perf_counts_values *counts;
        int reading_map_fd;
        __u32 key = 0;
-       int err, idx, bpf_cpu;
+       int err, bpf_cpu;
+       unsigned int idx;
 
        if (list_empty(&evsel->bpf_counter_list))
                return -EAGAIN;
@@ -318,11 +319,12 @@ static int bpf_program_profiler__read(struct evsel *evsel)
                }
 
                for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
-                       idx = perf_cpu_map__idx(evsel__cpus(evsel),
-                                               (struct perf_cpu){.cpu = bpf_cpu});
-                       if (idx == -1)
+                       int i = perf_cpu_map__idx(evsel__cpus(evsel),
+                                                 (struct perf_cpu){.cpu = bpf_cpu});
+
+                       if (i == -1)
                                continue;
-                       counts = perf_counts(evsel->counts, idx, 0);
+                       counts = perf_counts(evsel->counts, i, 0);
                        counts->val += values[bpf_cpu].counter;
                        counts->ena += values[bpf_cpu].enabled;
                        counts->run += values[bpf_cpu].running;
@@ -668,7 +670,7 @@ static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
 static int bperf_sync_counters(struct evsel *evsel)
 {
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
 
        perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
                bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
@@ -695,13 +697,11 @@ static int bperf__read(struct evsel *evsel)
        struct bpf_perf_event_value values[num_cpu_bpf];
        struct perf_counts_values *counts;
        int reading_map_fd, err = 0;
-       __u32 i;
-       int j;
 
        bperf_sync_counters(evsel);
        reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
 
-       for (i = 0; i < filter_entry_cnt; i++) {
+       for (__u32 i = 0; i < filter_entry_cnt; i++) {
                struct perf_cpu entry;
                __u32 cpu;
 
@@ -709,9 +709,10 @@ static int bperf__read(struct evsel *evsel)
                if (err)
                        goto out;
                switch (evsel->follower_skel->bss->type) {
-               case BPERF_FILTER_GLOBAL:
-                       assert(i == 0);
+               case BPERF_FILTER_GLOBAL: {
+                       unsigned int j;
 
+                       assert(i == 0);
                        perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
                                counts = perf_counts(evsel->counts, j, 0);
                                counts->val = values[entry.cpu].counter;
@@ -719,6 +720,7 @@ static int bperf__read(struct evsel *evsel)
                                counts->run = values[entry.cpu].running;
                        }
                        break;
+               }
                case BPERF_FILTER_CPU:
                        cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
                        assert(cpu >= 0);
index 17d7196c658920bd2074ee05668b73ed64bef481..5572ceccf86092ff9e235881e869fb9749258ff7 100644 (file)
@@ -98,7 +98,7 @@ static int bperf_load_program(struct evlist *evlist)
        struct bpf_link *link;
        struct evsel *evsel;
        struct cgroup *cgrp, *leader_cgrp;
-       int i, j;
+       unsigned int i;
        struct perf_cpu cpu;
        int total_cpus = cpu__max_cpu().cpu;
        int map_fd, prog_fd, err;
@@ -146,6 +146,8 @@ static int bperf_load_program(struct evlist *evlist)
 
        evlist__for_each_entry(evlist, evsel) {
                if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
+                       unsigned int j;
+
                        leader_cgrp = evsel->cgrp;
                        evsel->cgrp = NULL;
 
@@ -234,7 +236,7 @@ static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
 static int bperf_cgrp__sync_counters(struct evlist *evlist)
 {
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
        int prog_fd = bpf_program__fd(skel->progs.trigger_read);
 
        perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
@@ -286,7 +288,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
 
        evlist__for_each_entry(evlist, evsel) {
                __u32 idx = evsel->core.idx;
-               int i;
+               unsigned int i;
                struct perf_cpu cpu;
 
                err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
index 5cff755c71faed12aaa3c9029237bfdd2e3fc2d5..d3a2e548f2b625bf405e0e5c0fa68aca7c6b35a1 100644 (file)
@@ -148,7 +148,8 @@ static bool valid_kwork_class_type(enum kwork_class_type type)
 static int setup_filters(struct perf_kwork *kwork)
 {
        if (kwork->cpu_list != NULL) {
-               int idx, nr_cpus;
+               unsigned int idx;
+               int nr_cpus;
                struct perf_cpu_map *map;
                struct perf_cpu cpu;
                int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
index b6f187dd9136ddca3c6d41e3e19fff328cd9261f..189a29d2bc9618f53d6db75e07866d930d5f5656 100644 (file)
@@ -123,7 +123,8 @@ static bool valid_kwork_class_type(enum kwork_class_type type)
 static int setup_filters(struct perf_kwork *kwork)
 {
        if (kwork->cpu_list) {
-               int idx, nr_cpus, fd;
+               unsigned int idx;
+               int nr_cpus, fd;
                struct perf_cpu_map *map;
                struct perf_cpu cpu;
 
index 88e0660c4bff4cde3428a3b1dae2c0539ac700d0..0891d9c736609cfabee2e1cb51d51e61bf753fd9 100644 (file)
@@ -67,7 +67,7 @@ static void off_cpu_start(void *arg)
        struct evlist *evlist = arg;
        struct evsel *evsel;
        struct perf_cpu pcpu;
-       int i;
+       unsigned int i;
 
        /* update task filter for the given workload */
        if (skel->rodata->has_task && skel->rodata->uses_tgid &&
index 56ed17534caa4f3f9bcc9efd275aabcf966c8b05..9e706f0fa53d4a0a48ff79704cc41e5a95e3ce4d 100644 (file)
@@ -60,7 +60,7 @@ int augmented_syscalls__create_bpf_output(struct evlist *evlist)
 void augmented_syscalls__setup_bpf_output(void)
 {
        struct perf_cpu cpu;
-       int i;
+       unsigned int i;
 
        if (bpf_output == NULL)
                return;
index a80845038a5ebcf6fa08bab26628642b5da57199..11922e1ded844a03a49b8757f2f91f7dfc0e4587 100644 (file)
@@ -254,7 +254,7 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
                                       aggr_cpu_id_get_t get_id,
                                       void *data, bool needs_sort)
 {
-       int idx;
+       unsigned int idx;
        struct perf_cpu cpu;
        struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));
 
@@ -280,7 +280,7 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
                }
        }
        /* Trim. */
-       if (c->nr != perf_cpu_map__nr(cpus)) {
+       if (c->nr != (int)perf_cpu_map__nr(cpus)) {
                struct cpu_aggr_map *trimmed_c =
                        realloc(c,
                                sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
@@ -631,9 +631,9 @@ size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
 
 #define COMMA first ? "" : ","
 
-       for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
+       for (i = 0; i < (int)perf_cpu_map__nr(map) + 1; i++) {
                struct perf_cpu cpu = { .cpu = INT16_MAX };
-               bool last = i == perf_cpu_map__nr(map);
+               bool last = i == (int)perf_cpu_map__nr(map);
 
                if (!last)
                        cpu = perf_cpu_map__cpu(map, i);
@@ -679,7 +679,7 @@ static char hex_char(unsigned char val)
 
 size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
 {
-       int idx;
+       unsigned int idx;
        char *ptr = buf;
        unsigned char *bitmap;
        struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
index 8bbeb2dc76fda994b7f83abd227aceaed6e78c55..e0091804fe9849d219c2f06b46873cd8a3f90c76 100644 (file)
@@ -191,7 +191,7 @@ bool cpu_topology__core_wide(const struct cpu_topology *topology,
                const char *core_cpu_list = topology->core_cpus_list[i];
                struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
                struct perf_cpu cpu;
-               int idx;
+               unsigned int idx;
                bool has_first, first = true;
 
                perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
index 93d475a80f14deb2989cd1b1b28536ccba0a1549..1e54e2c863605d09da2353bfacbfb62e84dfcc29 100644 (file)
@@ -718,7 +718,7 @@ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
 
                for (i = 0; i < env->nr_numa_nodes; i++) {
                        struct perf_cpu tmp;
-                       int j;
+                       unsigned int j;
 
                        nn = &env->numa_nodes[i];
                        perf_cpu_map__for_each_cpu(tmp, j, nn->map)
index 2b0df7bd9a468f291b8a518eaa95f3ec41d461dc..5a30caaec73ef06b8c7fa286fb32088905b62af5 100644 (file)
@@ -1701,7 +1701,7 @@ static void python_process_stat(struct perf_stat_config *config,
        struct perf_cpu_map *cpus = counter->core.cpus;
 
        for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) {
-               int idx;
+               unsigned int idx;
                struct perf_cpu cpu;
 
                perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
index 4b465abfa36c8e4486696a027ceb9bf62a898313..09de5288f9e15bb0ce07efbee815e22e503e3081 100644 (file)
@@ -2766,7 +2766,8 @@ struct evsel *perf_session__find_first_evtype(struct perf_session *session,
 int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap)
 {
-       int i, err = -1;
+       unsigned int i;
+       int err = -1;
        struct perf_cpu_map *map;
        int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS);
        struct perf_cpu cpu;
index dc2b66855f6c43b944021a87ed57ebba41611b26..993f4c4b8f44286e6d7d632b0abffb86e34e3cce 100644 (file)
@@ -897,7 +897,7 @@ static bool should_skip_zero_counter(struct perf_stat_config *config,
                                     const struct aggr_cpu_id *id)
 {
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
 
        /*
         * Skip unsupported default events when not verbose. (default events
@@ -1125,7 +1125,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
                                 struct evlist *evlist,
                                 struct outstate *os)
 {
-       int all_idx;
+       unsigned int all_idx;
        struct perf_cpu cpu;
 
        perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
index 976a06e632529ef00468549b62c0132a75df59aa..14d169e22e8f56fc9281a0599885c50e25087b1c 100644 (file)
@@ -246,9 +246,11 @@ void evlist__reset_prev_raw_counts(struct evlist *evlist)
 
 static void evsel__copy_prev_raw_counts(struct evsel *evsel)
 {
-       int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
+       int nthreads = perf_thread_map__nr(evsel->core.threads);
 
        for (int thread = 0; thread < nthreads; thread++) {
+               unsigned int idx;
+
                perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
                        *perf_counts(evsel->counts, idx, thread) =
                                *perf_counts(evsel->prev_raw_counts, idx, thread);
@@ -580,7 +582,7 @@ static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id
        struct perf_counts_values counts = { 0, };
        struct aggr_cpu_id id;
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
 
        /* collect per-core counts */
        perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
@@ -617,7 +619,7 @@ static void evsel__process_percore(struct evsel *evsel)
        struct perf_stat_evsel *ps = evsel->stats;
        struct aggr_cpu_id core_id;
        struct perf_cpu cpu;
-       int idx;
+       unsigned int idx;
 
        if (!evsel->percore)
                return;
index b1d259f590e9859d3a0a187e75f0a59c5777d094..e360e7736c7ba65b6a02ebba4117e11d5639e374 100644 (file)
@@ -726,7 +726,8 @@ static void scan_core_topology(int *map, struct topology *t, int nr_cpus)
 
 static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
 {
-       int idx, ret = 0;
+       unsigned int idx;
+       int ret = 0;
        struct perf_cpu_map *map;
        struct perf_cpu cpu;
 
index ce9195717f4429013f0df5d7d57850a1ac14c84b..b4b30675688dbda5bf39ecf1faef6c5ec12ae287 100644 (file)
@@ -2363,7 +2363,8 @@ static int setup_parallelism_bitmap(void)
 {
        struct perf_cpu_map *map;
        struct perf_cpu cpu;
-       int i, err = -1;
+       unsigned int i;
+       int err = -1;
 
        if (symbol_conf.parallelism_list_str == NULL)
                return 0;
index ddf1cbda1902cb2df5c587a856898bb78ea916dd..85bee747f4cd2a73c9133093c5bef0140a60c7bc 100644 (file)
@@ -1266,7 +1266,7 @@ static void synthesize_cpus(struct synthesize_cpu_map_data *data)
 
 static void synthesize_mask(struct synthesize_cpu_map_data *data)
 {
-       int idx;
+       unsigned int idx;
        struct perf_cpu cpu;
 
        /* Due to padding, the 4bytes per entry mask variant is always smaller. */