#define MAX_NR_CPUS 4096
-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus)
{
RC_CHK_ACCESS(map)->nr = nr_cpus;
}
-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus)
{
RC_STRUCT(perf_cpu_map) *cpus;
struct perf_cpu_map *result;
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
struct perf_cpu_map *cpus;
- int nr_cpus, nr_cpus_conf;
+ long nr_cpus, nr_cpus_conf;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus < 0)
nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
if (nr_cpus != nr_cpus_conf) {
- pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+ pr_warning("Number of online CPUs (%ld) differs from the number configured (%ld) the CPU map will only cover the first %ld CPUs.",
nr_cpus, nr_cpus_conf, nr_cpus);
}
cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
- int i;
-
- for (i = 0; i < nr_cpus; ++i)
+ for (long i = 0; i < nr_cpus; ++i)
RC_CHK_ACCESS(cpus)->map[i].cpu = i;
}
return cpu_a->cpu - cpu_b->cpu;
}
-static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
{
return RC_CHK_ACCESS(cpus)->map[idx];
}
-static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
+static struct perf_cpu_map *cpu_map__trim_new(unsigned int nr_cpus, const struct perf_cpu *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
- int i, j;
if (cpus != NULL) {
+ unsigned int j = 0;
+
memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
/* Remove dups */
- j = 0;
- for (i = 0; i < nr_cpus; i++) {
+ for (unsigned int i = 0; i < nr_cpus; i++) {
if (i == 0 ||
__perf_cpu_map__cpu(cpus, i).cpu !=
__perf_cpu_map__cpu(cpus, i - 1).cpu) {
struct perf_cpu_map *cpus = NULL;
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
- int i, nr_cpus = 0;
+ unsigned int nr_cpus = 0, max_entries = 0;
struct perf_cpu *tmp_cpus = NULL, *tmp;
- int max_entries = 0;
if (!cpu_list)
return perf_cpu_map__new_online_cpus();
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
- for (i = 0; i < nr_cpus; i++)
+ for (unsigned int i = 0; i < nr_cpus; i++) {
if (tmp_cpus[i].cpu == (int16_t)start_cpu)
goto invalid;
+ }
if (nr_cpus == max_entries) {
max_entries += max(end_cpu - start_cpu + 1, 16UL);
return cpus;
}
-static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+static unsigned int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
return RC_CHK_ACCESS(cpus)->nr;
}
-struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
{
struct perf_cpu result = {
.cpu = -1
return result;
}
/*
 * Number of entries in @cpus. A NULL map counts as a single entry, so that
 * an empty map still yields one (invalid, cpu == -1) value when iterated.
 */
unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
- int low, high;
+ unsigned int low, high;
if (!cpus)
return -1;
bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
- int nr;
+ unsigned int nr;
if (lhs == rhs)
return true;
if (nr != __perf_cpu_map__nr(rhs))
return false;
- for (int idx = 0; idx < nr; idx++) {
+ for (unsigned int idx = 0; idx < nr; idx++) {
if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
return false;
}
struct perf_cpu cpu, result = {
.cpu = -1
};
- int idx;
+ unsigned int idx;
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
result = cpu;
if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
return false;
- for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+ for (unsigned int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
return false;
if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
struct perf_cpu *tmp_cpus;
- int tmp_len;
- int i, j, k;
+ unsigned int tmp_len, i, j, k;
struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(*orig, other))
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
- int i, j, k;
+ unsigned int i, j, k;
struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(other, orig))
struct perf_thread_map *threads)
{
struct perf_cpu cpu;
- int idx, thread, err = 0;
+ unsigned int idx;
+ int thread, err = 0;
if (cpus == NULL) {
static struct perf_cpu_map *empty_cpu_map;
int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
struct perf_cpu cpu __maybe_unused;
- int idx;
+ unsigned int idx;
int err;
perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
- int err = 0, i;
+ int err = 0;
- for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
+ for (unsigned int i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) {
err = perf_evsel__run_ioctl(evsel,
PERF_EVENT_IOC_SET_FILTER,
(void *)filter, i);
+ }
return err;
}
DECLARE_RC_STRUCT(perf_cpu_map) {
	refcount_t refcnt;
	/** Length of the map array. */
	unsigned int nr;
	/** The CPU values, stored as a C99 flexible array member. */
	struct perf_cpu map[];
};
/* Internal (non-LIBPERF_API) cpumap helpers. */
struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus);
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus);
static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
{
* perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
* is invalid.
*/
-LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx);
/**
* perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a
* cpu of -1 for an invalid index, this makes an empty map
* the result is the number CPUs in the map plus one if the
* "any CPU"/dummy value is present.
*/
-LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+LIBPERF_API unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
/**
* perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
*/
static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu,
struct evsel *evsel)
{
- int idx, err = 0;
+ unsigned int idx;
+ int err = 0;
struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
struct perf_cpu_map *intersect_cpus;
struct perf_cpu cpu;
cs_etm_info_priv_size(struct auxtrace_record *itr,
struct evlist *evlist)
{
- int idx;
+ unsigned int idx;
int etmv3 = 0, etmv4 = 0, ete = 0;
struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
struct perf_cpu_map *intersect_cpus;
struct perf_record_auxtrace_info *info,
size_t priv_size)
{
- int i;
+ unsigned int i;
u32 offset;
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
struct perf_record_auxtrace_info *auxtrace_info,
size_t priv_size)
{
- int i, ret;
+ unsigned int i;
+ int ret;
size_t offset;
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
int get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
{
struct perf_cpu_map *cpus;
- int idx;
+ unsigned int idx;
if (cpu.cpu != -1)
return _get_cpuid(buf, sz, cpu);
static struct perf_cpu_map *cha_adjusted[MAX_SNCS];
static struct perf_cpu_map *imc_adjusted[MAX_SNCS];
struct perf_cpu_map **adjusted = cha ? cha_adjusted : imc_adjusted;
- int idx, pmu_snc, cpu_adjust;
+ unsigned int idx;
+ int pmu_snc, cpu_adjust;
struct perf_cpu cpu;
bool alloc;
{
struct numa_node *n;
unsigned long **nodes;
- int node, idx;
struct perf_cpu cpu;
int *cpu2node;
struct perf_env *env = perf_session__env(session);
if (!cpu2node)
return -ENOMEM;
- for (idx = 0; idx < c2c.cpus_cnt; idx++)
+ for (int idx = 0; idx < c2c.cpus_cnt; idx++)
cpu2node[idx] = -1;
c2c.cpu2node = cpu2node;
- for (node = 0; node < c2c.nodes_cnt; node++) {
+ for (int node = 0; node < c2c.nodes_cnt; node++) {
struct perf_cpu_map *map = n[node].map;
unsigned long *set;
+ unsigned int idx;
set = bitmap_zalloc(c2c.cpus_cnt);
if (!set)
static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
if (cpu_map__is_dummy(cpus))
return 0;
static void __process_stat(struct evsel *counter, u64 tstamp)
{
int nthreads = perf_thread_map__nr(counter->core.threads);
- int idx, thread;
struct perf_cpu cpu;
static int header_printed;
header_printed = 1;
}
- for (thread = 0; thread < nthreads; thread++) {
+ for (int thread = 0; thread < nthreads; thread++) {
+ unsigned int idx;
+
perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
struct perf_counts_values *counts;
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
- int idx;
+ unsigned int idx;
if (!evsel__is_tool(counter))
continue;
bm = bitmap_zalloc(nbits);
if (map && bm) {
- int i;
+ unsigned int i;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, i, map)
return 0;
}
-static int __test__cpu_map_merge(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_merge(const char *lhs, const char *rhs, unsigned int nr,
+ const char *expected)
{
struct perf_cpu_map *a = perf_cpu_map__new(lhs);
struct perf_cpu_map *b = perf_cpu_map__new(rhs);
return ret;
}
-static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_intersect(const char *lhs, const char *rhs, unsigned int nr,
+ const char *expected)
{
struct perf_cpu_map *a = perf_cpu_map__new(lhs);
struct perf_cpu_map *b = perf_cpu_map__new(rhs);
if (map && bm) {
struct perf_cpu cpu;
- int i;
+ unsigned int i;
perf_cpu_map__for_each_cpu(cpu, i, map)
__set_bit(cpu.cpu, bm);
static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- int err = TEST_FAIL, fd, idx;
+ int err = TEST_FAIL, fd;
+ unsigned int idx;
struct perf_cpu cpu;
struct perf_cpu_map *cpus;
struct evsel *evsel;
.path = path,
.mode = PERF_DATA_MODE_READ,
};
- int i;
+ unsigned int i;
struct aggr_cpu_id id;
struct perf_cpu cpu;
struct perf_env *env;
TEST_ASSERT_VAL("Session header CPU map not set", env->cpu);
- for (i = 0; i < env->nr_cpus_avail; i++) {
+ for (i = 0; i < (unsigned int)env->nr_cpus_avail; i++) {
cpu.cpu = i;
if (!perf_cpu_map__has(map, cpu))
continue;
int cpu_set_size = get_cpu_set_size();
unsigned long *cpuset = bitmap_zalloc(cpu_set_size * 8);
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
if (!cpuset)
return;
struct perf_counts_values *counts;
int reading_map_fd;
__u32 key = 0;
- int err, idx, bpf_cpu;
+ int err, bpf_cpu;
+ unsigned int idx;
if (list_empty(&evsel->bpf_counter_list))
return -EAGAIN;
}
for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
- idx = perf_cpu_map__idx(evsel__cpus(evsel),
- (struct perf_cpu){.cpu = bpf_cpu});
- if (idx == -1)
+ int i = perf_cpu_map__idx(evsel__cpus(evsel),
+ (struct perf_cpu){.cpu = bpf_cpu});
+
+ if (i == -1)
continue;
- counts = perf_counts(evsel->counts, idx, 0);
+ counts = perf_counts(evsel->counts, i, 0);
counts->val += values[bpf_cpu].counter;
counts->ena += values[bpf_cpu].enabled;
counts->run += values[bpf_cpu].running;
static int bperf_sync_counters(struct evsel *evsel)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
struct bpf_perf_event_value values[num_cpu_bpf];
struct perf_counts_values *counts;
int reading_map_fd, err = 0;
- __u32 i;
- int j;
bperf_sync_counters(evsel);
reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
- for (i = 0; i < filter_entry_cnt; i++) {
+ for (__u32 i = 0; i < filter_entry_cnt; i++) {
struct perf_cpu entry;
__u32 cpu;
if (err)
goto out;
switch (evsel->follower_skel->bss->type) {
- case BPERF_FILTER_GLOBAL:
- assert(i == 0);
+ case BPERF_FILTER_GLOBAL: {
+ unsigned int j;
+ assert(i == 0);
perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
counts = perf_counts(evsel->counts, j, 0);
counts->val = values[entry.cpu].counter;
counts->run = values[entry.cpu].running;
}
break;
+ }
case BPERF_FILTER_CPU:
cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
assert(cpu >= 0);
struct bpf_link *link;
struct evsel *evsel;
struct cgroup *cgrp, *leader_cgrp;
- int i, j;
+ unsigned int i;
struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
int map_fd, prog_fd, err;
evlist__for_each_entry(evlist, evsel) {
if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
+ unsigned int j;
+
leader_cgrp = evsel->cgrp;
evsel->cgrp = NULL;
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
int prog_fd = bpf_program__fd(skel->progs.trigger_read);
perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
evlist__for_each_entry(evlist, evsel) {
__u32 idx = evsel->core.idx;
- int i;
+ unsigned int i;
struct perf_cpu cpu;
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
static int setup_filters(struct perf_kwork *kwork)
{
if (kwork->cpu_list != NULL) {
- int idx, nr_cpus;
+ unsigned int idx;
+ int nr_cpus;
struct perf_cpu_map *map;
struct perf_cpu cpu;
int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
static int setup_filters(struct perf_kwork *kwork)
{
if (kwork->cpu_list) {
- int idx, nr_cpus, fd;
+ unsigned int idx;
+ int nr_cpus, fd;
struct perf_cpu_map *map;
struct perf_cpu cpu;
struct evlist *evlist = arg;
struct evsel *evsel;
struct perf_cpu pcpu;
- int i;
+ unsigned int i;
/* update task filter for the given workload */
if (skel->rodata->has_task && skel->rodata->uses_tgid &&
void augmented_syscalls__setup_bpf_output(void)
{
struct perf_cpu cpu;
- int i;
+ unsigned int i;
if (bpf_output == NULL)
return;
aggr_cpu_id_get_t get_id,
void *data, bool needs_sort)
{
- int idx;
+ unsigned int idx;
struct perf_cpu cpu;
struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));
}
}
/* Trim. */
- if (c->nr != perf_cpu_map__nr(cpus)) {
+ if (c->nr != (int)perf_cpu_map__nr(cpus)) {
struct cpu_aggr_map *trimmed_c =
realloc(c,
sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
#define COMMA first ? "" : ","
- for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
+ for (i = 0; i < (int)perf_cpu_map__nr(map) + 1; i++) {
struct perf_cpu cpu = { .cpu = INT16_MAX };
- bool last = i == perf_cpu_map__nr(map);
+ bool last = i == (int)perf_cpu_map__nr(map);
if (!last)
cpu = perf_cpu_map__cpu(map, i);
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
- int idx;
+ unsigned int idx;
char *ptr = buf;
unsigned char *bitmap;
struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
const char *core_cpu_list = topology->core_cpus_list[i];
struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
bool has_first, first = true;
perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
for (i = 0; i < env->nr_numa_nodes; i++) {
struct perf_cpu tmp;
- int j;
+ unsigned int j;
nn = &env->numa_nodes[i];
perf_cpu_map__for_each_cpu(tmp, j, nn->map)
struct perf_cpu_map *cpus = counter->core.cpus;
for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) {
- int idx;
+ unsigned int idx;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap)
{
- int i, err = -1;
+ unsigned int i;
+ int err = -1;
struct perf_cpu_map *map;
int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS);
struct perf_cpu cpu;
const struct aggr_cpu_id *id)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
/*
* Skip unsupported default events when not verbose. (default events
struct evlist *evlist,
struct outstate *os)
{
- int all_idx;
+ unsigned int all_idx;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
- int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
+ int nthreads = perf_thread_map__nr(evsel->core.threads);
for (int thread = 0; thread < nthreads; thread++) {
+ unsigned int idx;
+
perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
*perf_counts(evsel->counts, idx, thread) =
*perf_counts(evsel->prev_raw_counts, idx, thread);
struct perf_counts_values counts = { 0, };
struct aggr_cpu_id id;
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
/* collect per-core counts */
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
struct perf_stat_evsel *ps = evsel->stats;
struct aggr_cpu_id core_id;
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
if (!evsel->percore)
return;
static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
{
- int idx, ret = 0;
+ unsigned int idx;
+ int ret = 0;
struct perf_cpu_map *map;
struct perf_cpu cpu;
{
struct perf_cpu_map *map;
struct perf_cpu cpu;
- int i, err = -1;
+ unsigned int i;
+ int err = -1;
if (symbol_conf.parallelism_list_str == NULL)
return 0;
static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
- int idx;
+ unsigned int idx;
struct perf_cpu cpu;
/* Due to padding, the 4bytes per entry mask variant is always smaller. */