#include "coresight-trace-id.h"
/* Default trace ID map. Used in sysfs mode and for system sources */
-static struct coresight_trace_id_map id_map_default;
+static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
+static struct coresight_trace_id_map id_map_default = {
+	.cpu_map = &id_map_default_cpu_ids
+};
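/*
 * Hedged sketch of what this initializer assumes the companion header
 * change does to struct coresight_trace_id_map: the cpu_map field name
 * is taken from its use below, while the __percpu annotation and the
 * other (abridged) members are assumptions, not shown in this hunk.
 *
 *	struct coresight_trace_id_map {
 *		...
 *		atomic_t __percpu *cpu_map;
 *	};
 *
 * Each map now owns its own per-CPU array of current trace IDs, so a
 * second map can be declared exactly as id_map_default is above, backed
 * by its own DEFINE_PER_CPU array.
 */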
-/* maintain a record of the mapping of IDs and pending releases per cpu */
-static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
+/* maintain a record of the pending releases per cpu */
static cpumask_t cpu_id_release_pending;
/* perf session active counter */
/* unlocked read of current trace ID value for given CPU */
-static int _coresight_trace_id_read_cpu_id(int cpu)
+static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
-	return atomic_read(&per_cpu(cpu_id, cpu));
+	return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}
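/*
 * Why per_cpu_ptr() replaces per_cpu() here: per_cpu() indexes a per-CPU
 * variable named at compile time, while per_cpu_ptr() indexes through a
 * per-CPU pointer held in data, which is what lets each id_map carry its
 * own CPU array. An unlocked read against the default map, for example,
 * now looks like:
 *
 *	atomic_read(per_cpu_ptr(id_map_default.cpu_map, cpu));
 */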
/* look for next available odd ID, return 0 if none found */
		clear_bit(bit, id_map->pend_rel_ids);
	}
	for_each_cpu(cpu, &cpu_id_release_pending) {
-		atomic_set(&per_cpu(cpu_id, cpu), 0);
+		atomic_set(per_cpu_ptr(id_map_default.cpu_map, cpu), 0);
		cpumask_clear_cpu(cpu, &cpu_id_release_pending);
	}
	spin_unlock_irqrestore(&id_map_lock, flags);
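/*
 * Note that cpu_id_release_pending stays a single global cpumask, and the
 * release-all path above resets IDs through id_map_default.cpu_map
 * explicitly: pending per-CPU releases are still tracked only for the
 * default map in this patch.
 */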
		goto get_cpu_id_out_unlock;
	/* allocate the new id to the cpu */
-	atomic_set(&per_cpu(cpu_id, cpu), id);
+	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_clr_pend:
	/* we are (re)using this ID - so ensure it is not marked for release */
	} else {
		/* otherwise clear id */
		coresight_trace_id_free(id, id_map);
-		atomic_set(&per_cpu(cpu_id, cpu), 0);
+		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
	}
	spin_unlock_irqrestore(&id_map_lock, flags);
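/*
 * A hedged sketch of the exported single-argument wrappers implied by
 * coresight-trace-id.h above: they keep their old shape and simply route
 * through the default map. Names here are assumed from context rather
 * than shown in this hunk.
 */
int coresight_trace_id_get_cpu_id(int cpu)
{
	return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
}

void coresight_trace_id_put_cpu_id(int cpu)
{
	coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
}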