perf/x86: Remove swap_task_ctx()
Author:     Kan Liang <kan.liang@linux.intel.com>
AuthorDate: Fri, 14 Mar 2025 17:26:59 +0000 (10:26 -0700)
Committer:  Peter Zijlstra <peterz@infradead.org>
CommitDate: Mon, 17 Mar 2025 10:23:37 +0000 (11:23 +0100)
The PMU specific data is saved in task_struct now. It no longer needs to be
swapped between contexts.

Remove swap_task_ctx() support.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250314172700.438923-6-kan.liang@linux.intel.com
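
To see why the hook can go away, compare where the per-task PMU state lives
before and after this series. The sketch below is a minimal, self-contained
userspace model (the types struct lbr_state, struct pmu_context and
struct task_model are invented stand-ins, not the kernel's actual structures
or call paths): while the state hangs off the per-context structure, an
optimized switch between two tasks has to exchange task_ctx_data pointers,
which is exactly what the deleted swap_task_ctx()/intel_pmu_lbr_swap_task_ctx()
did; once the state is embedded in the task itself, it follows the task across
context switches and there is nothing left to swap.

/*
 * Userspace model only -- not kernel code.  The type and field names
 * below are invented stand-ins for illustration.
 */
#include <stdio.h>

struct lbr_state {
	int lbr_callstack_users;	/* stand-in for the LBR task state */
};

/*
 * Old layout: the PMU data hangs off the per-event-context structure
 * (cf. perf_event_pmu_context::task_ctx_data), so an optimized context
 * switch between two tasks has to exchange the pointers.
 */
struct pmu_context {
	struct lbr_state *task_ctx_data;
};

static void old_optimized_switch(struct pmu_context *prev, struct pmu_context *next)
{
	struct lbr_state *tmp = prev->task_ctx_data;	/* open-coded swap() */

	prev->task_ctx_data = next->task_ctx_data;
	next->task_ctx_data = tmp;
}

/*
 * New layout: the PMU data is embedded in the task itself, so it follows
 * the task across context switches and no pointer juggling is needed.
 */
struct task_model {
	struct lbr_state lbr;
};

static void new_optimized_switch(struct task_model *prev, struct task_model *next)
{
	(void)prev;
	(void)next;	/* nothing to swap any more */
}

int main(void)
{
	struct lbr_state a = { .lbr_callstack_users = 1 };
	struct lbr_state b = { .lbr_callstack_users = 2 };
	struct pmu_context prev_epc = { .task_ctx_data = &a };
	struct pmu_context next_epc = { .task_ctx_data = &b };

	old_optimized_switch(&prev_epc, &next_epc);
	printf("old model: prev=%d next=%d (pointers had to be swapped)\n",
	       prev_epc.task_ctx_data->lbr_callstack_users,
	       next_epc.task_ctx_data->lbr_callstack_users);

	struct task_model t1 = { .lbr = { .lbr_callstack_users = 1 } };
	struct task_model t2 = { .lbr = { .lbr_callstack_users = 2 } };

	new_optimized_switch(&t1, &t2);
	printf("new model: t1=%d t2=%d (state stayed with each task)\n",
	       t1.lbr.lbr_callstack_users, t2.lbr.lbr_callstack_users);
	return 0;
}

With the per-task layout, the existing sched_task() callback is enough to
save and restore the hardware state, so the extra swap_task_ctx() bridge from
perf/core into perf/x86 becomes dead code and is removed in the diff below.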
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/lbr.c
arch/x86/events/perf_event.h

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ae8c90adca0f0fa6ed0c14f336528421f8c7bd26..833478ffbbf559b43e232648f45e46a257d61e7d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -87,7 +87,6 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
 DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling,   *x86_pmu.stop_scheduling);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task,    *x86_pmu.sched_task);
-DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
@@ -2039,7 +2038,6 @@ static void x86_pmu_static_call_update(void)
        static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
 
        static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
-       static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
 
        static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
        static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
@@ -2644,12 +2642,6 @@ static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
        static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
 }
 
-static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-                                 struct perf_event_pmu_context *next_epc)
-{
-       static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
-}
-
 void perf_check_microcode(void)
 {
        if (x86_pmu.check_microcode)
@@ -2714,7 +2706,6 @@ static struct pmu pmu = {
 
        .event_idx              = x86_pmu_event_idx,
        .sched_task             = x86_pmu_sched_task,
-       .swap_task_ctx          = x86_pmu_swap_task_ctx,
        .check_period           = x86_pmu_check_period,
 
        .aux_output_match       = x86_pmu_aux_output_match,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 3efbb03fd77ee24846ec2af8f6b0ec4caadea52f..dc38dec244c1577d443d9feb88b8ab3a92a97c24 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5300,12 +5300,6 @@ static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
        intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
 }
 
-static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-                                   struct perf_event_pmu_context *next_epc)
-{
-       intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
-}
-
 static int intel_pmu_check_period(struct perf_event *event, u64 value)
 {
        return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -5474,7 +5468,6 @@ static __initconst const struct x86_pmu intel_pmu = {
 
        .guest_get_msrs         = intel_guest_get_msrs,
        .sched_task             = intel_pmu_sched_task,
-       .swap_task_ctx          = intel_pmu_swap_task_ctx,
 
        .check_period           = intel_pmu_check_period,
 
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 24719adbcd7eadb2fcb41c0530d64e6f6ebd0f4a..f44c3d866f248ca7a91d19e12babbc4bcc0024ce 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -522,29 +522,6 @@ static void __intel_pmu_lbr_save(void *ctx)
        cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
 }
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-                                struct perf_event_pmu_context *next_epc)
-{
-       void *prev_ctx_data, *next_ctx_data;
-
-       swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
-
-       /*
-        * Architecture specific synchronization makes sense in case
-        * both prev_epc->task_ctx_data and next_epc->task_ctx_data
-        * pointers are allocated.
-        */
-
-       prev_ctx_data = next_epc->task_ctx_data;
-       next_ctx_data = prev_epc->task_ctx_data;
-
-       if (!prev_ctx_data || !next_ctx_data)
-               return;
-
-       swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
-            task_context_opt(next_ctx_data)->lbr_callstack_users);
-}
-
 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
                              struct task_struct *task, bool sched_in)
 {
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 67d2d250248c1c4118a04392548e5a59ade0f7c2..8e5a4c3c5b950873552a879a088af24c5ba18ace 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -958,14 +958,6 @@ struct x86_pmu {
         */
        int             num_topdown_events;
 
-       /*
-        * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
-        * switch helper to bridge calls from perf/core to perf/x86.
-        * See struct pmu::swap_task_ctx() usage for examples;
-        */
-       void            (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
-                                        struct perf_event_pmu_context *next_epc);
-
        /*
         * AMD bits
         */
@@ -1671,9 +1663,6 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
                                struct cpu_hw_events *cpuc,
                                struct perf_event *event);
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-                                struct perf_event_pmu_context *next_epc);
-
 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
                              struct task_struct *task, bool sched_in);