perf: Add generic exclude_guest support
author    Kan Liang <kan.liang@linux.intel.com>
          Sat, 6 Dec 2025 00:16:38 +0000 (16:16 -0800)
committer Peter Zijlstra <peterz@infradead.org>
          Wed, 17 Dec 2025 12:31:03 +0000 (13:31 +0100)
Only KVM knows the exact time when a guest is entering/exiting. Expose
two interfaces to KVM to switch the ownership of the PMU resources.
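
As a rough illustration only (the hook names perf_guest_enter()/perf_guest_exit()
below are placeholders for this sketch, not necessarily the names the series
introduces), the idea is that KVM's vCPU run loop brackets guest entry and exit
with a pair of calls, so perf can hand the PMU to the guest and reclaim it
afterwards:

	/* Sketch of a KVM run loop using the two hand-off hooks;
	 * hook names and the helper are illustrative placeholders.
	 */
	static int kvm_vcpu_run_sketch(struct kvm_vcpu *vcpu)
	{
		int ret;

		perf_guest_enter();		/* host owns -> guest owns the PMU */
		ret = vcpu_enter_guest(vcpu);	/* run the guest */
		perf_guest_exit();		/* guest owns -> host owns the PMU */

		return ret;
	}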

All the pinned events must be scheduled in first. Extend the
perf_event_sched_in() helper to take an extra flag, e.g., EVENT_GUEST.
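
To make the hand-off concrete (again only a sketch; the real call sites are in
the follow-up patches), the guest-exit side can now reschedule the host
contexts, pinned events first, by passing the new qualifier through the
extended helper:

	/* Sketch: on guest exit, put the host events back on the PMU.
	 * cpuctx is this CPU's perf_cpu_context; EVENT_GUEST tells
	 * ctx_sched_in() the transition is a guest hand-off.
	 */
	static void sched_host_events_back_in(struct perf_cpu_context *cpuctx)
	{
		perf_event_sched_in(cpuctx, cpuctx->task_ctx, NULL, EVENT_GUEST);
	}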

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://patch.msgid.link/20251206001720.468579-3-seanjc@google.com
kernel/events/core.c

index 406371ce45f24bc0e9e4cd039c8ecb2f9cfc4fa6..fab358daa42e7158222c3cc2d19e3399dad1d0ec 100644 (file)
@@ -2870,14 +2870,15 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                                struct perf_event_context *ctx,
-                               struct pmu *pmu)
+                               struct pmu *pmu,
+                               enum event_type_t event_type)
 {
-       ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED);
+       ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED | event_type);
        if (ctx)
-                ctx_sched_in(ctx, pmu, EVENT_PINNED);
-       ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
+               ctx_sched_in(ctx, pmu, EVENT_PINNED | event_type);
+       ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE | event_type);
        if (ctx)
-                ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE);
+               ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE | event_type);
 }
 
 /*
@@ -2933,7 +2934,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
        else if (event_type & EVENT_PINNED)
                ctx_sched_out(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, task_ctx, pmu);
+       perf_event_sched_in(cpuctx, task_ctx, pmu, 0);
 
        for_each_epc(epc, &cpuctx->ctx, pmu, 0)
                perf_pmu_enable(epc->pmu);
@@ -4151,7 +4152,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
                ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
        }
 
-       perf_event_sched_in(cpuctx, ctx, NULL);
+       perf_event_sched_in(cpuctx, ctx, NULL, 0);
 
        perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);