From: Thomas Richter
Date: Tue, 8 Oct 2024 12:51:10 +0000 (+0200)
Subject: s390/cpum_sf: Do not re-enable event after deletion
X-Git-Tag: v6.13-rc1~206^2~74
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=db417646fecd10b330c9dd611a39999520553bf2;p=thirdparty%2Fkernel%2Flinux.git

s390/cpum_sf: Do not re-enable event after deletion

Event delete removes an event from the event list, but common code
invokes the PMU's enable function later on. This happens in
event_sched_out() and leads to the following call sequence:

event_sched_out()
+--> cpumsf_pmu_del()
+--> cpumsf_pmu_enable()

In cpumsf_pmu_enable(), return immediately when the event is not active.

Also remove an unneeded if() clause. That if() statement is only reached
when the PMU_F_IN_USE flag has been set in cpumsf_pmu_add(), and that
function also sets cpuhw->event to a valid value.

Remove the WARN_ON_ONCE() statement, which never triggered.

Signed-off-by: Thomas Richter
Reviewed-by: Hendrik Brueckner
Acked-by: Sumanth Korikkar
Signed-off-by: Heiko Carstens
---

diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index e806693c8473d..c916d04815db0 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -910,10 +910,14 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	struct hw_perf_event *hwc;
 	int err;
 
-	if (cpuhw->flags & PMU_F_ENABLED)
-		return;
-
-	if (cpuhw->flags & PMU_F_ERR_MASK)
+	/*
+	 * Event must be
+	 * - added/started on this CPU (PMU_F_IN_USE set)
+	 * - and CPU must be available (PMU_F_RESERVED set)
+	 * - and not already enabled (PMU_F_ENABLED not set)
+	 * - and not in error condition (PMU_F_ERR_MASK not set)
+	 */
+	if (cpuhw->flags != (PMU_F_IN_USE | PMU_F_RESERVED))
 		return;
 
 	/* Check whether to extent the sampling buffer.
@@ -927,19 +931,16 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	 * facility, but it can be fully re-enabled using sampling controls that
 	 * have been saved in cpumsf_pmu_disable().
 	 */
-	if (cpuhw->event) {
-		hwc = &cpuhw->event->hw;
-		if (!(SAMPL_DIAG_MODE(hwc))) {
-			/*
-			 * Account number of overflow-designated
-			 * buffer extents
-			 */
-			sfb_account_overflows(cpuhw, hwc);
-			extend_sampling_buffer(&cpuhw->sfb, hwc);
-		}
-		/* Rate may be adjusted with ioctl() */
-		cpuhw->lsctl.interval = SAMPL_RATE(hwc);
+	hwc = &cpuhw->event->hw;
+	if (!(SAMPL_DIAG_MODE(hwc))) {
+		/*
+		 * Account number of overflow-designated buffer extents
+		 */
+		sfb_account_overflows(cpuhw, hwc);
+		extend_sampling_buffer(&cpuhw->sfb, hwc);
 	}
+	/* Rate may be adjusted with ioctl() */
+	cpuhw->lsctl.interval = SAMPL_RATE(hwc);
 
 	/* (Re)enable the PMU and sampling facility */
 	err = lsctl(&cpuhw->lsctl);
@@ -1954,13 +1955,12 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 
 	/* Program alert request */
 	if (alert & CPU_MF_INT_SF_PRA) {
-		if (cpuhw->flags & PMU_F_IN_USE)
+		if (cpuhw->flags & PMU_F_IN_USE) {
 			if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
 				hw_collect_aux(cpuhw);
 			else
 				hw_perf_event_update(cpuhw->event, 0);
-		else
-			WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
+		}
 	}
 
 	/* Report measurement alerts only for non-PRA codes */
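
For reference, a minimal userspace sketch of the combined flag test introduced
in cpumsf_pmu_enable() above. The flag values and the may_enable() helper are
illustrative assumptions, not the real s390 definitions; the point is only that
an equality test against (PMU_F_IN_USE | PMU_F_RESERVED) enforces all four
conditions from the new comment at once, including the "event was deleted"
case that this patch addresses.

#include <stdio.h>

/* Illustrative placeholder values, not the kernel's definitions. */
#define PMU_F_RESERVED	0x1000	/* sampling facility reserved for this CPU */
#define PMU_F_ENABLED	0x2000	/* sampling facility currently enabled */
#define PMU_F_IN_USE	0x4000	/* event added/started on this CPU */
#define PMU_F_ERR_MASK	0x00ff	/* any error condition */

/* Hypothetical helper mirroring the new guard in cpumsf_pmu_enable(). */
static int may_enable(unsigned int flags)
{
	/*
	 * Equality, not individual bit tests: IN_USE and RESERVED must both
	 * be set, while ENABLED and all error bits must be clear.
	 */
	return flags == (PMU_F_IN_USE | PMU_F_RESERVED);
}

int main(void)
{
	/* Added/started and reserved, not yet enabled, no errors -> 1 */
	printf("%d\n", may_enable(PMU_F_IN_USE | PMU_F_RESERVED));
	/* Event already deleted (IN_USE cleared) -> 0, enable is skipped */
	printf("%d\n", may_enable(PMU_F_RESERVED));
	/* Already enabled -> 0 */
	printf("%d\n", may_enable(PMU_F_IN_USE | PMU_F_RESERVED | PMU_F_ENABLED));
	/* Error condition pending -> 0 */
	printf("%d\n", may_enable(PMU_F_IN_USE | PMU_F_RESERVED | 0x0001));
	return 0;
}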