]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
pmdomain: core: Enable s2idle for CPU PM domains on PREEMPT_RT
authorUlf Hansson <ulf.hansson@linaro.org>
Mon, 27 May 2024 14:25:51 +0000 (16:25 +0200)
committerUlf Hansson <ulf.hansson@linaro.org>
Mon, 5 Aug 2024 11:12:04 +0000 (13:12 +0200)
To allow a genpd provider for a CPU PM domain to enter a domain-idle-state
during s2idle on a PREEMPT_RT based configuration, we can't use regular
spinlocks, as they are turned into sleepable locks on PREEMPT_RT.

To address this problem, let's convert to using a raw spinlock, but
only for genpd providers that have the GENPD_FLAG_CPU_DOMAIN bit set. In
this way, the lock can still be acquired/released in atomic context, which
is needed in the idle-path for PREEMPT_RT.

Do note that the genpd power-on/off notifiers may also be fired during
s2idle, but these are already prepared for PREEMPT_RT as they are based on
the raw notifiers. However, consumers of them may need to adapt accordingly
to work properly on PREEMPT_RT.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Raghavendra Kakarla <quic_rkakarla@quicinc.com> # qcm6490 with PREEMPT_RT set
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20240527142557.321610-2-ulf.hansson@linaro.org
drivers/pmdomain/core.c
include/linux/pm_domain.h

index 7a61aa88c0614a9bf02753e2331a243de7d94e8b..8c798a46ffec046051f22c01c8f5b0197949a9c9 100644 (file)
@@ -117,6 +117,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
        .unlock = genpd_unlock_spin,
 };
 
+/*
+ * Acquire the genpd's raw spinlock with interrupts disabled. The saved
+ * IRQ flags are stashed in the domain itself so the matching unlock can
+ * restore them; this is safe because the flags are only written while
+ * the lock is held. Raw spinlocks keep spinning on PREEMPT_RT, so this
+ * may be called from atomic context (e.g. the s2idle CPU idle path).
+ */
+static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
+       __acquires(&genpd->raw_slock)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+       genpd->raw_lock_flags = flags;
+}
+
+/*
+ * Same as genpd_lock_raw_spin(), but acquires the lock with a lockdep
+ * subclass @depth so nested parent/child genpd locking does not trigger
+ * false-positive lockdep reports.
+ */
+static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
+                                       int depth)
+       __acquires(&genpd->raw_slock)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
+       genpd->raw_lock_flags = flags;
+}
+
+/*
+ * The "interruptible" lock op for raw-spinlock-based domains. Spinlocks
+ * have no interruptible acquire, so this simply takes the lock
+ * unconditionally and always returns 0 (never -EINTR/-ERESTARTSYS),
+ * mirroring how genpd's existing spinlock variant fills this slot.
+ */
+static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
+       __acquires(&genpd->raw_slock)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+       genpd->raw_lock_flags = flags;
+       return 0;
+}
+
+/*
+ * Release the genpd's raw spinlock, restoring the IRQ flags that the
+ * corresponding lock op saved in genpd->raw_lock_flags.
+ */
+static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
+       __releases(&genpd->raw_slock)
+{
+       raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
+}
+
+/* Lock ops used by CPU PM domains; see genpd_lock_init(). */
+static const struct genpd_lock_ops genpd_raw_spin_ops = {
+       .lock = genpd_lock_raw_spin,
+       .lock_nested = genpd_lock_nested_raw_spin,
+       .lock_interruptible = genpd_lock_interruptible_raw_spin,
+       .unlock = genpd_unlock_raw_spin,
+};
+
 #define genpd_lock(p)                  p->lock_ops->lock(p)
 #define genpd_lock_nested(p, d)                p->lock_ops->lock_nested(p, d)
 #define genpd_lock_interruptible(p)    p->lock_ops->lock_interruptible(p)
@@ -2143,7 +2185,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
 
 static void genpd_lock_init(struct generic_pm_domain *genpd)
 {
-       if (genpd_is_irq_safe(genpd)) {
+       /*
+        * CPU PM domains (GENPD_FLAG_CPU_DOMAIN) get a raw spinlock so the
+        * lock can still be taken in atomic context on PREEMPT_RT, where
+        * regular spinlocks become sleepable.
+        */
+       if (genpd_is_cpu_domain(genpd)) {
+               raw_spin_lock_init(&genpd->raw_slock);
+               genpd->lock_ops = &genpd_raw_spin_ops;
+       } else if (genpd_is_irq_safe(genpd)) {
                spin_lock_init(&genpd->slock);
                genpd->lock_ops = &genpd_spin_ops;
        } else {
index 858c8e7851fb5df19bfee9fd4373587f494c5064..b86bb52858ac563200b3bd5eefaed9dbedc762b5 100644 (file)
@@ -198,8 +198,11 @@ struct generic_pm_domain {
                        spinlock_t slock;
                        unsigned long lock_flags;
                };
+               struct {
+                       raw_spinlock_t raw_slock;
+                       unsigned long raw_lock_flags;
+               };
        };
-
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)