git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.15
authorSasha Levin <sashal@kernel.org>
Wed, 9 Aug 2023 01:43:54 +0000 (21:43 -0400)
committerSasha Levin <sashal@kernel.org>
Wed, 9 Aug 2023 01:43:54 +0000 (21:43 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.15/pm-sleep-wakeirq-fix-wake-irq-arming.patch [new file with mode: 0644]
queue-5.15/pm-wakeirq-support-enabling-wake-up-irq-after-runtim.patch [new file with mode: 0644]
queue-5.15/selftests-rseq-check-if-libc-rseq-support-is-registe.patch [new file with mode: 0644]
queue-5.15/selftests-rseq-play-nice-with-binaries-statically-li.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/soundwire-bus-pm_runtime_request_resume-on-periphera.patch [new file with mode: 0644]
queue-5.15/soundwire-fix-enumeration-completion.patch [new file with mode: 0644]

diff --git a/queue-5.15/pm-sleep-wakeirq-fix-wake-irq-arming.patch b/queue-5.15/pm-sleep-wakeirq-fix-wake-irq-arming.patch
new file mode 100644 (file)
index 0000000..e0516d4
--- /dev/null
@@ -0,0 +1,95 @@
+From 37977c58ef65b049fbcc8310ec43fe73af8ff9aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Jul 2023 16:57:39 +0200
+Subject: PM: sleep: wakeirq: fix wake irq arming
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 8527beb12087238d4387607597b4020bc393c4b4 ]
+
+The decision whether to enable a wake irq during suspend can not be done
+based on the runtime PM state directly as a driver may use wake irqs
+without implementing runtime PM. Such drivers specifically leave the
+state set to the default 'suspended' and the wake irq is thus never
+enabled at suspend.
+
+Add a new wake irq flag to track whether a dedicated wake irq has been
+enabled at runtime suspend and therefore must not be enabled at system
+suspend.
+
+Note that pm_runtime_enabled() can not be used as runtime PM is always
+disabled during late suspend.
+
+Fixes: 69728051f5bf ("PM / wakeirq: Fix unbalanced IRQ enable for wakeirq")
+Cc: 4.16+ <stable@vger.kernel.org> # 4.16+
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Tested-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/power/power.h   |  1 +
+ drivers/base/power/wakeirq.c | 12 ++++++++----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index 0eb7f02b3ad59..922ed457db191 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -29,6 +29,7 @@ extern u64 pm_runtime_active_time(struct device *dev);
+ #define WAKE_IRQ_DEDICATED_MASK               (WAKE_IRQ_DEDICATED_ALLOCATED | \
+                                        WAKE_IRQ_DEDICATED_MANAGED | \
+                                        WAKE_IRQ_DEDICATED_REVERSE)
++#define WAKE_IRQ_DEDICATED_ENABLED    BIT(3)
+ struct wake_irq {
+       struct device *dev;
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index 0004db4a9d3b9..6f2cdd8643afa 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -314,8 +314,10 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
+       return;
+ enable:
+-      if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
++      if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+               enable_irq(wirq->irq);
++              wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
++      }
+ }
+ /**
+@@ -336,8 +338,10 @@ void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
+       if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+               return;
+-      if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
++      if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
++              wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
+               disable_irq_nosync(wirq->irq);
++      }
+ }
+ /**
+@@ -376,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+       if (device_may_wakeup(wirq->dev)) {
+               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+-                  !pm_runtime_status_suspended(wirq->dev))
++                  !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+                       enable_irq(wirq->irq);
+               enable_irq_wake(wirq->irq);
+@@ -399,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+               disable_irq_wake(wirq->irq);
+               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+-                  !pm_runtime_status_suspended(wirq->dev))
++                  !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+                       disable_irq_nosync(wirq->irq);
+       }
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/pm-wakeirq-support-enabling-wake-up-irq-after-runtim.patch b/queue-5.15/pm-wakeirq-support-enabling-wake-up-irq-after-runtim.patch
new file mode 100644 (file)
index 0000000..e9dbcc4
--- /dev/null
@@ -0,0 +1,290 @@
+From 9436c352e37a8109fa2a487f4be789bb396ad750 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Oct 2021 15:01:53 +0800
+Subject: PM / wakeirq: support enabling wake-up irq after runtime_suspend
+ called
+
+From: Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+[ Upstream commit 259714100d98b50bf04d36a21bf50ca8b829fc11 ]
+
+When the dedicated wake IRQ is level trigger, and it uses the
+device's low-power status as the wakeup source, that means if the
+device is not in low-power state, the wake IRQ will be triggered
+if enabled; For this case, need enable the wake IRQ after running
+the device's ->runtime_suspend() which make it enter low-power state.
+
+e.g.
+Assume the wake IRQ is a low level trigger type, and the wakeup
+signal comes from the low-power status of the device.
+The wakeup signal is low level at running time (0), and becomes
+high level when the device enters low-power state (runtime_suspend
+(1) is called), a wakeup event at (2) make the device exit low-power
+state, then the wakeup signal also becomes low level.
+
+                ------------------
+               |           ^     ^|
+----------------           |     | --------------
+ |<---(0)--->|<--(1)--|   (3)   (2)    (4)
+
+if enable the wake IRQ before running runtime_suspend during (0),
+a wake IRQ will arise, it causes resume immediately;
+it works if enable wake IRQ ( e.g. at (3) or (4)) after running
+->runtime_suspend().
+
+This patch introduces a new status WAKE_IRQ_DEDICATED_REVERSE to
+optionally support enabling wake IRQ after running ->runtime_suspend().
+
+Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 8527beb12087 ("PM: sleep: wakeirq: fix wake irq arming")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/power/power.h   |   7 ++-
+ drivers/base/power/runtime.c |   6 ++-
+ drivers/base/power/wakeirq.c | 101 +++++++++++++++++++++++++++--------
+ include/linux/pm_wakeirq.h   |   9 +++-
+ 4 files changed, 96 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index 54292cdd7808b..0eb7f02b3ad59 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -25,8 +25,10 @@ extern u64 pm_runtime_active_time(struct device *dev);
+ #define WAKE_IRQ_DEDICATED_ALLOCATED  BIT(0)
+ #define WAKE_IRQ_DEDICATED_MANAGED    BIT(1)
++#define WAKE_IRQ_DEDICATED_REVERSE    BIT(2)
+ #define WAKE_IRQ_DEDICATED_MASK               (WAKE_IRQ_DEDICATED_ALLOCATED | \
+-                                       WAKE_IRQ_DEDICATED_MANAGED)
++                                       WAKE_IRQ_DEDICATED_MANAGED | \
++                                       WAKE_IRQ_DEDICATED_REVERSE)
+ struct wake_irq {
+       struct device *dev;
+@@ -39,7 +41,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
+ extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+ extern void dev_pm_enable_wake_irq_check(struct device *dev,
+                                        bool can_change_status);
+-extern void dev_pm_disable_wake_irq_check(struct device *dev);
++extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
++extern void dev_pm_enable_wake_irq_complete(struct device *dev);
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index c1142a7a4fe65..5824d41a0b745 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -673,6 +673,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       if (retval)
+               goto fail;
++      dev_pm_enable_wake_irq_complete(dev);
++
+  no_callback:
+       __update_runtime_status(dev, RPM_SUSPENDED);
+       pm_runtime_deactivate_timer(dev);
+@@ -718,7 +720,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       return retval;
+  fail:
+-      dev_pm_disable_wake_irq_check(dev);
++      dev_pm_disable_wake_irq_check(dev, true);
+       __update_runtime_status(dev, RPM_ACTIVE);
+       dev->power.deferred_resume = false;
+       wake_up_all(&dev->power.wait_queue);
+@@ -901,7 +903,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+       callback = RPM_GET_CALLBACK(dev, runtime_resume);
+-      dev_pm_disable_wake_irq_check(dev);
++      dev_pm_disable_wake_irq_check(dev, false);
+       retval = rpm_callback(callback, dev);
+       if (retval) {
+               __update_runtime_status(dev, RPM_SUSPENDED);
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index b91a3a9bf9f6d..0004db4a9d3b9 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+       return IRQ_HANDLED;
+ }
+-/**
+- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+- * @dev: Device entry
+- * @irq: Device wake-up interrupt
+- *
+- * Unless your hardware has separate wake-up interrupts in addition
+- * to the device IO interrupts, you don't need this.
+- *
+- * Sets up a threaded interrupt handler for a device that has
+- * a dedicated wake-up interrupt in addition to the device IO
+- * interrupt.
+- *
+- * The interrupt starts disabled, and needs to be managed for
+- * the device by the bus code or the device driver using
+- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
+- * functions.
+- */
+-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
++static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
+ {
+       struct wake_irq *wirq;
+       int err;
+@@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+       if (err)
+               goto err_free_irq;
+-      wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
++      wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
+       return err;
+@@ -210,8 +193,57 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+       return err;
+ }
++
++
++/**
++ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
++ * @dev: Device entry
++ * @irq: Device wake-up interrupt
++ *
++ * Unless your hardware has separate wake-up interrupts in addition
++ * to the device IO interrupts, you don't need this.
++ *
++ * Sets up a threaded interrupt handler for a device that has
++ * a dedicated wake-up interrupt in addition to the device IO
++ * interrupt.
++ *
++ * The interrupt starts disabled, and needs to be managed for
++ * the device by the bus code or the device driver using
++ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
++ * functions.
++ */
++int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
++{
++      return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
++}
+ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
++/**
++ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
++ *                                         with reverse enable ordering
++ * @dev: Device entry
++ * @irq: Device wake-up interrupt
++ *
++ * Unless your hardware has separate wake-up interrupts in addition
++ * to the device IO interrupts, you don't need this.
++ *
++ * Sets up a threaded interrupt handler for a device that has a dedicated
++ * wake-up interrupt in addition to the device IO interrupt. It sets
++ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
++ * to enable dedicated wake-up interrupt after running the runtime suspend
++ * callback for @dev.
++ *
++ * The interrupt starts disabled, and needs to be managed for
++ * the device by the bus code or the device driver using
++ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
++ * functions.
++ */
++int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
++{
++      return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
++}
++EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
++
+ /**
+  * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+  * @dev: Device
+@@ -282,27 +314,54 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
+       return;
+ enable:
+-      enable_irq(wirq->irq);
++      if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
++              enable_irq(wirq->irq);
+ }
+ /**
+  * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+  * @dev: Device
++ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
+  *
+  * Disables wake-up interrupt conditionally based on status.
+  * Should be only called from rpm_suspend() and rpm_resume() path.
+  */
+-void dev_pm_disable_wake_irq_check(struct device *dev)
++void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
+ {
+       struct wake_irq *wirq = dev->power.wakeirq;
+       if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+               return;
++      if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
++              return;
++
+       if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+               disable_irq_nosync(wirq->irq);
+ }
++/**
++ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
++ * @dev: Device using the wake IRQ
++ *
++ * Enable wake IRQ conditionally based on status, mainly used if want to
++ * enable wake IRQ after running ->runtime_suspend() which depends on
++ * WAKE_IRQ_DEDICATED_REVERSE.
++ *
++ * Should be only called from rpm_suspend() path.
++ */
++void dev_pm_enable_wake_irq_complete(struct device *dev)
++{
++      struct wake_irq *wirq = dev->power.wakeirq;
++
++      if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
++              return;
++
++      if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
++          wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
++              enable_irq(wirq->irq);
++}
++
+ /**
+  * dev_pm_arm_wake_irq - Arm device wake-up
+  * @wirq: Device wake-up interrupt
+diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
+index cd5b62db90845..e63a63aa47a37 100644
+--- a/include/linux/pm_wakeirq.h
++++ b/include/linux/pm_wakeirq.h
+@@ -17,8 +17,8 @@
+ #ifdef CONFIG_PM
+ extern int dev_pm_set_wake_irq(struct device *dev, int irq);
+-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
+-                                       int irq);
++extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
++extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
+ extern void dev_pm_clear_wake_irq(struct device *dev);
+ extern void dev_pm_enable_wake_irq(struct device *dev);
+ extern void dev_pm_disable_wake_irq(struct device *dev);
+@@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+       return 0;
+ }
++static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
++{
++      return 0;
++}
++
+ static inline void dev_pm_clear_wake_irq(struct device *dev)
+ {
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/selftests-rseq-check-if-libc-rseq-support-is-registe.patch b/queue-5.15/selftests-rseq-check-if-libc-rseq-support-is-registe.patch
new file mode 100644 (file)
index 0000000..11d3e4e
--- /dev/null
@@ -0,0 +1,44 @@
+From d55fefacc40d90d53e23d05b56b1b879b8eabdbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 11:48:30 -0400
+Subject: selftests/rseq: check if libc rseq support is registered
+
+From: Michael Jeanson <mjeanson@efficios.com>
+
+[ Upstream commit d1a997ba4c1bf65497d956aea90de42a6398f73a ]
+
+When checking for libc rseq support in the library constructor, don't
+only depend on the symbols presence, check that the registration was
+completed.
+
+This targets a scenario where the libc has rseq support but it is not
+wired for the current architecture in 'bits/rseq.h', we want to fallback
+to our internal registration mechanism.
+
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/r/20220614154830.1367382-4-mjeanson@efficios.com
+Stable-dep-of: 3bcbc20942db ("selftests/rseq: Play nice with binaries statically linked against glibc 2.35+")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/rseq/rseq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 986b9458efb26..4177f9507bbee 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -111,7 +111,8 @@ void rseq_init(void)
+       libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+       libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+       libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+-      if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) {
++      if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
++                      *libc_rseq_size_p != 0) {
+               /* rseq registration owned by glibc */
+               rseq_offset = *libc_rseq_offset_p;
+               rseq_size = *libc_rseq_size_p;
+-- 
+2.40.1
+
diff --git a/queue-5.15/selftests-rseq-play-nice-with-binaries-statically-li.patch b/queue-5.15/selftests-rseq-play-nice-with-binaries-statically-li.patch
new file mode 100644 (file)
index 0000000..1a34a91
--- /dev/null
@@ -0,0 +1,84 @@
+From 31918f87055f7c766b1242869f9f1fd2a10ab409 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jul 2023 15:33:52 -0700
+Subject: selftests/rseq: Play nice with binaries statically linked against
+ glibc 2.35+
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 3bcbc20942db5d738221cca31a928efc09827069 ]
+
+To allow running rseq and KVM's rseq selftests as statically linked
+binaries, initialize the various "trampoline" pointers to point directly
+at the expect glibc symbols, and skip the dlysm() lookups if the rseq
+size is non-zero, i.e. the binary is statically linked *and* the libc
+registered its own rseq.
+
+Define weak versions of the symbols so as not to break linking against
+libc versions that don't support rseq in any capacity.
+
+The KVM selftests in particular are often statically linked so that they
+can be run on targets with very limited runtime environments, i.e. test
+machines.
+
+Fixes: 233e667e1ae3 ("selftests/rseq: Uplift rseq selftests for compatibility with glibc-2.35")
+Cc: Aaron Lewis <aaronlewis@google.com>
+Cc: kvm@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230721223352.2333911-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/rseq/rseq.c | 28 ++++++++++++++++++++++------
+ 1 file changed, 22 insertions(+), 6 deletions(-)
+
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 4177f9507bbee..b736a5169aad0 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -32,9 +32,17 @@
+ #include "../kselftest.h"
+ #include "rseq.h"
+-static const ptrdiff_t *libc_rseq_offset_p;
+-static const unsigned int *libc_rseq_size_p;
+-static const unsigned int *libc_rseq_flags_p;
++/*
++ * Define weak versions to play nice with binaries that are statically linked
++ * against a libc that doesn't support registering its own rseq.
++ */
++__weak ptrdiff_t __rseq_offset;
++__weak unsigned int __rseq_size;
++__weak unsigned int __rseq_flags;
++
++static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
++static const unsigned int *libc_rseq_size_p = &__rseq_size;
++static const unsigned int *libc_rseq_flags_p = &__rseq_flags;
+ /* Offset from the thread pointer to the rseq area.  */
+ ptrdiff_t rseq_offset;
+@@ -108,9 +116,17 @@ int rseq_unregister_current_thread(void)
+ static __attribute__((constructor))
+ void rseq_init(void)
+ {
+-      libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+-      libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+-      libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
++      /*
++       * If the libc's registered rseq size isn't already valid, it may be
++       * because the binary is dynamically linked and not necessarily due to
++       * libc not having registered a restartable sequence.  Try to find the
++       * symbols if that's the case.
++       */
++      if (!*libc_rseq_size_p) {
++              libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
++              libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
++              libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
++      }
+       if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
+                       *libc_rseq_size_p != 0) {
+               /* rseq registration owned by glibc */
+-- 
+2.40.1
+
index 7605dfcff74e8398ecb93bcf5d5952b32d5821a4..dd5aa8eff3b6e3b0ad1461be9752174639278f87 100644 (file)
@@ -84,3 +84,9 @@ mtd-rawnand-fsl_upm-fix-an-off-by-one-test-in-fun_ex.patch
 powerpc-mm-altmap-fix-altmap-boundary-check.patch
 drm-fsl-dcu-use-drm_plane_helper_destroy.patch
 drm-imx-ipuv3-fix-front-porch-adjustment-upon-hactiv.patch
+selftests-rseq-check-if-libc-rseq-support-is-registe.patch
+selftests-rseq-play-nice-with-binaries-statically-li.patch
+soundwire-bus-pm_runtime_request_resume-on-periphera.patch
+soundwire-fix-enumeration-completion.patch
+pm-wakeirq-support-enabling-wake-up-irq-after-runtim.patch
+pm-sleep-wakeirq-fix-wake-irq-arming.patch
diff --git a/queue-5.15/soundwire-bus-pm_runtime_request_resume-on-periphera.patch b/queue-5.15/soundwire-bus-pm_runtime_request_resume-on-periphera.patch
new file mode 100644 (file)
index 0000000..931203d
--- /dev/null
@@ -0,0 +1,68 @@
+From 83ae1908418977d06bc096d0c2f80e9988341b2f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 10:32:41 +0800
+Subject: soundwire: bus: pm_runtime_request_resume on peripheral attachment
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit e557bca49b812908f380c56b5b4b2f273848b676 ]
+
+In typical use cases, the peripheral becomes pm_runtime active as a
+result of the ALSA/ASoC framework starting up a DAI. The parent/child
+hierarchy guarantees that the manager device will be fully resumed
+beforehand.
+
+There is however a corner case where the manager device may become
+pm_runtime active, but without ALSA/ASoC requesting any functionality
+from the peripherals. In this case, the hardware peripheral device
+will report as ATTACHED and its initialization routine will be
+executed. If this initialization routine initiates any sort of
+deferred processing, there is a possibility that the manager could
+suspend without the peripheral suspend sequence being invoked: from
+the pm_runtime framework perspective, the peripheral is *already*
+suspended.
+
+To avoid such disconnects between hardware state and pm_runtime state,
+this patch adds an asynchronous pm_request_resume() upon successful
+attach/initialization which will result in the proper resume/suspend
+sequence to be followed on the peripheral side.
+
+BugLink: https://github.com/thesofproject/linux/issues/3459
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Rander Wang <rander.wang@intel.com>
+Signed-off-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Link: https://lore.kernel.org/r/20220420023241.14335-4-yung-chuan.liao@linux.intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: c40d6b3249b1 ("soundwire: fix enumeration completion")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soundwire/bus.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index b7cdfa65157c6..cc4cca0325b98 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -1841,6 +1841,18 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
+                               __func__, slave->dev_num);
+                       complete(&slave->initialization_complete);
++
++                      /*
++                       * If the manager became pm_runtime active, the peripherals will be
++                       * restarted and attach, but their pm_runtime status may remain
++                       * suspended. If the 'update_slave_status' callback initiates
++                       * any sort of deferred processing, this processing would not be
++                       * cancelled on pm_runtime suspend.
++                       * To avoid such zombie states, we queue a request to resume.
++                       * This would be a no-op in case the peripheral was being resumed
++                       * by e.g. the ALSA/ASoC framework.
++                       */
++                      pm_request_resume(&slave->dev);
+               }
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.15/soundwire-fix-enumeration-completion.patch b/queue-5.15/soundwire-fix-enumeration-completion.patch
new file mode 100644 (file)
index 0000000..527340b
--- /dev/null
@@ -0,0 +1,75 @@
+From e1113254203f45ff4ec287caaad5528220dad9d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 14:30:11 +0200
+Subject: soundwire: fix enumeration completion
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit c40d6b3249b11d60e09d81530588f56233d9aa44 ]
+
+The soundwire subsystem uses two completion structures that allow
+drivers to wait for soundwire device to become enumerated on the bus and
+initialised by their drivers, respectively.
+
+The code implementing the signalling is currently broken as it does not
+signal all current and future waiters and also uses the wrong
+reinitialisation function, which can potentially lead to memory
+corruption if there are still waiters on the queue.
+
+Not signalling future waiters specifically breaks sound card probe
+deferrals as codec drivers can not tell that the soundwire device is
+already attached when being reprobed. Some codec runtime PM
+implementations suffer from similar problems as waiting for enumeration
+during resume can also timeout despite the device already having been
+enumerated.
+
+Fixes: fb9469e54fa7 ("soundwire: bus: fix race condition with enumeration_complete signaling")
+Fixes: a90def068127 ("soundwire: bus: fix race condition with initialization_complete signaling")
+Cc: stable@vger.kernel.org      # 5.7
+Cc: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Cc: Rander Wang <rander.wang@linux.intel.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Link: https://lore.kernel.org/r/20230705123018.30903-2-johan+linaro@kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soundwire/bus.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index cc4cca0325b98..230a3250f3154 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -828,8 +828,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
+                       "%s: initializing enumeration and init completion for Slave %d\n",
+                       __func__, slave->dev_num);
+-              init_completion(&slave->enumeration_complete);
+-              init_completion(&slave->initialization_complete);
++              reinit_completion(&slave->enumeration_complete);
++              reinit_completion(&slave->initialization_complete);
+       } else if ((status == SDW_SLAVE_ATTACHED) &&
+                  (slave->status == SDW_SLAVE_UNATTACHED)) {
+@@ -837,7 +837,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
+                       "%s: signaling enumeration completion for Slave %d\n",
+                       __func__, slave->dev_num);
+-              complete(&slave->enumeration_complete);
++              complete_all(&slave->enumeration_complete);
+       }
+       slave->status = status;
+       mutex_unlock(&bus->bus_lock);
+@@ -1840,7 +1840,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
+                               "%s: signaling initialization completion for Slave %d\n",
+                               __func__, slave->dev_num);
+-                      complete(&slave->initialization_complete);
++                      complete_all(&slave->initialization_complete);
+                       /*
+                        * If the manager became pm_runtime active, the peripherals will be
+-- 
+2.40.1
+