git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 2 Oct 2024 10:15:12 +0000 (12:15 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 2 Oct 2024 10:15:12 +0000 (12:15 +0200)
added patches:
i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
i2c-isch-add-missed-else.patch
lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch

queue-5.10/i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch [new file with mode: 0644]
queue-5.10/i2c-isch-add-missed-else.patch [new file with mode: 0644]
queue-5.10/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch [new file with mode: 0644]
queue-5.10/mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch b/queue-5.10/i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
new file mode 100644 (file)
index 0000000..66823f3
--- /dev/null
@@ -0,0 +1,63 @@
+From 93701d3b84ac5f3ea07259d4ced405c53d757985 Mon Sep 17 00:00:00 2001
+From: Tommy Huang <tommy_huang@aspeedtech.com>
+Date: Wed, 11 Sep 2024 17:39:51 +0800
+Subject: i2c: aspeed: Update the stop sw state when the bus recovery occurs
+
+From: Tommy Huang <tommy_huang@aspeedtech.com>
+
+commit 93701d3b84ac5f3ea07259d4ced405c53d757985 upstream.
+
+When i2c bus recovery occurs, the driver sends an i2c stop command
+while SCL is low. In this case the software state is left in its
+original condition. Under multi-master usage, i2c bus recovery is
+invoked when an i2c transfer times out. Issue the stop command via
+the aspeed_i2c_do_stop() function so that master_state is updated.
+
+Fixes: f327c686d3ba ("i2c: aspeed: added driver for Aspeed I2C")
+Cc: stable@vger.kernel.org # v4.13+
+Signed-off-by: Tommy Huang <tommy_huang@aspeedtech.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-aspeed.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -172,6 +172,13 @@ struct aspeed_i2c_bus {
+ static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
++/* precondition: bus.lock has been acquired. */
++static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
++{
++      bus->master_state = ASPEED_I2C_MASTER_STOP;
++      writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++}
++
+ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
+ {
+       unsigned long time_left, flags;
+@@ -189,7 +196,7 @@ static int aspeed_i2c_recover_bus(struct
+                       command);
+               reinit_completion(&bus->cmd_complete);
+-              writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++              aspeed_i2c_do_stop(bus);
+               spin_unlock_irqrestore(&bus->lock, flags);
+               time_left = wait_for_completion_timeout(
+@@ -386,13 +393,6 @@ static void aspeed_i2c_do_start(struct a
+ }
+ /* precondition: bus.lock has been acquired. */
+-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+-{
+-      bus->master_state = ASPEED_I2C_MASTER_STOP;
+-      writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+-}
+-
+-/* precondition: bus.lock has been acquired. */
+ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
+ {
+       if (bus->msgs_index + 1 < bus->msgs_count) {
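The patch above centralizes the STOP command in aspeed_i2c_do_stop() so the recovery path updates master_state together with issuing the hardware STOP. Below is a minimal user-space model of that pattern, not the actual driver: the register write is simulated and every name outside the patch is a hypothetical stand-in.

#include <stdint.h>
#include <stdio.h>

/* Simulated command register and SW state -- hypothetical stand-ins
 * for the ASPEED hardware register and driver fields in the patch. */
enum master_state { MASTER_INACTIVE, MASTER_START, MASTER_STOP };

struct fake_bus {
	uint32_t cmd_reg;                 /* stands in for ASPEED_I2C_CMD_REG   */
	enum master_state master_state;   /* SW state machine                   */
};

#define STOP_CMD 0x20U                    /* placeholder for ASPEED_I2CD_M_STOP_CMD */

/* Mirrors the idea of aspeed_i2c_do_stop(): the SW state is updated in
 * the same helper that issues the hardware STOP, so no caller can send
 * a STOP while leaving master_state stale. */
static void do_stop(struct fake_bus *bus)
{
	bus->master_state = MASTER_STOP;
	bus->cmd_reg = STOP_CMD;
}

/* Recovery path: before the fix it wrote STOP_CMD directly and left
 * master_state untouched; after the fix it goes through do_stop(). */
static void recover_bus(struct fake_bus *bus)
{
	do_stop(bus);
}

int main(void)
{
	struct fake_bus bus = { .master_state = MASTER_START };

	recover_bus(&bus);
	printf("state after recovery: %d (expect %d)\n",
	       bus.master_state, MASTER_STOP);
	return 0;
}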
diff --git a/queue-5.10/i2c-isch-add-missed-else.patch b/queue-5.10/i2c-isch-add-missed-else.patch
new file mode 100644 (file)
index 0000000..dfbf4de
--- /dev/null
@@ -0,0 +1,34 @@
+From 1db4da55070d6a2754efeb3743f5312fc32f5961 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 11 Sep 2024 18:39:14 +0300
+Subject: i2c: isch: Add missed 'else'
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 1db4da55070d6a2754efeb3743f5312fc32f5961 upstream.
+
+In accordance with the existing comment and code analysis, it is
+quite likely that an 'else' is missing in the branch that handles
+the adapter timeout. Add it.
+
+Fixes: 5bc1200852c3 ("i2c: Add Intel SCH SMBus support")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v2.6.27+
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-isch.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-isch.c
++++ b/drivers/i2c/busses/i2c-isch.c
+@@ -99,8 +99,7 @@ static int sch_transaction(void)
+       if (retries > MAX_RETRIES) {
+               dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
+               result = -ETIMEDOUT;
+-      }
+-      if (temp & 0x04) {
++      } else if (temp & 0x04) {
+               result = -EIO;
+               dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
+                       "locked until next hard reset. (sorry!)\n");
diff --git a/queue-5.10/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch b/queue-5.10/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
new file mode 100644 (file)
index 0000000..7b07f59
--- /dev/null
@@ -0,0 +1,215 @@
+From a6f88ac32c6e63e69c595bfae220d8641704c9b7 Mon Sep 17 00:00:00 2001
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Date: Thu, 20 Jun 2024 22:54:34 +0000
+Subject: lockdep: fix deadlock issue between lockdep and rcu
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+commit a6f88ac32c6e63e69c595bfae220d8641704c9b7 upstream.
+
+There is a deadlock scenario between lockdep and rcu when the
+rcu nocb feature is enabled, as shown in the following call stacks:
+
+     rcuop/x
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
+-001|queued_spin_lock(inline) // try to hold nocb_gp_lock
+-001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
+-002|__raw_spin_lock_irqsave(inline)
+-002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
+-003|wake_nocb_gp_defer(inline)
+-003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
+-004|__call_rcu_common(inline)
+-004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
+-005|call_rcu_zapped(inline)
+-005|free_zapped_rcu(ch = ?)// hold graph lock
+-006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
+-007|nocb_cb_wait(inline)
+-007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
+-008|kthread(_create = 0xFFFFFF80803122C0)
+-009|ret_from_fork(asm)
+
+     rcuop/y
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
+-001|queued_spin_lock()
+-001|lockdep_lock()
+-001|graph_lock() // try to hold graph lock
+-002|lookup_chain_cache_add()
+-002|validate_chain()
+-003|lock_acquire
+-004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
+-005|lock_timer_base(inline)
+-006|mod_timer(inline)
+-006|wake_nocb_gp_defer(inline)// hold nocb_gp_lock
+-006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
+-007|__call_rcu_common(inline)
+-007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
+-008|call_rcu_hurry(inline)
+-008|rcu_sync_call(inline)
+-008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
+-009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
+-010|nocb_cb_wait(inline)
+-010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
+-011|kthread(_create = 0xFFFFFF8080363740)
+-012|ret_from_fork(asm)
+
+rcuop/x and rcuop/y are rcu nocb threads that share the same nocb gp
+thread. This patch releases the graph lock before lockdep calls call_rcu().
+
+Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
+Cc: stable@vger.kernel.org
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: Carlos Llamas <cmllamas@google.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Reviewed-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Link: https://lore.kernel.org/r/20240620225436.3127927-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/lockdep.c |   48 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 16 deletions(-)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5967,25 +5967,27 @@ static struct pending_free *get_pending_
+ static void free_zapped_rcu(struct rcu_head *cb);
+ /*
+- * Schedule an RCU callback if no RCU callback is pending. Must be called with
+- * the graph lock held.
+- */
+-static void call_rcu_zapped(struct pending_free *pf)
++* See if we need to queue an RCU callback, must called with
++* the lockdep lock held, returns false if either we don't have
++* any pending free or the callback is already scheduled.
++* Otherwise, a call_rcu() must follow this function call.
++*/
++static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ {
+       WARN_ON_ONCE(inside_selftest());
+       if (list_empty(&pf->zapped))
+-              return;
++              return false;
+       if (delayed_free.scheduled)
+-              return;
++              return false;
+       delayed_free.scheduled = true;
+       WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+       delayed_free.index ^= 1;
+-      call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++      return true;
+ }
+ /* The caller must hold the graph lock. May be called from RCU context. */
+@@ -6011,6 +6013,7 @@ static void free_zapped_rcu(struct rcu_h
+ {
+       struct pending_free *pf;
+       unsigned long flags;
++      bool need_callback;
+       if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+               return;
+@@ -6022,14 +6025,18 @@ static void free_zapped_rcu(struct rcu_h
+       pf = delayed_free.pf + (delayed_free.index ^ 1);
+       __free_zapped_classes(pf);
+       delayed_free.scheduled = false;
++      need_callback =
++              prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
++      lockdep_unlock();
++      raw_local_irq_restore(flags);
+       /*
+-       * If there's anything on the open list, close and start a new callback.
+-       */
+-      call_rcu_zapped(delayed_free.pf + delayed_free.index);
++      * If there's pending free and its callback has not been scheduled,
++      * queue an RCU callback.
++      */
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+-      lockdep_unlock();
+-      raw_local_irq_restore(flags);
+ }
+ /*
+@@ -6069,6 +6076,7 @@ static void lockdep_free_key_range_reg(v
+ {
+       struct pending_free *pf;
+       unsigned long flags;
++      bool need_callback;
+       init_data_structures_once();
+@@ -6076,10 +6084,11 @@ static void lockdep_free_key_range_reg(v
+       lockdep_lock();
+       pf = get_pending_free();
+       __lockdep_free_key_range(pf, start, size);
+-      call_rcu_zapped(pf);
++      need_callback = prepare_call_rcu_zapped(pf);
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
+-
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+       /*
+        * Wait for any possible iterators from look_up_lock_class() to pass
+        * before continuing to free the memory they refer to.
+@@ -6173,6 +6182,7 @@ static void lockdep_reset_lock_reg(struc
+       struct pending_free *pf;
+       unsigned long flags;
+       int locked;
++      bool need_callback = false;
+       raw_local_irq_save(flags);
+       locked = graph_lock();
+@@ -6181,11 +6191,13 @@ static void lockdep_reset_lock_reg(struc
+       pf = get_pending_free();
+       __lockdep_reset_lock(pf, lock);
+-      call_rcu_zapped(pf);
++      need_callback = prepare_call_rcu_zapped(pf);
+       graph_unlock();
+ out_irq:
+       raw_local_irq_restore(flags);
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ }
+ /*
+@@ -6229,6 +6241,7 @@ void lockdep_unregister_key(struct lock_
+       struct pending_free *pf;
+       unsigned long flags;
+       bool found = false;
++      bool need_callback = false;
+       might_sleep();
+@@ -6249,11 +6262,14 @@ void lockdep_unregister_key(struct lock_
+       if (found) {
+               pf = get_pending_free();
+               __lockdep_free_key_range(pf, key, 1);
+-              call_rcu_zapped(pf);
++              need_callback = prepare_call_rcu_zapped(pf);
+       }
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++
+       /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+       synchronize_rcu();
+ }
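The shape of the fix above is "decide under the lock, act after dropping it": prepare_call_rcu_zapped() only records whether a callback is needed, and call_rcu() runs once the graph lock has been released, so lockdep no longer enters RCU machinery while holding its own lock. The pthread-based sketch below models that shape only; all names are hypothetical and this is not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static bool callback_scheduled;
static bool have_pending_work = true;

/* Analogue of prepare_call_rcu_zapped(): decide, under the lock,
 * whether a callback must be queued, but do not queue it here. */
static bool prepare_callback(void)
{
	if (!have_pending_work || callback_scheduled)
		return false;
	callback_scheduled = true;
	return true;
}

/* Analogue of call_rcu(): in the real scenario this path may take
 * other locks (e.g. the nocb_gp_lock in the stacks above), so it must
 * run only after graph_lock has been dropped. */
static void queue_callback(void)
{
	printf("callback queued outside the lock\n");
}

int main(void)
{
	bool need_callback;

	pthread_mutex_lock(&graph_lock);
	need_callback = prepare_callback();   /* decide under the lock */
	pthread_mutex_unlock(&graph_lock);    /* drop it first ...     */

	if (need_callback)
		queue_callback();             /* ... then act          */
	return 0;
}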
diff --git a/queue-5.10/mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch b/queue-5.10/mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch
new file mode 100644 (file)
index 0000000..6ae5d0d
--- /dev/null
@@ -0,0 +1,51 @@
+From 69b50d4351ed924f29e3d46b159e28f70dfc707f Mon Sep 17 00:00:00 2001
+From: David Gow <davidgow@google.com>
+Date: Sat, 3 Aug 2024 15:46:41 +0800
+Subject: mm: only enforce minimum stack gap size if it's sensible
+
+From: David Gow <davidgow@google.com>
+
+commit 69b50d4351ed924f29e3d46b159e28f70dfc707f upstream.
+
+The generic mmap_base code tries to leave a gap between the top of the
+stack and the mmap base address, but enforces a minimum gap size (MIN_GAP)
+of 128MB, which is too large on some setups.  In particular, on arm tasks
+without ADDR_LIMIT_32BIT, the STACK_TOP value is less than 128MB, so it's
+impossible to fit such a gap in.
+
+Only enforce this minimum if MIN_GAP < MAX_GAP, as we'd prefer to honour
+MAX_GAP, which is defined proportionally, so scales better and always
+leaves us with both _some_ stack space and some room for mmap.
+
+This fixes the usercopy KUnit test suite on 32-bit arm, as it doesn't set
+any personality flags so gets the default (in this case 26-bit) task size.
+This test can be run with: ./tools/testing/kunit/kunit.py run --arch arm
+usercopy --make_options LLVM=1
+
+Link: https://lkml.kernel.org/r/20240803074642.1849623-2-davidgow@google.com
+Fixes: dba79c3df4a2 ("arm: use generic mmap top-down layout and brk randomization")
+Signed-off-by: David Gow <davidgow@google.com>
+Reviewed-by: Kees Cook <kees@kernel.org>
+Cc: Alexandre Ghiti <alex@ghiti.fr>
+Cc: Linus Walleij <linus.walleij@linaro.org>
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/util.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -418,7 +418,7 @@ static unsigned long mmap_base(unsigned
+       if (gap + pad > gap)
+               gap += pad;
+-      if (gap < MIN_GAP)
++      if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
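To see why the extra MIN_GAP < MAX_GAP test helps, plug in the numbers from the commit message: a 26-bit arm task size is 64MB, so the fixed 128MB MIN_GAP can never fit below the stack, while the proportional MAX_GAP still can. The arithmetic sketch below assumes MAX_GAP is 5/6 of the task size as an illustrative stand-in for the kernel's proportional definition; only the 128MB minimum and the 26-bit task size come from the patch.

#include <stdio.h>

#define SZ_1M (1024ULL * 1024ULL)

/* Clamp the stack->mmap gap the way the fixed mmap_base() does:
 * enforce the fixed minimum only when it is smaller than the
 * proportional maximum, otherwise fall back to the maximum. */
static unsigned long long clamp_gap(unsigned long long gap,
				    unsigned long long task_size)
{
	unsigned long long min_gap = 128 * SZ_1M;       /* MIN_GAP from the patch   */
	unsigned long long max_gap = task_size / 6 * 5; /* assumed proportional cap */

	if (gap < min_gap && min_gap < max_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;
	return gap;
}

int main(void)
{
	/* 26-bit task size (64MB), as in the failing KUnit setup: a 128MB
	 * minimum can never fit, so the small gap is kept as-is, while a
	 * 32-bit task size still gets the 128MB minimum enforced. */
	unsigned long long task_26bit = 1ULL << 26;
	unsigned long long task_32bit = 1ULL << 32;

	printf("26-bit task, 8MB gap -> %llu MB\n",
	       clamp_gap(8 * SZ_1M, task_26bit) / SZ_1M);
	printf("32-bit task, 8MB gap -> %llu MB\n",
	       clamp_gap(8 * SZ_1M, task_32bit) / SZ_1M);
	return 0;
}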
diff --git a/queue-5.10/series b/queue-5.10/series
index 49b3c1e3987580c0d72d9ced03dbf93a9730cc91..784dfc2135ea2b17706f1e3f342ab6abf76a419d 100644 (file)
@@ -268,3 +268,7 @@ pps-add-an-error-check-in-parport_attach.patch
 usb-renesas-xhci-remove-renesas_xhci_pci_exit.patch
 xhci-set-quirky-xhc-pci-hosts-to-d3-_after_-stopping.patch
 io_uring-sqpoll-do-not-allow-pinning-outside-of-cpuset.patch
+lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
+mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch
+i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
+i2c-isch-add-missed-else.patch