git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.4
author    Sasha Levin <sashal@kernel.org>
          Sat, 5 Oct 2024 18:29:13 +0000 (14:29 -0400)
committer Sasha Levin <sashal@kernel.org>
          Sat, 5 Oct 2024 18:29:13 +0000 (14:29 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.4/alsa-hda-conexant-fix-conflicting-quirk-for-system76.patch [new file with mode: 0644]
queue-5.4/alsa-hda-generic-unconditionally-prefer-preferred_da.patch [new file with mode: 0644]
queue-5.4/alsa-hda-realtek-fix-the-push-button-function-for-th.patch [new file with mode: 0644]
queue-5.4/f2fs-require-fmode_write-for-atomic-write-ioctls.patch [new file with mode: 0644]
queue-5.4/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch [new file with mode: 0644]
queue-5.4/locking-lockdep-avoid-potential-access-of-invalid-me.patch [new file with mode: 0644]
queue-5.4/locking-lockdep-fix-bad-recursion-pattern.patch [new file with mode: 0644]
queue-5.4/locking-lockdep-rework-lockdep_lock.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/alsa-hda-conexant-fix-conflicting-quirk-for-system76.patch b/queue-5.4/alsa-hda-conexant-fix-conflicting-quirk-for-system76.patch
new file mode 100644
index 0000000..55f5554
--- /dev/null
@@ -0,0 +1,72 @@
+From 4d526dead77e572e0ffcd6c95b717be4e7acbaa1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2024 10:25:58 +0200
+Subject: ALSA: hda/conexant: Fix conflicting quirk for System76 Pangolin
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit b3ebb007060f89d5a45c9b99f06a55e36a1945b5 ]
+
+We received a regression report for the System76 Pangolin (pang14)
+caused by the recent fix that enabled the top speaker on Tuxedo
+Sirius devices.  The reason was a conflicting PCI SSID, as is often
+seen.
+
+As a workaround, the codec SSID is now checked as well, and the quirk
+is applied conditionally, only to Sirius devices.
+
+Fixes: 4178d78cd7a8 ("ALSA: hda/conexant: Add pincfg quirk to enable top speakers on Sirius devices")
+Reported-by: Christian Heusel <christian@heusel.eu>
+Reported-by: Jerry <jerryluo225@gmail.com>
+Closes: https://lore.kernel.org/c930b6a6-64e5-498f-b65a-1cd5e0a1d733@heusel.eu
+Link: https://patch.msgid.link/20241004082602.29016-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index d0f26d7ed8611..5c0a40083342f 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -717,6 +717,23 @@ static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
+       {}
+ };
++/* pincfg quirk for Tuxedo Sirius;
++ * unfortunately the (PCI) SSID conflicts with System76 Pangolin pang14,
++ * which has incompatible pin setup, so we check the codec SSID (luckily
++ * different one!) and conditionally apply the quirk here
++ */
++static void cxt_fixup_sirius_top_speaker(struct hda_codec *codec,
++                                       const struct hda_fixup *fix,
++                                       int action)
++{
++      /* ignore for incorrectly picked-up pang14 */
++      if (codec->core.subsystem_id == 0x278212b3)
++              return;
++      /* set up the top speaker pin */
++      if (action == HDA_FIXUP_ACT_PRE_PROBE)
++              snd_hda_codec_set_pincfg(codec, 0x1d, 0x82170111);
++}
++
+ static const struct hda_fixup cxt_fixups[] = {
+       [CXT_PINCFG_LENOVO_X200] = {
+               .type = HDA_FIXUP_PINS,
+@@ -873,11 +890,8 @@ static const struct hda_fixup cxt_fixups[] = {
+               .v.pins = cxt_pincfg_sws_js201d,
+       },
+       [CXT_PINCFG_TOP_SPEAKER] = {
+-              .type = HDA_FIXUP_PINS,
+-              .v.pins = (const struct hda_pintbl[]) {
+-                      { 0x1d, 0x82170111 },
+-                      { }
+-              },
++              .type = HDA_FIXUP_FUNC,
++              .v.func = cxt_fixup_sirius_top_speaker,
+       },
+ };
+-- 
+2.43.0
+
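
For context, the fixup above is still selected by PCI SSID through the driver's quirk table; the function-type fixup only makes the final per-codec decision. A minimal sketch of what such an entry looks like (the table name and IDs here are hypothetical, not taken from this patch; CXT_PINCFG_TOP_SPEAKER and SND_PCI_QUIRK are as used by the driver):

	/* Hypothetical quirk-table entry: two different machines ship the
	 * same PCI SSID, so both match here and reach the fixup function,
	 * which then discriminates by codec SSID as shown in the patch.
	 */
	static const struct snd_pci_quirk cxt_fixup_tbl[] = {
		SND_PCI_QUIRK(0x12c3, 0x0001, "Sirius/Pangolin (shared SSID)",
			      CXT_PINCFG_TOP_SPEAKER),
		{}
	};
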
diff --git a/queue-5.4/alsa-hda-generic-unconditionally-prefer-preferred_da.patch b/queue-5.4/alsa-hda-generic-unconditionally-prefer-preferred_da.patch
new file mode 100644
index 0000000..a8d3b7f
--- /dev/null
@@ -0,0 +1,60 @@
+From 96f659bfcd36310fc7fe1b54de9b638ac4403702 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 14:14:36 +0200
+Subject: ALSA: hda/generic: Unconditionally prefer preferred_dacs pairs
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1c801e7f77445bc56e5e1fec6191fd4503534787 ]
+
+Some time ago, we introduced the obey_preferred_dacs flag for choosing
+the DAC/pin pairs specified by the driver instead of parsing the
+paths.  This works as expected, per se, but there have been a few
+cases where we forgot to set this flag even though a preferred_dacs
+table was already set up.  That ended up with incorrect wiring and
+left us wondering why it didn't work.
+
+Basically, when the preferred_dacs table is provided, the driver
+really wants the wiring to follow it.  That is, the presence of the
+preferred_dacs table is itself already a "do it" flag.
+
+In this patch, we simply replace the evaluation of the
+obey_preferred_dacs flag with a check for the presence of the
+preferred_dacs table, fixing the misbehavior.  A follow-up patch will
+drop the now-obsolete flag.
+
+Fixes: 242d990c158d ("ALSA: hda/generic: Add option to enforce preferred_dacs pairs")
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1219803
+Link: https://patch.msgid.link/20241001121439.26060-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_generic.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 77c0abd252eb0..a534460b4222c 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -1371,7 +1371,7 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
+               struct nid_path *path;
+               hda_nid_t pin = pins[i];
+-              if (!spec->obey_preferred_dacs) {
++              if (!spec->preferred_dacs) {
+                       path = snd_hda_get_path_from_idx(codec, path_idx[i]);
+                       if (path) {
+                               badness += assign_out_path_ctls(codec, path);
+@@ -1383,7 +1383,7 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
+               if (dacs[i]) {
+                       if (is_dac_already_used(codec, dacs[i]))
+                               badness += bad->shared_primary;
+-              } else if (spec->obey_preferred_dacs) {
++              } else if (spec->preferred_dacs) {
+                       badness += BAD_NO_PRIMARY_DAC;
+               }
+-- 
+2.43.0
+
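
Since the presence of the table is now the switch, a codec driver only has to fill spec->preferred_dacs. A minimal sketch of the pattern (NIDs and function name are hypothetical; the table format — pin/DAC pairs terminated by 0 — follows the driver's convention):

	/* Pin/DAC pairs, 0-terminated; hypothetical NIDs for illustration */
	static const hda_nid_t preferred_pairs[] = {
		0x24, 0x02,	/* front pin   -> first DAC  */
		0x25, 0x03,	/* speaker pin -> second DAC */
		0
	};

	static void fixup_preferred_dacs(struct hda_codec *codec)
	{
		struct hda_gen_spec *spec = codec->spec;

		/* after this patch, setting the table alone is enough;
		 * no separate obey_preferred_dacs flag is required */
		spec->preferred_dacs = preferred_pairs;
	}
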
diff --git a/queue-5.4/alsa-hda-realtek-fix-the-push-button-function-for-th.patch b/queue-5.4/alsa-hda-realtek-fix-the-push-button-function-for-th.patch
new file mode 100644
index 0000000..d175338
--- /dev/null
@@ -0,0 +1,36 @@
+From 2c3f85faa9bf8347cf44079cb8111115613bae1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Sep 2024 18:50:39 +0800
+Subject: ALSA: hda/realtek: Fix the push button function for the ALC257
+
+From: Oder Chiou <oder_chiou@realtek.com>
+
+[ Upstream commit 05df9732a0894846c46d0062d4af535c5002799d ]
+
+The headset push button does not work properly on the ALC257.
+This patch reverts the previous commit to correct that side effect.
+
+Fixes: ef9718b3d54e ("ALSA: hda/realtek: Fix noise from speakers on Lenovo IdeaPad 3 15IAU7")
+Signed-off-by: Oder Chiou <oder_chiou@realtek.com>
+Link: https://patch.msgid.link/20240930105039.3473266-1-oder_chiou@realtek.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index fbd8b4206bad8..f32c3c3752417 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -520,6 +520,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
+       switch (codec->core.vendor_id) {
+       case 0x10ec0236:
+       case 0x10ec0256:
++      case 0x10ec0257:
+       case 0x19e58326:
+       case 0x10ec0283:
+       case 0x10ec0285:
+-- 
+2.43.0
+
diff --git a/queue-5.4/f2fs-require-fmode_write-for-atomic-write-ioctls.patch b/queue-5.4/f2fs-require-fmode_write-for-atomic-write-ioctls.patch
new file mode 100644
index 0000000..9cfbc2e
--- /dev/null
@@ -0,0 +1,97 @@
+From 841a5f64146273906c92f879cc16a32e8bdf0bc3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2024 19:37:10 +0000
+Subject: f2fs: Require FMODE_WRITE for atomic write ioctls
+
+From: Jann Horn <jannh@google.com>
+
+commit 4f5a100f87f32cb65d4bb1ad282a08c92f6f591e upstream.
+
+The F2FS ioctls for starting and committing atomic writes check for
+inode_owner_or_capable(), but this does not give LSMs like SELinux or
+Landlock an opportunity to deny the write access - if the caller's FSUID
+matches the inode's UID, inode_owner_or_capable() immediately returns true.
+
+There are scenarios where LSMs want to deny a process the ability to write
+particular files, even files that the FSUID of the process owns; but this
+can currently partially be bypassed using atomic write ioctls in two ways:
+
+ - F2FS_IOC_START_ATOMIC_REPLACE + F2FS_IOC_COMMIT_ATOMIC_WRITE can
+   truncate an inode to size 0
+ - F2FS_IOC_START_ATOMIC_WRITE + F2FS_IOC_ABORT_ATOMIC_WRITE can revert
+   changes another process concurrently made to a file
+
+Fix it by requiring FMODE_WRITE for these operations, just like for
+F2FS_IOC_MOVE_RANGE. Since any legitimate caller should only be using these
+ioctls when intending to write into the file, that seems unlikely to break
+anything.
+
+Fixes: 88b88a667971 ("f2fs: support atomic writes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jann Horn <jannh@google.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Reviewed-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/file.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 030152219c4d6..0057e7a6596d0 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1862,6 +1862,9 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int ret;
++      if (!(filp->f_mode & FMODE_WRITE))
++              return -EBADF;
++
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+@@ -1928,6 +1931,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
+       struct inode *inode = file_inode(filp);
+       int ret;
++      if (!(filp->f_mode & FMODE_WRITE))
++              return -EBADF;
++
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+@@ -1970,6 +1976,9 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
+       struct inode *inode = file_inode(filp);
+       int ret;
++      if (!(filp->f_mode & FMODE_WRITE))
++              return -EBADF;
++
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+@@ -2005,6 +2014,9 @@ static int f2fs_ioc_release_volatile_write(struct file *filp)
+       struct inode *inode = file_inode(filp);
+       int ret;
++      if (!(filp->f_mode & FMODE_WRITE))
++              return -EBADF;
++
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+@@ -2034,6 +2046,9 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
+       struct inode *inode = file_inode(filp);
+       int ret;
++      if (!(filp->f_mode & FMODE_WRITE))
++              return -EBADF;
++
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+-- 
+2.43.0
+
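
From userspace, the effect is easy to observe: an atomic-write ioctl on a read-only descriptor now fails early with EBADF. A sketch of that check (the path is hypothetical; the ioctl number is open-coded in case the <linux/f2fs.h> uapi header is unavailable — it corresponds to _IO(F2FS_IOCTL_MAGIC, 1) in the kernel sources):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define F2FS_IOC_START_ATOMIC_WRITE	_IO(0xf5, 1)

	int main(void)
	{
		/* hypothetical path to a file the caller owns on f2fs */
		int fd = open("/mnt/f2fs/victim", O_RDONLY);

		if (fd < 0)
			return 1;
		/* before this fix: succeeds for the owner despite O_RDONLY;
		 * after it: fails with EBADF before any other check runs */
		if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
			perror("F2FS_IOC_START_ATOMIC_WRITE");
		close(fd);
		return 0;
	}
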
diff --git a/queue-5.4/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch b/queue-5.4/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
new file mode 100644
index 0000000..9c3a314
--- /dev/null
@@ -0,0 +1,220 @@
+From 9b77da44049e49e040887d135333341e5eb18ca4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 22:54:34 +0000
+Subject: lockdep: fix deadlock issue between lockdep and rcu
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+[ Upstream commit a6f88ac32c6e63e69c595bfae220d8641704c9b7 ]
+
+There is a deadlock scenario between lockdep and RCU when the
+RCU nocb feature is enabled, as shown in the following call stacks:
+
+     rcuop/x
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
+-001|queued_spin_lock(inline) // try to hold nocb_gp_lock
+-001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
+-002|__raw_spin_lock_irqsave(inline)
+-002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
+-003|wake_nocb_gp_defer(inline)
+-003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
+-004|__call_rcu_common(inline)
+-004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
+-005|call_rcu_zapped(inline)
+-005|free_zapped_rcu(ch = ?)// hold graph lock
+-006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
+-007|nocb_cb_wait(inline)
+-007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
+-008|kthread(_create = 0xFFFFFF80803122C0)
+-009|ret_from_fork(asm)
+
+     rcuop/y
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
+-001|queued_spin_lock()
+-001|lockdep_lock()
+-001|graph_lock() // try to hold graph lock
+-002|lookup_chain_cache_add()
+-002|validate_chain()
+-003|lock_acquire
+-004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
+-005|lock_timer_base(inline)
+-006|mod_timer(inline)
+-006|wake_nocb_gp_defer(inline)// hold nocb_gp_lock
+-006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
+-007|__call_rcu_common(inline)
+-007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
+-008|call_rcu_hurry(inline)
+-008|rcu_sync_call(inline)
+-008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
+-009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
+-010|nocb_cb_wait(inline)
+-010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
+-011|kthread(_create = 0xFFFFFF8080363740)
+-012|ret_from_fork(asm)
+
+rcuop/x and rcuop/y are RCU nocb threads that share the same nocb gp
+thread.  This patch releases the graph lock before lockdep calls
+call_rcu().
+
+Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
+Cc: stable@vger.kernel.org
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: Carlos Llamas <cmllamas@google.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Reviewed-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Link: https://lore.kernel.org/r/20240620225436.3127927-1-cmllamas@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 48 ++++++++++++++++++++++++++--------------
+ 1 file changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 8e0351970a1da..7cc790a262def 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5054,25 +5054,27 @@ static struct pending_free *get_pending_free(void)
+ static void free_zapped_rcu(struct rcu_head *cb);
+ /*
+- * Schedule an RCU callback if no RCU callback is pending. Must be called with
+- * the graph lock held.
+- */
+-static void call_rcu_zapped(struct pending_free *pf)
++* See if we need to queue an RCU callback, must called with
++* the lockdep lock held, returns false if either we don't have
++* any pending free or the callback is already scheduled.
++* Otherwise, a call_rcu() must follow this function call.
++*/
++static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ {
+       WARN_ON_ONCE(inside_selftest());
+       if (list_empty(&pf->zapped))
+-              return;
++              return false;
+       if (delayed_free.scheduled)
+-              return;
++              return false;
+       delayed_free.scheduled = true;
+       WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+       delayed_free.index ^= 1;
+-      call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++      return true;
+ }
+ /* The caller must hold the graph lock. May be called from RCU context. */
+@@ -5098,6 +5100,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+ {
+       struct pending_free *pf;
+       unsigned long flags;
++      bool need_callback;
+       if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+               return;
+@@ -5109,14 +5112,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
+       pf = delayed_free.pf + (delayed_free.index ^ 1);
+       __free_zapped_classes(pf);
+       delayed_free.scheduled = false;
++      need_callback =
++              prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
++      lockdep_unlock();
++      raw_local_irq_restore(flags);
+       /*
+-       * If there's anything on the open list, close and start a new callback.
+-       */
+-      call_rcu_zapped(delayed_free.pf + delayed_free.index);
++      * If there's pending free and its callback has not been scheduled,
++      * queue an RCU callback.
++      */
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+-      lockdep_unlock();
+-      raw_local_irq_restore(flags);
+ }
+ /*
+@@ -5156,6 +5163,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
+ {
+       struct pending_free *pf;
+       unsigned long flags;
++      bool need_callback;
+       init_data_structures_once();
+@@ -5163,10 +5171,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
+       lockdep_lock();
+       pf = get_pending_free();
+       __lockdep_free_key_range(pf, start, size);
+-      call_rcu_zapped(pf);
++      need_callback = prepare_call_rcu_zapped(pf);
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
+-
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+       /*
+        * Wait for any possible iterators from look_up_lock_class() to pass
+        * before continuing to free the memory they refer to.
+@@ -5260,6 +5269,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
+       struct pending_free *pf;
+       unsigned long flags;
+       int locked;
++      bool need_callback = false;
+       raw_local_irq_save(flags);
+       locked = graph_lock();
+@@ -5268,11 +5278,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
+       pf = get_pending_free();
+       __lockdep_reset_lock(pf, lock);
+-      call_rcu_zapped(pf);
++      need_callback = prepare_call_rcu_zapped(pf);
+       graph_unlock();
+ out_irq:
+       raw_local_irq_restore(flags);
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ }
+ /*
+@@ -5316,6 +5328,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
+       struct pending_free *pf;
+       unsigned long flags;
+       bool found = false;
++      bool need_callback = false;
+       might_sleep();
+@@ -5336,11 +5349,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
+       if (found) {
+               pf = get_pending_free();
+               __lockdep_free_key_range(pf, key, 1);
+-              call_rcu_zapped(pf);
++              need_callback = prepare_call_rcu_zapped(pf);
+       }
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++
+       /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+       synchronize_rcu();
+ }
+-- 
+2.43.0
+
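
Reduced to its essentials, the fix follows a common pattern: while holding the internal lock, only record that a callback is needed, and issue the call that may take other locks — call_rcu() here — after dropping it. A generic kernel-style sketch (all names hypothetical, including the prepare_reclaim() helper):

	/* hypothetical helper: updates free-list bookkeeping under the
	 * lock and reports whether a callback still needs to be queued */
	static bool prepare_reclaim(void);

	static DEFINE_SPINLOCK(internal_lock);

	static void zap_and_maybe_queue(struct rcu_head *head,
					rcu_callback_t reclaim_cb)
	{
		bool need_cb;

		spin_lock(&internal_lock);
		need_cb = prepare_reclaim();	/* bookkeeping only, no call_rcu() */
		spin_unlock(&internal_lock);

		/* call_rcu() may take timer/nocb locks on its wakeup path,
		 * so it must never run under internal_lock */
		if (need_cb)
			call_rcu(head, reclaim_cb);
	}
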
diff --git a/queue-5.4/locking-lockdep-avoid-potential-access-of-invalid-me.patch b/queue-5.4/locking-lockdep-avoid-potential-access-of-invalid-me.patch
new file mode 100644
index 0000000..e4bef6f
--- /dev/null
@@ -0,0 +1,93 @@
+From bd44862fbae7319075186917122f23728373c61d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Jan 2022 21:35:58 -0500
+Subject: locking/lockdep: Avoid potential access of invalid memory in
+ lock_class
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 61cc4534b6550997c97a03759ab46b29d44c0017 ]
+
+It was found that reading /proc/lockdep after a lockdep splat may
+cause an access to freed memory if lockdep_unregister_key() is called
+after the splat but before /proc/lockdep is accessed [1].  This is due
+to the fact that the graph_lock() call in lockdep_unregister_key()
+fails after debug_locks has been cleared by the splat process.
+
+After lockdep_unregister_key() is called, the lock name may be freed
+while the corresponding lock_class structure still has a reference to
+it.  That invalid memory pointer will then be accessed when
+/proc/lockdep is read by a user, and a use-after-free (UAF) error will
+be reported if KASAN is enabled.
+
+To fix this problem, lockdep_unregister_key() is now modified to always
+search for a matching key irrespective of the debug_locks state and
+zap the corresponding lock class if a matching one is found.
+
+[1] https://lore.kernel.org/lkml/77f05c15-81b6-bddd-9650-80d5f23fe330@i-love.sakura.ne.jp/
+
+Fixes: 8b39adbee805 ("locking/lockdep: Make lockdep_unregister_key() honor 'debug_locks' again")
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lkml.kernel.org/r/20220103023558.1377055-1-longman@redhat.com
+Stable-dep-of: a6f88ac32c6e ("lockdep: fix deadlock issue between lockdep and rcu")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index b9fabbab39183..8e0351970a1da 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5302,7 +5302,13 @@ void lockdep_reset_lock(struct lockdep_map *lock)
+               lockdep_reset_lock_reg(lock);
+ }
+-/* Unregister a dynamically allocated key. */
++/*
++ * Unregister a dynamically allocated key.
++ *
++ * Unlike lockdep_register_key(), a search is always done to find a matching
++ * key irrespective of debug_locks to avoid potential invalid access to freed
++ * memory in lock_class entry.
++ */
+ void lockdep_unregister_key(struct lock_class_key *key)
+ {
+       struct hlist_head *hash_head = keyhashentry(key);
+@@ -5317,10 +5323,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
+               return;
+       raw_local_irq_save(flags);
+-      if (!graph_lock())
+-              goto out_irq;
++      lockdep_lock();
+-      pf = get_pending_free();
+       hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+               if (k == key) {
+                       hlist_del_rcu(&k->hash_entry);
+@@ -5328,11 +5332,13 @@ void lockdep_unregister_key(struct lock_class_key *key)
+                       break;
+               }
+       }
+-      WARN_ON_ONCE(!found);
+-      __lockdep_free_key_range(pf, key, 1);
+-      call_rcu_zapped(pf);
+-      graph_unlock();
+-out_irq:
++      WARN_ON_ONCE(!found && debug_locks);
++      if (found) {
++              pf = get_pending_free();
++              __lockdep_free_key_range(pf, key, 1);
++              call_rcu_zapped(pf);
++      }
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+       /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+-- 
+2.43.0
+
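
The window this closes is easiest to see as a timeline; a sketch reconstructed from the description above (not a literal reproduction recipe):

	/*
	 *   splat CPU            unregistering CPU           reader
	 *   ---------            -----------------           ------
	 *   debug_locks = 0
	 *                        lockdep_unregister_key()
	 *                          graph_lock() fails, so
	 *                          the key is never zapped;
	 *                          the caller frees the name
	 *                                                     cat /proc/lockdep
	 *                                                     -> dereferences the
	 *                                                        freed name (UAF)
	 */
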
diff --git a/queue-5.4/locking-lockdep-fix-bad-recursion-pattern.patch b/queue-5.4/locking-lockdep-fix-bad-recursion-pattern.patch
new file mode 100644
index 0000000..582576a
--- /dev/null
@@ -0,0 +1,296 @@
+From 695de97327d0ff04c3c8134ca9b98e3bdbe15b8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Mar 2020 09:56:38 +0100
+Subject: locking/lockdep: Fix bad recursion pattern
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 10476e6304222ced7df9b3d5fb0a043b3c2a1ad8 ]
+
+There were two patterns for lockdep_recursion:
+
+Pattern-A:
+       if (current->lockdep_recursion)
+               return
+
+       current->lockdep_recursion = 1;
+       /* do stuff */
+       current->lockdep_recursion = 0;
+
+Pattern-B:
+       current->lockdep_recursion++;
+       /* do stuff */
+       current->lockdep_recursion--;
+
+But a third pattern has emerged:
+
+Pattern-C:
+       current->lockdep_recursion = 1;
+       /* do stuff */
+       current->lockdep_recursion = 0;
+
+And while this isn't broken per se, it is highly dangerous because it
+doesn't nest properly.
+
+Get rid of all Pattern-C instances and shore up Pattern-A with a
+warning.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200313093325.GW12561@hirez.programming.kicks-ass.net
+Stable-dep-of: a6f88ac32c6e ("lockdep: fix deadlock issue between lockdep and rcu")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 74 ++++++++++++++++++++++------------------
+ 1 file changed, 40 insertions(+), 34 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 0d9ff8b621e6d..0a2be60e4aa7b 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -389,6 +389,12 @@ void lockdep_on(void)
+ }
+ EXPORT_SYMBOL(lockdep_on);
++static inline void lockdep_recursion_finish(void)
++{
++      if (WARN_ON_ONCE(--current->lockdep_recursion))
++              current->lockdep_recursion = 0;
++}
++
+ void lockdep_set_selftest_task(struct task_struct *task)
+ {
+       lockdep_selftest_task_struct = task;
+@@ -1720,11 +1726,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
+       this.class = class;
+       raw_local_irq_save(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       arch_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_forward_deps(&this);
+       arch_spin_unlock(&lockdep_lock);
+-      current->lockdep_recursion = 0;
++      current->lockdep_recursion--;
+       raw_local_irq_restore(flags);
+       return ret;
+@@ -1749,11 +1755,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
+       this.class = class;
+       raw_local_irq_save(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       arch_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_backward_deps(&this);
+       arch_spin_unlock(&lockdep_lock);
+-      current->lockdep_recursion = 0;
++      current->lockdep_recursion--;
+       raw_local_irq_restore(flags);
+       return ret;
+@@ -3550,9 +3556,9 @@ void lockdep_hardirqs_on(unsigned long ip)
+       if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+               return;
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       __trace_hardirqs_on_caller(ip);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+ }
+ NOKPROBE_SYMBOL(lockdep_hardirqs_on);
+@@ -3608,7 +3614,7 @@ void trace_softirqs_on(unsigned long ip)
+               return;
+       }
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       /*
+        * We'll do an OFF -> ON transition:
+        */
+@@ -3623,7 +3629,7 @@ void trace_softirqs_on(unsigned long ip)
+        */
+       if (curr->hardirqs_enabled)
+               mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+ }
+ /*
+@@ -3877,9 +3883,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
+                       return;
+               raw_local_irq_save(flags);
+-              current->lockdep_recursion = 1;
++              current->lockdep_recursion++;
+               register_lock_class(lock, subclass, 1);
+-              current->lockdep_recursion = 0;
++              lockdep_recursion_finish();
+               raw_local_irq_restore(flags);
+       }
+ }
+@@ -4561,11 +4567,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
+               return;
+       raw_local_irq_save(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       check_flags(flags);
+       if (__lock_set_class(lock, name, key, subclass, ip))
+               check_chain_key(current);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_set_class);
+@@ -4578,11 +4584,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+               return;
+       raw_local_irq_save(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       check_flags(flags);
+       if (__lock_downgrade(lock, ip))
+               check_chain_key(current);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_downgrade);
+@@ -4603,11 +4609,11 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+       __lock_acquire(lock, subclass, trylock, read, check,
+                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_acquire);
+@@ -4622,11 +4628,11 @@ void lock_release(struct lockdep_map *lock, int nested,
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       trace_lock_release(lock, ip);
+       if (__lock_release(lock, ip))
+               check_chain_key(current);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_release);
+@@ -4642,9 +4648,9 @@ int lock_is_held_type(const struct lockdep_map *lock, int read)
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       ret = __lock_is_held(lock, read);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+       return ret;
+@@ -4663,9 +4669,9 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       cookie = __lock_pin_lock(lock);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+       return cookie;
+@@ -4682,9 +4688,9 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       __lock_repin_lock(lock, cookie);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_repin_lock);
+@@ -4699,9 +4705,9 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       __lock_unpin_lock(lock, cookie);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_unpin_lock);
+@@ -4837,10 +4843,10 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       trace_lock_contended(lock, ip);
+       __lock_contended(lock, ip);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_contended);
+@@ -4857,9 +4863,9 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
+       raw_local_irq_save(flags);
+       check_flags(flags);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       __lock_acquired(lock, ip);
+-      current->lockdep_recursion = 0;
++      lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(lock_acquired);
+@@ -5087,7 +5093,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+       raw_local_irq_save(flags);
+       arch_spin_lock(&lockdep_lock);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       /* closed head */
+       pf = delayed_free.pf + (delayed_free.index ^ 1);
+@@ -5099,7 +5105,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+        */
+       call_rcu_zapped(delayed_free.pf + delayed_free.index);
+-      current->lockdep_recursion = 0;
++      current->lockdep_recursion--;
+       arch_spin_unlock(&lockdep_lock);
+       raw_local_irq_restore(flags);
+ }
+@@ -5146,11 +5152,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
+       raw_local_irq_save(flags);
+       arch_spin_lock(&lockdep_lock);
+-      current->lockdep_recursion = 1;
++      current->lockdep_recursion++;
+       pf = get_pending_free();
+       __lockdep_free_key_range(pf, start, size);
+       call_rcu_zapped(pf);
+-      current->lockdep_recursion = 0;
++      current->lockdep_recursion--;
+       arch_spin_unlock(&lockdep_lock);
+       raw_local_irq_restore(flags);
+-- 
+2.43.0
+
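
The danger with Pattern-C is easiest to see under nesting: the inner user resets the flag to zero, silently disarming the outer section. A sketch (outer() and inner() are hypothetical):

	void outer(void)
	{
		current->lockdep_recursion = 1;
		inner();	/* also uses Pattern-C internally */
		/* inner() has reset the flag to 0, so the rest of this
		 * section runs without recursion protection */
		current->lockdep_recursion = 0;
	}
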
diff --git a/queue-5.4/locking-lockdep-rework-lockdep_lock.patch b/queue-5.4/locking-lockdep-rework-lockdep_lock.patch
new file mode 100644
index 0000000..0e1fd39
--- /dev/null
@@ -0,0 +1,227 @@
+From 8b2b42aea942e3b2339ee85381d4da95f48a7e46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Mar 2020 11:09:49 +0100
+Subject: locking/lockdep: Rework lockdep_lock
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 248efb2158f1e23750728e92ad9db3ab60c14485 ]
+
+A few sites want to assert that we own the graph_lock/lockdep_lock, so
+provide a more conventional lock interface for it with a number of
+trivial debug checks.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200313102107.GX12561@hirez.programming.kicks-ass.net
+Stable-dep-of: a6f88ac32c6e ("lockdep: fix deadlock issue between lockdep and rcu")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 89 ++++++++++++++++++++++------------------
+ 1 file changed, 48 insertions(+), 41 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 0a2be60e4aa7b..b9fabbab39183 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -84,12 +84,39 @@ module_param(lock_stat, int, 0644);
+  * to use a raw spinlock - we really dont want the spinlock
+  * code to recurse back into the lockdep code...
+  */
+-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
++static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
++static struct task_struct *__owner;
++
++static inline void lockdep_lock(void)
++{
++      DEBUG_LOCKS_WARN_ON(!irqs_disabled());
++
++      arch_spin_lock(&__lock);
++      __owner = current;
++      current->lockdep_recursion++;
++}
++
++static inline void lockdep_unlock(void)
++{
++      if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
++              return;
++
++      current->lockdep_recursion--;
++      __owner = NULL;
++      arch_spin_unlock(&__lock);
++}
++
++static inline bool lockdep_assert_locked(void)
++{
++      return DEBUG_LOCKS_WARN_ON(__owner != current);
++}
++
+ static struct task_struct *lockdep_selftest_task_struct;
++
+ static int graph_lock(void)
+ {
+-      arch_spin_lock(&lockdep_lock);
++      lockdep_lock();
+       /*
+        * Make sure that if another CPU detected a bug while
+        * walking the graph we dont change it (while the other
+@@ -97,27 +124,15 @@ static int graph_lock(void)
+        * dropped already)
+        */
+       if (!debug_locks) {
+-              arch_spin_unlock(&lockdep_lock);
++              lockdep_unlock();
+               return 0;
+       }
+-      /* prevent any recursions within lockdep from causing deadlocks */
+-      current->lockdep_recursion++;
+       return 1;
+ }
+-static inline int graph_unlock(void)
++static inline void graph_unlock(void)
+ {
+-      if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
+-              /*
+-               * The lockdep graph lock isn't locked while we expect it to
+-               * be, we're confused now, bye!
+-               */
+-              return DEBUG_LOCKS_WARN_ON(1);
+-      }
+-
+-      current->lockdep_recursion--;
+-      arch_spin_unlock(&lockdep_lock);
+-      return 0;
++      lockdep_unlock();
+ }
+ /*
+@@ -128,7 +143,7 @@ static inline int debug_locks_off_graph_unlock(void)
+ {
+       int ret = debug_locks_off();
+-      arch_spin_unlock(&lockdep_lock);
++      lockdep_unlock();
+       return ret;
+ }
+@@ -1476,6 +1491,8 @@ static int __bfs(struct lock_list *source_entry,
+       struct circular_queue *cq = &lock_cq;
+       int ret = 1;
++      lockdep_assert_locked();
++
+       if (match(source_entry, data)) {
+               *target_entry = source_entry;
+               ret = 0;
+@@ -1498,8 +1515,6 @@ static int __bfs(struct lock_list *source_entry,
+               head = get_dep_list(lock, offset);
+-              DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+-
+               list_for_each_entry_rcu(entry, head, entry) {
+                       if (!lock_accessed(entry)) {
+                               unsigned int cq_depth;
+@@ -1726,11 +1741,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
+       this.class = class;
+       raw_local_irq_save(flags);
+-      current->lockdep_recursion++;
+-      arch_spin_lock(&lockdep_lock);
++      lockdep_lock();
+       ret = __lockdep_count_forward_deps(&this);
+-      arch_spin_unlock(&lockdep_lock);
+-      current->lockdep_recursion--;
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+       return ret;
+@@ -1755,11 +1768,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
+       this.class = class;
+       raw_local_irq_save(flags);
+-      current->lockdep_recursion++;
+-      arch_spin_lock(&lockdep_lock);
++      lockdep_lock();
+       ret = __lockdep_count_backward_deps(&this);
+-      arch_spin_unlock(&lockdep_lock);
+-      current->lockdep_recursion--;
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+       return ret;
+@@ -2930,7 +2941,7 @@ static inline int add_chain_cache(struct task_struct *curr,
+        * disabled to make this an IRQ-safe lock.. for recursion reasons
+        * lockdep won't complain about its own locking errors.
+        */
+-      if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
++      if (lockdep_assert_locked())
+               return 0;
+       chain = alloc_lock_chain();
+@@ -5092,8 +5103,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+               return;
+       raw_local_irq_save(flags);
+-      arch_spin_lock(&lockdep_lock);
+-      current->lockdep_recursion++;
++      lockdep_lock();
+       /* closed head */
+       pf = delayed_free.pf + (delayed_free.index ^ 1);
+@@ -5105,8 +5115,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+        */
+       call_rcu_zapped(delayed_free.pf + delayed_free.index);
+-      current->lockdep_recursion--;
+-      arch_spin_unlock(&lockdep_lock);
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+ }
+@@ -5151,13 +5160,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
+       init_data_structures_once();
+       raw_local_irq_save(flags);
+-      arch_spin_lock(&lockdep_lock);
+-      current->lockdep_recursion++;
++      lockdep_lock();
+       pf = get_pending_free();
+       __lockdep_free_key_range(pf, start, size);
+       call_rcu_zapped(pf);
+-      current->lockdep_recursion--;
+-      arch_spin_unlock(&lockdep_lock);
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+       /*
+@@ -5179,10 +5186,10 @@ static void lockdep_free_key_range_imm(void *start, unsigned long size)
+       init_data_structures_once();
+       raw_local_irq_save(flags);
+-      arch_spin_lock(&lockdep_lock);
++      lockdep_lock();
+       __lockdep_free_key_range(pf, start, size);
+       __free_zapped_classes(pf);
+-      arch_spin_unlock(&lockdep_lock);
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+ }
+@@ -5278,10 +5285,10 @@ static void lockdep_reset_lock_imm(struct lockdep_map *lock)
+       unsigned long flags;
+       raw_local_irq_save(flags);
+-      arch_spin_lock(&lockdep_lock);
++      lockdep_lock();
+       __lockdep_reset_lock(pf, lock);
+       __free_zapped_classes(pf);
+-      arch_spin_unlock(&lockdep_lock);
++      lockdep_unlock();
+       raw_local_irq_restore(flags);
+ }
+-- 
+2.43.0
+
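
The same ownership-asserting shape in miniature, with hypothetical names: pairing a raw lock with an owner field lets both the unlock path and assertion sites answer "do we hold it?" rather than merely "is it held by someone?", which is all arch_spin_is_locked() can tell you:

	static arch_spinlock_t my_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	static struct task_struct *my_owner;

	static inline void my_lock_acquire(void)
	{
		arch_spin_lock(&my_lock);
		my_owner = current;		/* record who holds it */
	}

	static inline void my_lock_release(void)
	{
		WARN_ON_ONCE(my_owner != current);	/* unlock by a stranger? */
		my_owner = NULL;
		arch_spin_unlock(&my_lock);
	}

	static inline bool my_lock_held_by_me(void)
	{
		return my_owner == current;
	}
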
diff --git a/queue-5.4/series b/queue-5.4/series
index bc6632d091740d206d2b3646e63f32df5b5cb3dd..e8c3c891201c3140cfacb915b2ccf51a6c48f939 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -192,3 +192,11 @@ net-avoid-potential-underflow-in-qdisc_pkt_len_init-.patch
 net-add-more-sanity-checks-to-qdisc_pkt_len_init.patch
 ipv4-ip_gre-fix-drops-of-small-packets-in-ipgre_xmit.patch
 sctp-set-sk_state-back-to-closed-if-autobind-fails-i.patch
+locking-lockdep-fix-bad-recursion-pattern.patch
+locking-lockdep-rework-lockdep_lock.patch
+locking-lockdep-avoid-potential-access-of-invalid-me.patch
+lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
+alsa-hda-realtek-fix-the-push-button-function-for-th.patch
+alsa-hda-generic-unconditionally-prefer-preferred_da.patch
+alsa-hda-conexant-fix-conflicting-quirk-for-system76.patch
+f2fs-require-fmode_write-for-atomic-write-ioctls.patch