]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.15
authorSasha Levin <sashal@kernel.org>
Sat, 26 Aug 2023 13:50:15 +0000 (09:50 -0400)
committerSasha Levin <sashal@kernel.org>
Sat, 26 Aug 2023 13:50:15 +0000 (09:50 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
45 files changed:
queue-5.15/alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch [new file with mode: 0644]
queue-5.15/bonding-fix-macvlan-over-alb-bond-support.patch [new file with mode: 0644]
queue-5.15/can-raw-fix-lockdep-issue-in-raw_release.patch [new file with mode: 0644]
queue-5.15/can-raw-fix-receiver-memory-leak.patch [new file with mode: 0644]
queue-5.15/dccp-annotate-data-races-in-dccp_poll.patch [new file with mode: 0644]
queue-5.15/dlm-improve-plock-logging-if-interrupted.patch [new file with mode: 0644]
queue-5.15/dlm-replace-usage-of-found-with-dedicated-list-itera.patch [new file with mode: 0644]
queue-5.15/drm-amd-display-check-tg-is-non-null-before-checking.patch [new file with mode: 0644]
queue-5.15/drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch [new file with mode: 0644]
queue-5.15/exfat-remove-argument-sector-from-exfat_get_dentry.patch [new file with mode: 0644]
queue-5.15/exfat-support-dynamic-allocate-bh-for-exfat_entry_se.patch [new file with mode: 0644]
queue-5.15/fbdev-fix-potential-oob-read-in-fast_imageblit.patch [new file with mode: 0644]
queue-5.15/fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch [new file with mode: 0644]
queue-5.15/fbdev-improve-performance-of-sys_imageblit.patch [new file with mode: 0644]
queue-5.15/fs-dlm-add-pid-to-debug-log.patch [new file with mode: 0644]
queue-5.15/fs-dlm-change-plock-interrupted-message-to-debug-aga.patch [new file with mode: 0644]
queue-5.15/fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch [new file with mode: 0644]
queue-5.15/fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch [new file with mode: 0644]
queue-5.15/ice-fix-receive-buffer-size-miscalculation.patch [new file with mode: 0644]
queue-5.15/igb-avoid-starting-unnecessary-workqueues.patch [new file with mode: 0644]
queue-5.15/igc-fix-the-typo-in-the-ptm-control-macro.patch [new file with mode: 0644]
queue-5.15/ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch [new file with mode: 0644]
queue-5.15/jbd2-fix-a-race-when-checking-checkpoint-buffer-busy.patch [new file with mode: 0644]
queue-5.15/jbd2-remove-journal_clean_one_cp_list.patch [new file with mode: 0644]
queue-5.15/jbd2-remove-t_checkpoint_io_list.patch [new file with mode: 0644]
queue-5.15/mips-cpu-features-enable-octeon_cache-by-cpu_type.patch [new file with mode: 0644]
queue-5.15/mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch [new file with mode: 0644]
queue-5.15/net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch [new file with mode: 0644]
queue-5.15/net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch [new file with mode: 0644]
queue-5.15/net-remove-bond_slave_has_mac_rcu.patch [new file with mode: 0644]
queue-5.15/net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch [new file with mode: 0644]
queue-5.15/net-validate-veth-and-vxcan-peer-ifindexes.patch [new file with mode: 0644]
queue-5.15/netfilter-nf_tables-fix-out-of-memory-error-handling.patch [new file with mode: 0644]
queue-5.15/netfilter-nf_tables-flush-pending-destroy-work-befor.patch [new file with mode: 0644]
queue-5.15/nfsv4-fix-out-path-in-__nfs4_get_acl_uncached.patch [new file with mode: 0644]
queue-5.15/nfsv4.2-fix-error-handling-in-nfs42_proc_getxattr.patch [new file with mode: 0644]
queue-5.15/octeontx2-af-sdp-fix-receive-link-config.patch [new file with mode: 0644]
queue-5.15/pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch [new file with mode: 0644]
queue-5.15/rtnetlink-reject-negative-ifindexes-in-rtm_newlink.patch [new file with mode: 0644]
queue-5.15/rtnetlink-return-enodev-when-ifname-does-not-exist-a.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/sock-annotate-data-races-around-prot-memory_pressure.patch [new file with mode: 0644]
queue-5.15/tracing-fix-cpu-buffers-unavailable-due-to-record_di.patch [new file with mode: 0644]
queue-5.15/tracing-fix-memleak-due-to-race-between-current_trac.patch [new file with mode: 0644]
queue-5.15/xprtrdma-remap-receive-buffers-after-a-reconnect.patch [new file with mode: 0644]

diff --git a/queue-5.15/alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch b/queue-5.15/alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch
new file mode 100644 (file)
index 0000000..7133a3b
--- /dev/null
@@ -0,0 +1,111 @@
+From 1e996e9affeb456aecfa7a4c702a8f2fb03be524 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jul 2023 13:24:30 +0200
+Subject: ALSA: pcm: Fix potential data race at PCM memory allocation helpers
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit bd55842ed998a622ba6611fe59b3358c9f76773d ]
+
+The PCM memory allocation helpers have a sanity check against too many
+buffer allocations.  However, the check is performed without a proper
+lock and the allocation isn't serialized; this allows user to allocate
+more memories than predefined max size.
+
+Practically seen, this isn't really a big problem, as it's more or
+less some "soft limit" as a sanity check, and it's not possible to
+allocate unlimitedly.  But it's still better to address this for more
+consistent behavior.
+
+The patch covers the size check in do_alloc_pages() with the
+card->memory_mutex, and increases the allocated size there for
+preventing the further overflow.  When the actual allocation fails,
+the size is decreased accordingly.
+
+Reported-by: BassCheck <bass@buaa.edu.cn>
+Reported-by: Tuo Li <islituo@gmail.com>
+Link: https://lore.kernel.org/r/CADm8Tek6t0WedK+3Y6rbE5YEt19tML8BUL45N2ji4ZAz1KcN_A@mail.gmail.com
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230703112430.30634-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/pcm_memory.c | 44 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 36 insertions(+), 8 deletions(-)
+
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index f1470590239e5..711e71016a7c3 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -31,20 +31,51 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
+ module_param(max_alloc_per_card, ulong, 0644);
+ MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
++static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++      card->total_pcm_alloc_bytes += bytes;
++}
++
++static void update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++      mutex_lock(&card->memory_mutex);
++      __update_allocated_size(card, bytes);
++      mutex_unlock(&card->memory_mutex);
++}
++
++static void decrease_allocated_size(struct snd_card *card, size_t bytes)
++{
++      mutex_lock(&card->memory_mutex);
++      WARN_ON(card->total_pcm_alloc_bytes < bytes);
++      __update_allocated_size(card, -(ssize_t)bytes);
++      mutex_unlock(&card->memory_mutex);
++}
++
+ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+                         size_t size, struct snd_dma_buffer *dmab)
+ {
+       int err;
++      /* check and reserve the requested size */
++      mutex_lock(&card->memory_mutex);
+       if (max_alloc_per_card &&
+-          card->total_pcm_alloc_bytes + size > max_alloc_per_card)
++          card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
++              mutex_unlock(&card->memory_mutex);
+               return -ENOMEM;
++      }
++      __update_allocated_size(card, size);
++      mutex_unlock(&card->memory_mutex);
+       err = snd_dma_alloc_pages(type, dev, size, dmab);
+       if (!err) {
+-              mutex_lock(&card->memory_mutex);
+-              card->total_pcm_alloc_bytes += dmab->bytes;
+-              mutex_unlock(&card->memory_mutex);
++              /* the actual allocation size might be bigger than requested,
++               * and we need to correct the account
++               */
++              if (dmab->bytes != size)
++                      update_allocated_size(card, dmab->bytes - size);
++      } else {
++              /* take back on allocation failure */
++              decrease_allocated_size(card, size);
+       }
+       return err;
+ }
+@@ -53,10 +84,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
+ {
+       if (!dmab->area)
+               return;
+-      mutex_lock(&card->memory_mutex);
+-      WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
+-      card->total_pcm_alloc_bytes -= dmab->bytes;
+-      mutex_unlock(&card->memory_mutex);
++      decrease_allocated_size(card, dmab->bytes);
+       snd_dma_free_pages(dmab);
+       dmab->area = NULL;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/bonding-fix-macvlan-over-alb-bond-support.patch b/queue-5.15/bonding-fix-macvlan-over-alb-bond-support.patch
new file mode 100644 (file)
index 0000000..4117180
--- /dev/null
@@ -0,0 +1,90 @@
+From 2519219bec75b45d0d6ea19c8773f5a2df3f88ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Aug 2023 15:19:04 +0800
+Subject: bonding: fix macvlan over alb bond support
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit e74216b8def3803e98ae536de78733e9d7f3b109 ]
+
+The commit 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode
+bonds") aims to enable the use of macvlans on top of rlb bond mode. However,
+the current rlb bond mode only handles ARP packets to update remote neighbor
+entries. This causes an issue when a macvlan is on top of the bond, and
+remote devices send packets to the macvlan using the bond's MAC address
+as the destination. After delivering the packets to the macvlan, the macvlan
+will rejects them as the MAC address is incorrect. Consequently, this commit
+makes macvlan over bond non-functional.
+
+To address this problem, one potential solution is to check for the presence
+of a macvlan port on the bond device using netif_is_macvlan_port(bond->dev)
+and return NULL in the rlb_arp_xmit() function. However, this approach
+doesn't fully resolve the situation when a VLAN exists between the bond and
+macvlan.
+
+So let's just do a partial revert for commit 14af9963ba1e in rlb_arp_xmit().
+As the comment said, Don't modify or load balance ARPs that do not originate
+locally.
+
+Fixes: 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode bonds")
+Reported-by: susan.zheng@veritas.com
+Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2117816
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_alb.c |  6 +++---
+ include/net/bonding.h          | 11 +----------
+ 2 files changed, 4 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index a6a70b872ac4a..b29393831a302 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -657,10 +657,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+               return NULL;
+       arp = (struct arp_pkt *)skb_network_header(skb);
+-      /* Don't modify or load balance ARPs that do not originate locally
+-       * (e.g.,arrive via a bridge).
++      /* Don't modify or load balance ARPs that do not originate
++       * from the bond itself or a VLAN directly above the bond.
+        */
+-      if (!bond_slave_has_mac_rx(bond, arp->mac_src))
++      if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+               return NULL;
+       if (arp->op_code == htons(ARPOP_REPLY)) {
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 6c90aca917edc..08d222752cc88 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -700,23 +700,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ }
+ /* Caller must hold rcu_read_lock() for read */
+-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
++static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+ {
+       struct list_head *iter;
+       struct slave *tmp;
+-      struct netdev_hw_addr *ha;
+       bond_for_each_slave_rcu(bond, tmp, iter)
+               if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+                       return true;
+-
+-      if (netdev_uc_empty(bond->dev))
+-              return false;
+-
+-      netdev_for_each_uc_addr(ha, bond->dev)
+-              if (ether_addr_equal_64bits(mac, ha->addr))
+-                      return true;
+-
+       return false;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/can-raw-fix-lockdep-issue-in-raw_release.patch b/queue-5.15/can-raw-fix-lockdep-issue-in-raw_release.patch
new file mode 100644 (file)
index 0000000..8d493cd
--- /dev/null
@@ -0,0 +1,159 @@
+From eb8eae370d5b4a930dba57949411b2244340a45f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jul 2023 11:44:38 +0000
+Subject: can: raw: fix lockdep issue in raw_release()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 11c9027c983e9e4b408ee5613b6504d24ebd85be ]
+
+syzbot complained about a lockdep issue [1]
+
+Since raw_bind() and raw_setsockopt() first get RTNL
+before locking the socket, we must adopt the same order in raw_release()
+
+[1]
+WARNING: possible circular locking dependency detected
+6.5.0-rc1-syzkaller-00192-g78adb4bcf99e #0 Not tainted
+------------------------------------------------------
+syz-executor.0/14110 is trying to acquire lock:
+ffff88804e4b6130 (sk_lock-AF_CAN){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1708 [inline]
+ffff88804e4b6130 (sk_lock-AF_CAN){+.+.}-{0:0}, at: raw_bind+0xb1/0xab0 net/can/raw.c:435
+
+but task is already holding lock:
+ffffffff8e3df368 (rtnl_mutex){+.+.}-{3:3}, at: raw_bind+0xa7/0xab0 net/can/raw.c:434
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #1 (rtnl_mutex){+.+.}-{3:3}:
+__mutex_lock_common kernel/locking/mutex.c:603 [inline]
+__mutex_lock+0x181/0x1340 kernel/locking/mutex.c:747
+raw_release+0x1c6/0x9b0 net/can/raw.c:391
+__sock_release+0xcd/0x290 net/socket.c:654
+sock_close+0x1c/0x20 net/socket.c:1386
+__fput+0x3fd/0xac0 fs/file_table.c:384
+task_work_run+0x14d/0x240 kernel/task_work.c:179
+resume_user_mode_work include/linux/resume_user_mode.h:49 [inline]
+exit_to_user_mode_loop kernel/entry/common.c:171 [inline]
+exit_to_user_mode_prepare+0x210/0x240 kernel/entry/common.c:204
+__syscall_exit_to_user_mode_work kernel/entry/common.c:286 [inline]
+syscall_exit_to_user_mode+0x1d/0x50 kernel/entry/common.c:297
+do_syscall_64+0x44/0xb0 arch/x86/entry/common.c:86
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+-> #0 (sk_lock-AF_CAN){+.+.}-{0:0}:
+check_prev_add kernel/locking/lockdep.c:3142 [inline]
+check_prevs_add kernel/locking/lockdep.c:3261 [inline]
+validate_chain kernel/locking/lockdep.c:3876 [inline]
+__lock_acquire+0x2e3d/0x5de0 kernel/locking/lockdep.c:5144
+lock_acquire kernel/locking/lockdep.c:5761 [inline]
+lock_acquire+0x1ae/0x510 kernel/locking/lockdep.c:5726
+lock_sock_nested+0x3a/0xf0 net/core/sock.c:3492
+lock_sock include/net/sock.h:1708 [inline]
+raw_bind+0xb1/0xab0 net/can/raw.c:435
+__sys_bind+0x1ec/0x220 net/socket.c:1792
+__do_sys_bind net/socket.c:1803 [inline]
+__se_sys_bind net/socket.c:1801 [inline]
+__x64_sys_bind+0x72/0xb0 net/socket.c:1801
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+other info that might help us debug this:
+
+Possible unsafe locking scenario:
+
+CPU0 CPU1
+---- ----
+lock(rtnl_mutex);
+        lock(sk_lock-AF_CAN);
+        lock(rtnl_mutex);
+lock(sk_lock-AF_CAN);
+
+*** DEADLOCK ***
+
+1 lock held by syz-executor.0/14110:
+
+stack backtrace:
+CPU: 0 PID: 14110 Comm: syz-executor.0 Not tainted 6.5.0-rc1-syzkaller-00192-g78adb4bcf99e #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/03/2023
+Call Trace:
+<TASK>
+__dump_stack lib/dump_stack.c:88 [inline]
+dump_stack_lvl+0xd9/0x1b0 lib/dump_stack.c:106
+check_noncircular+0x311/0x3f0 kernel/locking/lockdep.c:2195
+check_prev_add kernel/locking/lockdep.c:3142 [inline]
+check_prevs_add kernel/locking/lockdep.c:3261 [inline]
+validate_chain kernel/locking/lockdep.c:3876 [inline]
+__lock_acquire+0x2e3d/0x5de0 kernel/locking/lockdep.c:5144
+lock_acquire kernel/locking/lockdep.c:5761 [inline]
+lock_acquire+0x1ae/0x510 kernel/locking/lockdep.c:5726
+lock_sock_nested+0x3a/0xf0 net/core/sock.c:3492
+lock_sock include/net/sock.h:1708 [inline]
+raw_bind+0xb1/0xab0 net/can/raw.c:435
+__sys_bind+0x1ec/0x220 net/socket.c:1792
+__do_sys_bind net/socket.c:1803 [inline]
+__se_sys_bind net/socket.c:1801 [inline]
+__x64_sys_bind+0x72/0xb0 net/socket.c:1801
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+RIP: 0033:0x7fd89007cb29
+Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007fd890d2a0c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000031
+RAX: ffffffffffffffda RBX: 00007fd89019bf80 RCX: 00007fd89007cb29
+RDX: 0000000000000010 RSI: 0000000020000040 RDI: 0000000000000003
+RBP: 00007fd8900c847a R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 000000000000000b R14: 00007fd89019bf80 R15: 00007ffebf8124f8
+</TASK>
+
+Fixes: ee8b94c8510c ("can: raw: fix receiver memory leak")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Ziyang Xuan <william.xuanziyang@huawei.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: stable@vger.kernel.org
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Link: https://lore.kernel.org/all/20230720114438.172434-1-edumazet@google.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/raw.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/can/raw.c b/net/can/raw.c
+index afa76ce0bf608..c02df37894ff9 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -383,9 +383,9 @@ static int raw_release(struct socket *sock)
+       list_del(&ro->notifier);
+       spin_unlock(&raw_notifier_lock);
++      rtnl_lock();
+       lock_sock(sk);
+-      rtnl_lock();
+       /* remove current filters & unregister */
+       if (ro->bound) {
+               if (ro->dev)
+@@ -402,12 +402,13 @@ static int raw_release(struct socket *sock)
+       ro->dev = NULL;
+       ro->count = 0;
+       free_percpu(ro->uniq);
+-      rtnl_unlock();
+       sock_orphan(sk);
+       sock->sk = NULL;
+       release_sock(sk);
++      rtnl_unlock();
++
+       sock_put(sk);
+       return 0;
+-- 
+2.40.1
+
diff --git a/queue-5.15/can-raw-fix-receiver-memory-leak.patch b/queue-5.15/can-raw-fix-receiver-memory-leak.patch
new file mode 100644 (file)
index 0000000..fc2c030
--- /dev/null
@@ -0,0 +1,238 @@
+From 17e4ed6eb592db86e5435b3d9cf4f5a3c5beadf1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jul 2023 09:17:37 +0800
+Subject: can: raw: fix receiver memory leak
+
+From: Ziyang Xuan <william.xuanziyang@huawei.com>
+
+[ Upstream commit ee8b94c8510ce64afe0b87ef548d23e00915fb10 ]
+
+Got kmemleak errors with the following ltp can_filter testcase:
+
+for ((i=1; i<=100; i++))
+do
+        ./can_filter &
+        sleep 0.1
+done
+
+==============================================================
+[<00000000db4a4943>] can_rx_register+0x147/0x360 [can]
+[<00000000a289549d>] raw_setsockopt+0x5ef/0x853 [can_raw]
+[<000000006d3d9ebd>] __sys_setsockopt+0x173/0x2c0
+[<00000000407dbfec>] __x64_sys_setsockopt+0x61/0x70
+[<00000000fd468496>] do_syscall_64+0x33/0x40
+[<00000000b7e47d51>] entry_SYSCALL_64_after_hwframe+0x61/0xc6
+
+It's a bug in the concurrent scenario of unregister_netdevice_many()
+and raw_release() as following:
+
+             cpu0                                        cpu1
+unregister_netdevice_many(can_dev)
+  unlist_netdevice(can_dev) // dev_get_by_index() return NULL after this
+  net_set_todo(can_dev)
+                                               raw_release(can_socket)
+                                                 dev = dev_get_by_index(, ro->ifindex); // dev == NULL
+                                                 if (dev) { // receivers in dev_rcv_lists not free because dev is NULL
+                                                   raw_disable_allfilters(, dev, );
+                                                   dev_put(dev);
+                                                 }
+                                                 ...
+                                                 ro->bound = 0;
+                                                 ...
+
+call_netdevice_notifiers(NETDEV_UNREGISTER, )
+  raw_notify(, NETDEV_UNREGISTER, )
+    if (ro->bound) // invalid because ro->bound has been set 0
+      raw_disable_allfilters(, dev, ); // receivers in dev_rcv_lists will never be freed
+
+Add a net_device pointer member in struct raw_sock to record bound
+can_dev, and use rtnl_lock to serialize raw_socket members between
+raw_bind(), raw_release(), raw_setsockopt() and raw_notify(). Use
+ro->dev to decide whether to free receivers in dev_rcv_lists.
+
+Fixes: 8d0caedb7596 ("can: bcm/raw/isotp: use per module netdevice notifier")
+Reviewed-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Acked-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
+Link: https://lore.kernel.org/all/20230711011737.1969582-1-william.xuanziyang@huawei.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/raw.c | 57 ++++++++++++++++++++++-----------------------------
+ 1 file changed, 24 insertions(+), 33 deletions(-)
+
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 7105fa4824e4b..afa76ce0bf608 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -83,6 +83,7 @@ struct raw_sock {
+       struct sock sk;
+       int bound;
+       int ifindex;
++      struct net_device *dev;
+       struct list_head notifier;
+       int loopback;
+       int recv_own_msgs;
+@@ -275,7 +276,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
+       if (!net_eq(dev_net(dev), sock_net(sk)))
+               return;
+-      if (ro->ifindex != dev->ifindex)
++      if (ro->dev != dev)
+               return;
+       switch (msg) {
+@@ -290,6 +291,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
+               ro->ifindex = 0;
+               ro->bound = 0;
++              ro->dev = NULL;
+               ro->count = 0;
+               release_sock(sk);
+@@ -335,6 +337,7 @@ static int raw_init(struct sock *sk)
+       ro->bound            = 0;
+       ro->ifindex          = 0;
++      ro->dev              = NULL;
+       /* set default filter to single entry dfilter */
+       ro->dfilter.can_id   = 0;
+@@ -382,19 +385,13 @@ static int raw_release(struct socket *sock)
+       lock_sock(sk);
++      rtnl_lock();
+       /* remove current filters & unregister */
+       if (ro->bound) {
+-              if (ro->ifindex) {
+-                      struct net_device *dev;
+-
+-                      dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+-                      if (dev) {
+-                              raw_disable_allfilters(dev_net(dev), dev, sk);
+-                              dev_put(dev);
+-                      }
+-              } else {
++              if (ro->dev)
++                      raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
++              else
+                       raw_disable_allfilters(sock_net(sk), NULL, sk);
+-              }
+       }
+       if (ro->count > 1)
+@@ -402,8 +399,10 @@ static int raw_release(struct socket *sock)
+       ro->ifindex = 0;
+       ro->bound = 0;
++      ro->dev = NULL;
+       ro->count = 0;
+       free_percpu(ro->uniq);
++      rtnl_unlock();
+       sock_orphan(sk);
+       sock->sk = NULL;
+@@ -419,6 +418,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+       struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+       struct sock *sk = sock->sk;
+       struct raw_sock *ro = raw_sk(sk);
++      struct net_device *dev = NULL;
+       int ifindex;
+       int err = 0;
+       int notify_enetdown = 0;
+@@ -428,14 +428,13 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+       if (addr->can_family != AF_CAN)
+               return -EINVAL;
++      rtnl_lock();
+       lock_sock(sk);
+       if (ro->bound && addr->can_ifindex == ro->ifindex)
+               goto out;
+       if (addr->can_ifindex) {
+-              struct net_device *dev;
+-
+               dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
+               if (!dev) {
+                       err = -ENODEV;
+@@ -464,26 +463,20 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+       if (!err) {
+               if (ro->bound) {
+                       /* unregister old filters */
+-                      if (ro->ifindex) {
+-                              struct net_device *dev;
+-
+-                              dev = dev_get_by_index(sock_net(sk),
+-                                                     ro->ifindex);
+-                              if (dev) {
+-                                      raw_disable_allfilters(dev_net(dev),
+-                                                             dev, sk);
+-                                      dev_put(dev);
+-                              }
+-                      } else {
++                      if (ro->dev)
++                              raw_disable_allfilters(dev_net(ro->dev),
++                                                     ro->dev, sk);
++                      else
+                               raw_disable_allfilters(sock_net(sk), NULL, sk);
+-                      }
+               }
+               ro->ifindex = ifindex;
+               ro->bound = 1;
++              ro->dev = dev;
+       }
+  out:
+       release_sock(sk);
++      rtnl_unlock();
+       if (notify_enetdown) {
+               sk->sk_err = ENETDOWN;
+@@ -549,9 +542,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+               rtnl_lock();
+               lock_sock(sk);
+-              if (ro->bound && ro->ifindex) {
+-                      dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+-                      if (!dev) {
++              dev = ro->dev;
++              if (ro->bound && dev) {
++                      if (dev->reg_state != NETREG_REGISTERED) {
+                               if (count > 1)
+                                       kfree(filter);
+                               err = -ENODEV;
+@@ -592,7 +585,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+               ro->count  = count;
+  out_fil:
+-              dev_put(dev);
+               release_sock(sk);
+               rtnl_unlock();
+@@ -610,9 +602,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+               rtnl_lock();
+               lock_sock(sk);
+-              if (ro->bound && ro->ifindex) {
+-                      dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+-                      if (!dev) {
++              dev = ro->dev;
++              if (ro->bound && dev) {
++                      if (dev->reg_state != NETREG_REGISTERED) {
+                               err = -ENODEV;
+                               goto out_err;
+                       }
+@@ -636,7 +628,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+               ro->err_mask = err_mask;
+  out_err:
+-              dev_put(dev);
+               release_sock(sk);
+               rtnl_unlock();
+-- 
+2.40.1
+
diff --git a/queue-5.15/dccp-annotate-data-races-in-dccp_poll.patch b/queue-5.15/dccp-annotate-data-races-in-dccp_poll.patch
new file mode 100644 (file)
index 0000000..8675945
--- /dev/null
@@ -0,0 +1,82 @@
+From 1b118edc27df65f2f4b9ef2cae12a4d6af3a8c1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:58:20 +0000
+Subject: dccp: annotate data-races in dccp_poll()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cba3f1786916063261e3e5ccbb803abc325b24ef ]
+
+We changed tcp_poll() over time, bug never updated dccp.
+
+Note that we also could remove dccp instead of maintaining it.
+
+Fixes: 7c657876b63c ("[DCCP]: Initial implementation")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230818015820.2701595-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dccp/proto.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 5422d64af246e..0b0567a692a8f 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
+ __poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait)
+ {
+-      __poll_t mask;
+       struct sock *sk = sock->sk;
++      __poll_t mask;
++      u8 shutdown;
++      int state;
+       sock_poll_wait(file, sock, wait);
+-      if (sk->sk_state == DCCP_LISTEN)
++
++      state = inet_sk_state_load(sk);
++      if (state == DCCP_LISTEN)
+               return inet_csk_listen_poll(sk);
+       /* Socket is not locked. We are protected from async events
+@@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+        */
+       mask = 0;
+-      if (sk->sk_err)
++      if (READ_ONCE(sk->sk_err))
+               mask = EPOLLERR;
++      shutdown = READ_ONCE(sk->sk_shutdown);
+-      if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
++      if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
+               mask |= EPOLLHUP;
+-      if (sk->sk_shutdown & RCV_SHUTDOWN)
++      if (shutdown & RCV_SHUTDOWN)
+               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+       /* Connected? */
+-      if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
++      if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+               if (atomic_read(&sk->sk_rmem_alloc) > 0)
+                       mask |= EPOLLIN | EPOLLRDNORM;
+-              if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++              if (!(shutdown & SEND_SHUTDOWN)) {
+                       if (sk_stream_is_writeable(sk)) {
+                               mask |= EPOLLOUT | EPOLLWRNORM;
+                       } else {  /* send SIGIO later */
+@@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+       }
+       return mask;
+ }
+-
+ EXPORT_SYMBOL_GPL(dccp_poll);
+ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+-- 
+2.40.1
+
diff --git a/queue-5.15/dlm-improve-plock-logging-if-interrupted.patch b/queue-5.15/dlm-improve-plock-logging-if-interrupted.patch
new file mode 100644 (file)
index 0000000..6c7c145
--- /dev/null
@@ -0,0 +1,65 @@
+From 8a0cb36eefb56a043af051d31ddfd5ff8be023fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:34 -0400
+Subject: dlm: improve plock logging if interrupted
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit bcfad4265cedf3adcac355e994ef9771b78407bd ]
+
+This patch changes the log level if a plock is removed when interrupted
+from debug to info. Additional it signals now that the plock entity was
+removed to let the user know what's happening.
+
+If on a dev_write() a pending plock cannot be find it will signal that
+it might have been removed because wait interruption.
+
+Before this patch there might be a "dev_write no op ..." info message
+and the users can only guess that the plock was removed before because
+the wait interruption. To be sure that is the case we log both messages
+on the same log level.
+
+Let both message be logged on info layer because it should not happened
+a lot and if it happens it should be clear why the op was not found.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f3482e936cc25..f74d5a28ad27c 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -161,11 +161,12 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+       rv = wait_event_killable(recv_wq, (op->done != 0));
+       if (rv == -ERESTARTSYS) {
+-              log_debug(ls, "%s: wait killed %llx", __func__,
+-                        (unsigned long long)number);
+               spin_lock(&ops_lock);
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
++              log_print("%s: wait interrupted %x %llx, op removed",
++                        __func__, ls->ls_global_id,
++                        (unsigned long long)number);
+               dlm_release_plock_op(op);
+               do_unlock_close(ls, number, file, fl);
+               goto out;
+@@ -469,8 +470,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+               else
+                       wake_up(&recv_wq);
+       } else
+-              log_print("dev_write no op %x %llx", info.fsid,
+-                        (unsigned long long)info.number);
++              log_print("%s: no op %x %llx - may got interrupted?", __func__,
++                        info.fsid, (unsigned long long)info.number);
+       return count;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/dlm-replace-usage-of-found-with-dedicated-list-itera.patch b/queue-5.15/dlm-replace-usage-of-found-with-dedicated-list-itera.patch
new file mode 100644 (file)
index 0000000..4851e61
--- /dev/null
@@ -0,0 +1,300 @@
+From 649be7df619b96915c3ceac7488b6fa3979b1159 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 14:05:31 -0400
+Subject: dlm: replace usage of found with dedicated list iterator variable
+
+From: Jakob Koschel <jakobkoschel@gmail.com>
+
+[ Upstream commit dc1acd5c94699389a9ed023e94dd860c846ea1f6 ]
+
+To move the list iterator variable into the list_for_each_entry_*()
+macro in the future it should be avoided to use the list iterator
+variable after the loop body.
+
+To *never* use the list iterator variable after the loop it was
+concluded to use a separate iterator variable instead of a
+found boolean [1].
+
+This removes the need to use a found variable and simply checking if
+the variable was set, can determine if the break/goto was hit.
+
+Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ [1]
+Signed-off-by: Jakob Koschel <jakobkoschel@gmail.com>
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lock.c    | 53 +++++++++++++++++++++++-------------------------
+ fs/dlm/plock.c   | 24 +++++++++++-----------
+ fs/dlm/recover.c | 39 +++++++++++++++++------------------
+ 3 files changed, 56 insertions(+), 60 deletions(-)
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 862cb7a353c1c..b9829b873bf2e 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
+ void dlm_scan_timeout(struct dlm_ls *ls)
+ {
+       struct dlm_rsb *r;
+-      struct dlm_lkb *lkb;
++      struct dlm_lkb *lkb = NULL, *iter;
+       int do_cancel, do_warn;
+       s64 wait_us;
+@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
+               do_cancel = 0;
+               do_warn = 0;
+               mutex_lock(&ls->ls_timeout_mutex);
+-              list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
++              list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
+                       wait_us = ktime_to_us(ktime_sub(ktime_get(),
+-                                                      lkb->lkb_timestamp));
++                                                      iter->lkb_timestamp));
+-                      if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
+-                          wait_us >= (lkb->lkb_timeout_cs * 10000))
++                      if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
++                          wait_us >= (iter->lkb_timeout_cs * 10000))
+                               do_cancel = 1;
+-                      if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
++                      if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+                           wait_us >= dlm_config.ci_timewarn_cs * 10000)
+                               do_warn = 1;
+                       if (!do_cancel && !do_warn)
+                               continue;
+-                      hold_lkb(lkb);
++                      hold_lkb(iter);
++                      lkb = iter;
+                       break;
+               }
+               mutex_unlock(&ls->ls_timeout_mutex);
+-              if (!do_cancel && !do_warn)
++              if (!lkb)
+                       break;
+               r = lkb->lkb_resource;
+@@ -5239,21 +5240,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
+ {
+-      struct dlm_lkb *lkb;
+-      int found = 0;
++      struct dlm_lkb *lkb = NULL, *iter;
+       mutex_lock(&ls->ls_waiters_mutex);
+-      list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+-              if (lkb->lkb_flags & DLM_IFL_RESEND) {
+-                      hold_lkb(lkb);
+-                      found = 1;
++      list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
++              if (iter->lkb_flags & DLM_IFL_RESEND) {
++                      hold_lkb(iter);
++                      lkb = iter;
+                       break;
+               }
+       }
+       mutex_unlock(&ls->ls_waiters_mutex);
+-      if (!found)
+-              lkb = NULL;
+       return lkb;
+ }
+@@ -5912,37 +5910,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+                    int mode, uint32_t flags, void *name, unsigned int namelen,
+                    unsigned long timeout_cs, uint32_t *lkid)
+ {
+-      struct dlm_lkb *lkb;
++      struct dlm_lkb *lkb = NULL, *iter;
+       struct dlm_user_args *ua;
+       int found_other_mode = 0;
+-      int found = 0;
+       int rv = 0;
+       mutex_lock(&ls->ls_orphans_mutex);
+-      list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
+-              if (lkb->lkb_resource->res_length != namelen)
++      list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
++              if (iter->lkb_resource->res_length != namelen)
+                       continue;
+-              if (memcmp(lkb->lkb_resource->res_name, name, namelen))
++              if (memcmp(iter->lkb_resource->res_name, name, namelen))
+                       continue;
+-              if (lkb->lkb_grmode != mode) {
++              if (iter->lkb_grmode != mode) {
+                       found_other_mode = 1;
+                       continue;
+               }
+-              found = 1;
+-              list_del_init(&lkb->lkb_ownqueue);
+-              lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
+-              *lkid = lkb->lkb_id;
++              lkb = iter;
++              list_del_init(&iter->lkb_ownqueue);
++              iter->lkb_flags &= ~DLM_IFL_ORPHAN;
++              *lkid = iter->lkb_id;
+               break;
+       }
+       mutex_unlock(&ls->ls_orphans_mutex);
+-      if (!found && found_other_mode) {
++      if (!lkb && found_other_mode) {
+               rv = -EAGAIN;
+               goto out;
+       }
+-      if (!found) {
++      if (!lkb) {
+               rv = -ENOENT;
+               goto out;
+       }
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f74d5a28ad27c..95f4662c1209a 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -434,9 +434,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+                        loff_t *ppos)
+ {
++      struct plock_op *op = NULL, *iter;
+       struct dlm_plock_info info;
+-      struct plock_op *op;
+-      int found = 0, do_callback = 0;
++      int do_callback = 0;
+       if (count != sizeof(info))
+               return -EINVAL;
+@@ -448,23 +448,23 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+               return -EINVAL;
+       spin_lock(&ops_lock);
+-      list_for_each_entry(op, &recv_list, list) {
+-              if (op->info.fsid == info.fsid &&
+-                  op->info.number == info.number &&
+-                  op->info.owner == info.owner) {
+-                      list_del_init(&op->list);
+-                      memcpy(&op->info, &info, sizeof(info));
+-                      if (op->data)
++      list_for_each_entry(iter, &recv_list, list) {
++              if (iter->info.fsid == info.fsid &&
++                  iter->info.number == info.number &&
++                  iter->info.owner == info.owner) {
++                      list_del_init(&iter->list);
++                      memcpy(&iter->info, &info, sizeof(info));
++                      if (iter->data)
+                               do_callback = 1;
+                       else
+-                              op->done = 1;
+-                      found = 1;
++                              iter->done = 1;
++                      op = iter;
+                       break;
+               }
+       }
+       spin_unlock(&ops_lock);
+-      if (found) {
++      if (op) {
+               if (do_callback)
+                       dlm_plock_callback(op);
+               else
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index 8928e99dfd47d..df18f38a02734 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
+ static void recover_lvb(struct dlm_rsb *r)
+ {
+-      struct dlm_lkb *lkb, *high_lkb = NULL;
++      struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
+       uint32_t high_seq = 0;
+       int lock_lvb_exists = 0;
+-      int big_lock_exists = 0;
+       int lvblen = r->res_ls->ls_lvblen;
+       if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
+       /* we are the new master, so figure out if VALNOTVALID should
+          be set, and set the rsb lvb from the best lkb available. */
+-      list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+-              if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++      list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
++              if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+                       continue;
+               lock_lvb_exists = 1;
+-              if (lkb->lkb_grmode > DLM_LOCK_CR) {
+-                      big_lock_exists = 1;
++              if (iter->lkb_grmode > DLM_LOCK_CR) {
++                      big_lkb = iter;
+                       goto setflag;
+               }
+-              if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+-                      high_lkb = lkb;
+-                      high_seq = lkb->lkb_lvbseq;
++              if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++                      high_lkb = iter;
++                      high_seq = iter->lkb_lvbseq;
+               }
+       }
+-      list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+-              if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++      list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
++              if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+                       continue;
+               lock_lvb_exists = 1;
+-              if (lkb->lkb_grmode > DLM_LOCK_CR) {
+-                      big_lock_exists = 1;
++              if (iter->lkb_grmode > DLM_LOCK_CR) {
++                      big_lkb = iter;
+                       goto setflag;
+               }
+-              if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+-                      high_lkb = lkb;
+-                      high_seq = lkb->lkb_lvbseq;
++              if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++                      high_lkb = iter;
++                      high_seq = iter->lkb_lvbseq;
+               }
+       }
+@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
+               goto out;
+       /* lvb is invalidated if only NL/CR locks remain */
+-      if (!big_lock_exists)
++      if (!big_lkb)
+               rsb_set_flag(r, RSB_VALNOTVALID);
+       if (!r->res_lvbptr) {
+@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
+                       goto out;
+       }
+-      if (big_lock_exists) {
+-              r->res_lvbseq = lkb->lkb_lvbseq;
+-              memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
++      if (big_lkb) {
++              r->res_lvbseq = big_lkb->lkb_lvbseq;
++              memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
+       } else if (high_lkb) {
+               r->res_lvbseq = high_lkb->lkb_lvbseq;
+               memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
+-- 
+2.40.1
+
diff --git a/queue-5.15/drm-amd-display-check-tg-is-non-null-before-checking.patch b/queue-5.15/drm-amd-display-check-tg-is-non-null-before-checking.patch
new file mode 100644 (file)
index 0000000..6de112c
--- /dev/null
@@ -0,0 +1,43 @@
+From d0fcd3b291a7c66b76ad623c940189c25f95e1bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 17:00:28 -0400
+Subject: drm/amd/display: check TG is non-null before checking if enabled
+
+From: Taimur Hassan <syed.hassan@amd.com>
+
+[ Upstream commit 5a25cefc0920088bb9afafeb80ad3dcd84fe278b ]
+
+[Why & How]
+If there is no TG allocation we can dereference a NULL pointer when
+checking if the TG is enabled.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Acked-by: Alan Liu <haoping.liu@amd.com>
+Signed-off-by: Taimur Hassan <syed.hassan@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 6d17cdf1bd921..aa5a1fa68da05 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3142,7 +3142,8 @@ void dcn10_wait_for_mpcc_disconnect(
+               if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+                       struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+-                      if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++                      if (pipe_ctx->stream_res.tg &&
++                              pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+                               res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+                       pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+                       hubp->funcs->set_blank(hubp, true);
+-- 
+2.40.1
+
diff --git a/queue-5.15/drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch b/queue-5.15/drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch
new file mode 100644 (file)
index 0000000..7f1c359
--- /dev/null
@@ -0,0 +1,47 @@
+From c0da6ae383369e70c885bf2c90b5a1625a956557 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Mar 2022 12:08:43 -0400
+Subject: drm/amd/display: do not wait for mpc idle if tg is disabled
+
+From: Josip Pavic <Josip.Pavic@amd.com>
+
+[ Upstream commit 2513ed4f937999c0446fd824f7564f76b697d722 ]
+
+[Why]
+When booting, the driver waits for the MPC idle bit to be set as part of
+pipe initialization. However, on some systems this occurs before OTG is
+enabled, and since the MPC idle bit won't be set until the vupdate
+signal occurs (which requires OTG to be enabled), this never happens and
+the wait times out. This can add hundreds of milliseconds to the boot
+time.
+
+[How]
+Do not wait for mpc idle if tg is disabled
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Pavle Kotarac <Pavle.Kotarac@amd.com>
+Signed-off-by: Josip Pavic <Josip.Pavic@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 5a25cefc0920 ("drm/amd/display: check TG is non-null before checking if enabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 73457c32f3e7f..6d17cdf1bd921 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3142,7 +3142,8 @@ void dcn10_wait_for_mpcc_disconnect(
+               if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+                       struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+-                      res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
++                      if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++                              res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+                       pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+                       hubp->funcs->set_blank(hubp, true);
+               }
+-- 
+2.40.1
+
diff --git a/queue-5.15/exfat-remove-argument-sector-from-exfat_get_dentry.patch b/queue-5.15/exfat-remove-argument-sector-from-exfat_get_dentry.patch
new file mode 100644 (file)
index 0000000..9e47f7e
--- /dev/null
@@ -0,0 +1,419 @@
+From 13d9aff5bbf879cf6ac055cbe9acaad0459f19e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Dec 2021 21:58:37 +0900
+Subject: exfat: remove argument 'sector' from exfat_get_dentry()
+
+From: Yuezhang.Mo <Yuezhang.Mo@sony.com>
+
+[ Upstream commit c71510b3fa27f96840c5737d8a47dd7b370e376c ]
+
+No any function uses argument 'sector', remove it.
+
+Reviewed-by: Andy.Wu <Andy.Wu@sony.com>
+Reviewed-by: Aoyama, Wataru <wataru.aoyama@sony.com>
+Acked-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Yuezhang.Mo <Yuezhang.Mo@sony.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Stable-dep-of: d42334578eba ("exfat: check if filename entries exceeds max filename length")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/exfat/balloc.c   |  2 +-
+ fs/exfat/dir.c      | 36 ++++++++++++++----------------------
+ fs/exfat/exfat_fs.h |  3 +--
+ fs/exfat/namei.c    | 42 ++++++++++++++++--------------------------
+ fs/exfat/nls.c      |  2 +-
+ 5 files changed, 33 insertions(+), 52 deletions(-)
+
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index 1dce6b4e90885..e918decb37358 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -105,7 +105,7 @@ int exfat_load_bitmap(struct super_block *sb)
+                       struct exfat_dentry *ep;
+                       struct buffer_head *bh;
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index f6dd4fc8eaf45..8e355c6efbeb7 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -68,7 +68,6 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ {
+       int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
+       unsigned int type, clu_offset, max_dentries;
+-      sector_t sector;
+       struct exfat_chain dir, clu;
+       struct exfat_uni_name uni_name;
+       struct exfat_dentry *ep;
+@@ -119,7 +118,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+               i = dentry & (dentries_per_clu - 1);
+               for ( ; i < dentries_per_clu; i++, dentry++) {
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, &sector);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+@@ -160,7 +159,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+                               dir_entry->namebuf.lfnbuf_len);
+                       brelse(bh);
+-                      ep = exfat_get_dentry(sb, &clu, i + 1, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i + 1, &bh);
+                       if (!ep)
+                               return -EIO;
+                       dir_entry->size =
+@@ -443,7 +442,6 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
+       struct super_block *sb = inode->i_sb;
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+       struct timespec64 ts = current_time(inode);
+-      sector_t sector;
+       struct exfat_dentry *ep;
+       struct buffer_head *bh;
+@@ -451,7 +449,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
+        * We cannot use exfat_get_dentry_set here because file ep is not
+        * initialized yet.
+        */
+-      ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
++      ep = exfat_get_dentry(sb, p_dir, entry, &bh);
+       if (!ep)
+               return -EIO;
+@@ -475,7 +473,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
+       exfat_update_bh(bh, IS_DIRSYNC(inode));
+       brelse(bh);
+-      ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
++      ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh);
+       if (!ep)
+               return -EIO;
+@@ -494,12 +492,11 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
+       struct super_block *sb = inode->i_sb;
+       int ret = 0;
+       int i, num_entries;
+-      sector_t sector;
+       u16 chksum;
+       struct exfat_dentry *ep, *fep;
+       struct buffer_head *fbh, *bh;
+-      fep = exfat_get_dentry(sb, p_dir, entry, &fbh, &sector);
++      fep = exfat_get_dentry(sb, p_dir, entry, &fbh);
+       if (!fep)
+               return -EIO;
+@@ -507,7 +504,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
+       chksum = exfat_calc_chksum16(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
+       for (i = 1; i < num_entries; i++) {
+-              ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL);
++              ep = exfat_get_dentry(sb, p_dir, entry + i, &bh);
+               if (!ep) {
+                       ret = -EIO;
+                       goto release_fbh;
+@@ -529,13 +526,12 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
+ {
+       struct super_block *sb = inode->i_sb;
+       int i;
+-      sector_t sector;
+       unsigned short *uniname = p_uniname->name;
+       struct exfat_dentry *ep;
+       struct buffer_head *bh;
+       int sync = IS_DIRSYNC(inode);
+-      ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
++      ep = exfat_get_dentry(sb, p_dir, entry, &bh);
+       if (!ep)
+               return -EIO;
+@@ -543,7 +539,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
+       exfat_update_bh(bh, sync);
+       brelse(bh);
+-      ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
++      ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh);
+       if (!ep)
+               return -EIO;
+@@ -553,7 +549,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
+       brelse(bh);
+       for (i = EXFAT_FIRST_CLUSTER; i < num_entries; i++) {
+-              ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
++              ep = exfat_get_dentry(sb, p_dir, entry + i, &bh);
+               if (!ep)
+                       return -EIO;
+@@ -572,12 +568,11 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
+ {
+       struct super_block *sb = inode->i_sb;
+       int i;
+-      sector_t sector;
+       struct exfat_dentry *ep;
+       struct buffer_head *bh;
+       for (i = order; i < num_entries; i++) {
+-              ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
++              ep = exfat_get_dentry(sb, p_dir, entry + i, &bh);
+               if (!ep)
+                       return -EIO;
+@@ -715,8 +710,7 @@ static int exfat_dir_readahead(struct super_block *sb, sector_t sec)
+ }
+ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
+-              struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
+-              sector_t *sector)
++              struct exfat_chain *p_dir, int entry, struct buffer_head **bh)
+ {
+       unsigned int dentries_per_page = EXFAT_B_TO_DEN(PAGE_SIZE);
+       int off;
+@@ -738,8 +732,6 @@ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
+       if (!*bh)
+               return NULL;
+-      if (sector)
+-              *sector = sec;
+       return (struct exfat_dentry *)((*bh)->b_data + off);
+ }
+@@ -958,7 +950,7 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+                       if (rewind && dentry == end_eidx)
+                               goto not_found;
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+@@ -1144,7 +1136,7 @@ int exfat_count_ext_entries(struct super_block *sb, struct exfat_chain *p_dir,
+       struct buffer_head *bh;
+       for (i = 0, entry++; i < ep->dentry.file.num_ext; i++, entry++) {
+-              ext_ep = exfat_get_dentry(sb, p_dir, entry, &bh, NULL);
++              ext_ep = exfat_get_dentry(sb, p_dir, entry, &bh);
+               if (!ext_ep)
+                       return -EIO;
+@@ -1174,7 +1166,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
+       while (clu.dir != EXFAT_EOF_CLUSTER) {
+               for (i = 0; i < dentries_per_clu; i++) {
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+                       entry_type = exfat_get_entry_type(ep);
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index db538709dafa0..8b4253a2e0586 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -470,8 +470,7 @@ int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu);
+ int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
+               int entry, sector_t *sector, int *offset);
+ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
+-              struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
+-              sector_t *sector);
++              struct exfat_chain *p_dir, int entry, struct buffer_head **bh);
+ struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es,
+               int num);
+ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index b22d6c984f8c7..8ed7c24aae1a4 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -229,7 +229,7 @@ static int exfat_search_empty_slot(struct super_block *sb,
+               i = dentry & (dentries_per_clu - 1);
+               for (; i < dentries_per_clu; i++, dentry++) {
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+                       type = exfat_get_entry_type(ep);
+@@ -306,7 +306,6 @@ static int exfat_find_empty_entry(struct inode *inode,
+ {
+       int dentry;
+       unsigned int ret, last_clu;
+-      sector_t sector;
+       loff_t size = 0;
+       struct exfat_chain clu;
+       struct exfat_dentry *ep = NULL;
+@@ -379,7 +378,7 @@ static int exfat_find_empty_entry(struct inode *inode,
+                       struct buffer_head *bh;
+                       ep = exfat_get_dentry(sb,
+-                              &(ei->dir), ei->entry + 1, &bh, &sector);
++                              &(ei->dir), ei->entry + 1, &bh);
+                       if (!ep)
+                               return -EIO;
+@@ -779,7 +778,6 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
+       struct inode *inode = dentry->d_inode;
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       struct buffer_head *bh;
+-      sector_t sector;
+       int num_entries, entry, err = 0;
+       mutex_lock(&EXFAT_SB(sb)->s_lock);
+@@ -791,7 +789,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
+               goto unlock;
+       }
+-      ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
++      ep = exfat_get_dentry(sb, &cdir, entry, &bh);
+       if (!ep) {
+               err = -EIO;
+               goto unlock;
+@@ -895,7 +893,7 @@ static int exfat_check_dir_empty(struct super_block *sb,
+       while (clu.dir != EXFAT_EOF_CLUSTER) {
+               for (i = 0; i < dentries_per_clu; i++) {
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+                       type = exfat_get_entry_type(ep);
+@@ -932,7 +930,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       struct buffer_head *bh;
+-      sector_t sector;
+       int num_entries, entry, err;
+       mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
+@@ -957,7 +954,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+               goto unlock;
+       }
+-      ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
++      ep = exfat_get_dentry(sb, &cdir, entry, &bh);
+       if (!ep) {
+               err = -EIO;
+               goto unlock;
+@@ -1005,13 +1002,12 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+               struct exfat_inode_info *ei)
+ {
+       int ret, num_old_entries, num_new_entries;
+-      sector_t sector_old, sector_new;
+       struct exfat_dentry *epold, *epnew;
+       struct super_block *sb = inode->i_sb;
+       struct buffer_head *new_bh, *old_bh;
+       int sync = IS_DIRSYNC(inode);
+-      epold = exfat_get_dentry(sb, p_dir, oldentry, &old_bh, &sector_old);
++      epold = exfat_get_dentry(sb, p_dir, oldentry, &old_bh);
+       if (!epold)
+               return -EIO;
+@@ -1032,8 +1028,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+               if (newentry < 0)
+                       return newentry; /* -EIO or -ENOSPC */
+-              epnew = exfat_get_dentry(sb, p_dir, newentry, &new_bh,
+-                      &sector_new);
++              epnew = exfat_get_dentry(sb, p_dir, newentry, &new_bh);
+               if (!epnew)
+                       return -EIO;
+@@ -1046,12 +1041,10 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+               brelse(old_bh);
+               brelse(new_bh);
+-              epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
+-                      &sector_old);
++              epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh);
+               if (!epold)
+                       return -EIO;
+-              epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
+-                      &sector_new);
++              epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh);
+               if (!epnew) {
+                       brelse(old_bh);
+                       return -EIO;
+@@ -1094,12 +1087,11 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+               struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei)
+ {
+       int ret, newentry, num_new_entries, num_old_entries;
+-      sector_t sector_mov, sector_new;
+       struct exfat_dentry *epmov, *epnew;
+       struct super_block *sb = inode->i_sb;
+       struct buffer_head *mov_bh, *new_bh;
+-      epmov = exfat_get_dentry(sb, p_olddir, oldentry, &mov_bh, &sector_mov);
++      epmov = exfat_get_dentry(sb, p_olddir, oldentry, &mov_bh);
+       if (!epmov)
+               return -EIO;
+@@ -1117,7 +1109,7 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+       if (newentry < 0)
+               return newentry; /* -EIO or -ENOSPC */
+-      epnew = exfat_get_dentry(sb, p_newdir, newentry, &new_bh, &sector_new);
++      epnew = exfat_get_dentry(sb, p_newdir, newentry, &new_bh);
+       if (!epnew)
+               return -EIO;
+@@ -1130,12 +1122,10 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+       brelse(mov_bh);
+       brelse(new_bh);
+-      epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
+-              &sector_mov);
++      epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh);
+       if (!epmov)
+               return -EIO;
+-      epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
+-              &sector_new);
++      epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh);
+       if (!epnew) {
+               brelse(mov_bh);
+               return -EIO;
+@@ -1195,7 +1185,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
+               EXFAT_I(old_parent_inode)->flags);
+       dentry = ei->entry;
+-      ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh, NULL);
++      ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh);
+       if (!ep) {
+               ret = -EIO;
+               goto out;
+@@ -1214,7 +1204,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
+               p_dir = &(new_ei->dir);
+               new_entry = new_ei->entry;
+-              ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
++              ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh);
+               if (!ep)
+                       goto out;
+@@ -1254,7 +1244,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
+       if (!ret && new_inode) {
+               /* delete entries of new_dir */
+-              ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
++              ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh);
+               if (!ep) {
+                       ret = -EIO;
+                       goto del_out;
+diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
+index 314d5407a1be5..ef115e673406f 100644
+--- a/fs/exfat/nls.c
++++ b/fs/exfat/nls.c
+@@ -761,7 +761,7 @@ int exfat_create_upcase_table(struct super_block *sb)
+       while (clu.dir != EXFAT_EOF_CLUSTER) {
+               for (i = 0; i < sbi->dentries_per_clu; i++) {
+-                      ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
++                      ep = exfat_get_dentry(sb, &clu, i, &bh);
+                       if (!ep)
+                               return -EIO;
+-- 
+2.40.1
+
diff --git a/queue-5.15/exfat-support-dynamic-allocate-bh-for-exfat_entry_se.patch b/queue-5.15/exfat-support-dynamic-allocate-bh-for-exfat_entry_se.patch
new file mode 100644 (file)
index 0000000..6442301
--- /dev/null
@@ -0,0 +1,91 @@
+From bc71b2ede225e52d70c9b809b631aaf4fef463f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Nov 2022 13:50:22 +0800
+Subject: exfat: support dynamic allocate bh for exfat_entry_set_cache
+
+From: Yuezhang Mo <Yuezhang.Mo@sony.com>
+
+[ Upstream commit a3ff29a95fde16906304455aa8c0bd84eb770258 ]
+
+In special cases, a file or a directory may occupied more than 19
+directory entries, pre-allocating 3 bh is not enough. Such as
+  - Support vendor secondary directory entry in the future.
+  - Since file directory entry is damaged, the SecondaryCount
+    field is bigger than 18.
+
+So this commit supports dynamic allocation of bh.
+
+Signed-off-by: Yuezhang Mo <Yuezhang.Mo@sony.com>
+Reviewed-by: Andy Wu <Andy.Wu@sony.com>
+Reviewed-by: Aoyama Wataru <wataru.aoyama@sony.com>
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Stable-dep-of: d42334578eba ("exfat: check if filename entries exceeds max filename length")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/exfat/dir.c      | 15 +++++++++++++++
+ fs/exfat/exfat_fs.h |  5 ++++-
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 8e355c6efbeb7..0c51bc986b7a6 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -613,6 +613,10 @@ int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
+                       bforget(es->bh[i]);
+               else
+                       brelse(es->bh[i]);
++
++      if (IS_DYNAMIC_ES(es))
++              kfree(es->bh);
++
+       kfree(es);
+       return err;
+ }
+@@ -845,6 +849,7 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
+       /* byte offset in sector */
+       off = EXFAT_BLK_OFFSET(byte_offset, sb);
+       es->start_off = off;
++      es->bh = es->__bh;
+       /* sector offset in cluster */
+       sec = EXFAT_B_TO_BLK(byte_offset, sb);
+@@ -864,6 +869,16 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
+       es->num_entries = num_entries;
+       num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
++      if (num_bh > ARRAY_SIZE(es->__bh)) {
++              es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL);
++              if (!es->bh) {
++                      brelse(bh);
++                      kfree(es);
++                      return NULL;
++              }
++              es->bh[0] = bh;
++      }
++
+       for (i = 1; i < num_bh; i++) {
+               /* get the next sector */
+               if (exfat_is_last_sector_in_cluster(sbi, sec)) {
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index 8b4253a2e0586..efa7f116a835f 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -170,10 +170,13 @@ struct exfat_entry_set_cache {
+       bool modified;
+       unsigned int start_off;
+       int num_bh;
+-      struct buffer_head *bh[DIR_CACHE_SIZE];
++      struct buffer_head *__bh[DIR_CACHE_SIZE];
++      struct buffer_head **bh;
+       unsigned int num_entries;
+ };
++#define IS_DYNAMIC_ES(es)     ((es)->__bh != (es)->bh)
++
+ struct exfat_dir_entry {
+       struct exfat_chain dir;
+       int entry;
+-- 
+2.40.1
+
diff --git a/queue-5.15/fbdev-fix-potential-oob-read-in-fast_imageblit.patch b/queue-5.15/fbdev-fix-potential-oob-read-in-fast_imageblit.patch
new file mode 100644 (file)
index 0000000..a9f4ab7
--- /dev/null
@@ -0,0 +1,45 @@
+From b2cd55dfb6f890b54dc672dc47354dc093419bb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Jun 2023 00:16:49 +0800
+Subject: fbdev: fix potential OOB read in fast_imageblit()
+
+From: Zhang Shurong <zhang_shurong@foxmail.com>
+
+[ Upstream commit c2d22806aecb24e2de55c30a06e5d6eb297d161d ]
+
+There is a potential OOB read at fast_imageblit, for
+"colortab[(*src >> 4)]" can become a negative value due to
+"const char *s = image->data, *src".
+This change makes sure the index for colortab always positive
+or zero.
+
+Similar commit:
+https://patchwork.kernel.org/patch/11746067
+
+Potential bug report:
+https://groups.google.com/g/syzkaller-bugs/c/9ubBXKeKXf4/m/k-QXy4UgAAAJ
+
+Signed-off-by: Zhang Shurong <zhang_shurong@foxmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 335e92b813fc4..665ef7a0a2495 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -189,7 +189,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+       u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+       u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+       u32 bit_mask, eorx, shift;
+-      const char *s = image->data, *src;
++      const u8 *s = image->data, *src;
+       u32 *dst;
+       const u32 *tab;
+       size_t tablen;
+-- 
+2.40.1
+
diff --git a/queue-5.15/fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch b/queue-5.15/fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch
new file mode 100644 (file)
index 0000000..fddf2fc
--- /dev/null
@@ -0,0 +1,103 @@
+From 149285a665612583506df690fa0a3d6eef12c2f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Mar 2022 20:29:51 +0100
+Subject: fbdev: Fix sys_imageblit() for arbitrary image widths
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 61bfcb6a3b981e8f19e044ac8c3de6edbe6caf70 ]
+
+Commit 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+broke sys_imageblit() for image width that are not aligned to 8-bit
+boundaries. Fix this by handling the trailing pixels on each line
+separately. The performance improvements in the original commit do not
+regress by this change.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Javier Martinez Canillas <javierm@redhat.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220313192952.12058-2-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 29 ++++++++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 722c327a381bd..335e92b813fc4 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,7 +188,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+       u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+       u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+-      u32 bit_mask, eorx;
++      u32 bit_mask, eorx, shift;
+       const char *s = image->data, *src;
+       u32 *dst;
+       const u32 *tab;
+@@ -229,17 +229,23 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+       for (i = image->height; i--; ) {
+               dst = dst1;
++              shift = 8;
+               src = s;
++              /*
++               * Manually unroll the per-line copying loop for better
++               * performance. This works until we processed the last
++               * completely filled source byte (inclusive).
++               */
+               switch (ppw) {
+               case 4: /* 8 bpp */
+-                      for (j = k; j; j -= 2, ++src) {
++                      for (j = k; j >= 2; j -= 2, ++src) {
+                               *dst++ = colortab[(*src >> 4) & bit_mask];
+                               *dst++ = colortab[(*src >> 0) & bit_mask];
+                       }
+                       break;
+               case 2: /* 16 bpp */
+-                      for (j = k; j; j -= 4, ++src) {
++                      for (j = k; j >= 4; j -= 4, ++src) {
+                               *dst++ = colortab[(*src >> 6) & bit_mask];
+                               *dst++ = colortab[(*src >> 4) & bit_mask];
+                               *dst++ = colortab[(*src >> 2) & bit_mask];
+@@ -247,7 +253,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+                       }
+                       break;
+               case 1: /* 32 bpp */
+-                      for (j = k; j; j -= 8, ++src) {
++                      for (j = k; j >= 8; j -= 8, ++src) {
+                               *dst++ = colortab[(*src >> 7) & bit_mask];
+                               *dst++ = colortab[(*src >> 6) & bit_mask];
+                               *dst++ = colortab[(*src >> 5) & bit_mask];
+@@ -259,6 +265,21 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+                       }
+                       break;
+               }
++
++              /*
++               * For image widths that are not a multiple of 8, there
++               * are trailing pixels left on the current line. Print
++               * them as well.
++               */
++              for (; j--; ) {
++                      shift -= ppw;
++                      *dst++ = colortab[(*src >> shift) & bit_mask];
++                      if (!shift) {
++                              shift = 8;
++                              ++src;
++                      }
++              }
++
+               dst1 += p->fix.line_length;
+               s += spitch;
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.15/fbdev-improve-performance-of-sys_imageblit.patch b/queue-5.15/fbdev-improve-performance-of-sys_imageblit.patch
new file mode 100644 (file)
index 0000000..a86a14c
--- /dev/null
@@ -0,0 +1,130 @@
+From bbeaf9b6f85955f4994d0158070aa301532059d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Feb 2022 20:38:01 +0100
+Subject: fbdev: Improve performance of sys_imageblit()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 6f29e04938bf509fccfad490a74284cf158891ce ]
+
+Improve the performance of sys_imageblit() by manually unrolling
+the inner blitting loop and moving some invariants out. The compiler
+failed to do this automatically. The resulting binary code was even
+slower than the cfb_imageblit() helper, which uses the same algorithm,
+but operates on I/O memory.
+
+A microbenchmark measures the average number of CPU cycles
+for sys_imageblit() after a stabilizing period of a few minutes
+(i7-4790, FullHD, simpledrm, kernel with debugging). The value
+for CFB is given as a reference.
+
+  sys_imageblit(), new: 25934 cycles
+  sys_imageblit(), old: 35944 cycles
+  cfb_imageblit():      30566 cycles
+
+In the optimized case, sys_imageblit() is now ~30% faster than before
+and ~20% faster than cfb_imageblit().
+
+v2:
+       * move switch out of inner loop (Gerd)
+       * remove test for alignment of dst1 (Sam)
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220223193804.18636-3-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 49 +++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index a4d05b1b17d7d..722c327a381bd 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+       u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+       u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+-      u32 bit_mask, end_mask, eorx, shift;
++      u32 bit_mask, eorx;
+       const char *s = image->data, *src;
+       u32 *dst;
+-      const u32 *tab = NULL;
++      const u32 *tab;
++      size_t tablen;
++      u32 colortab[16];
+       int i, j, k;
+       switch (bpp) {
+       case 8:
+               tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
++              tablen = 16;
+               break;
+       case 16:
+               tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
++              tablen = 4;
+               break;
+       case 32:
+-      default:
+               tab = cfb_tab32;
++              tablen = 2;
+               break;
++      default:
++              return;
+       }
+       for (i = ppw-1; i--; ) {
+@@ -218,19 +224,40 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+       eorx = fgx ^ bgx;
+       k = image->width/ppw;
++      for (i = 0; i < tablen; ++i)
++              colortab[i] = (tab[i] & eorx) ^ bgx;
++
+       for (i = image->height; i--; ) {
+               dst = dst1;
+-              shift = 8;
+               src = s;
+-              for (j = k; j--; ) {
+-                      shift -= ppw;
+-                      end_mask = tab[(*src >> shift) & bit_mask];
+-                      *dst++ = (end_mask & eorx) ^ bgx;
+-                      if (!shift) {
+-                              shift = 8;
+-                              src++;
++              switch (ppw) {
++              case 4: /* 8 bpp */
++                      for (j = k; j; j -= 2, ++src) {
++                              *dst++ = colortab[(*src >> 4) & bit_mask];
++                              *dst++ = colortab[(*src >> 0) & bit_mask];
++                      }
++                      break;
++              case 2: /* 16 bpp */
++                      for (j = k; j; j -= 4, ++src) {
++                              *dst++ = colortab[(*src >> 6) & bit_mask];
++                              *dst++ = colortab[(*src >> 4) & bit_mask];
++                              *dst++ = colortab[(*src >> 2) & bit_mask];
++                              *dst++ = colortab[(*src >> 0) & bit_mask];
++                      }
++                      break;
++              case 1: /* 32 bpp */
++                      for (j = k; j; j -= 8, ++src) {
++                              *dst++ = colortab[(*src >> 7) & bit_mask];
++                              *dst++ = colortab[(*src >> 6) & bit_mask];
++                              *dst++ = colortab[(*src >> 5) & bit_mask];
++                              *dst++ = colortab[(*src >> 4) & bit_mask];
++                              *dst++ = colortab[(*src >> 3) & bit_mask];
++                              *dst++ = colortab[(*src >> 2) & bit_mask];
++                              *dst++ = colortab[(*src >> 1) & bit_mask];
++                              *dst++ = colortab[(*src >> 0) & bit_mask];
+                       }
++                      break;
+               }
+               dst1 += p->fix.line_length;
+               s += spitch;
+-- 
+2.40.1
+
diff --git a/queue-5.15/fs-dlm-add-pid-to-debug-log.patch b/queue-5.15/fs-dlm-add-pid-to-debug-log.patch
new file mode 100644 (file)
index 0000000..f4e54bf
--- /dev/null
@@ -0,0 +1,39 @@
+From 670fb7c2e4fc0324d81c91e70f0c0ab2b7a77f48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:06 -0400
+Subject: fs: dlm: add pid to debug log
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 19d7ca051d303622c423b4cb39e6bde5d177328b ]
+
+This patch adds the pid information which requested the lock operation
+to the debug log output.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 95f4662c1209a..f685d56a4f909 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,9 +164,9 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+               spin_lock(&ops_lock);
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
+-              log_print("%s: wait interrupted %x %llx, op removed",
++              log_print("%s: wait interrupted %x %llx pid %d, op removed",
+                         __func__, ls->ls_global_id,
+-                        (unsigned long long)number);
++                        (unsigned long long)number, op->info.pid);
+               dlm_release_plock_op(op);
+               do_unlock_close(ls, number, file, fl);
+               goto out;
+-- 
+2.40.1
+
diff --git a/queue-5.15/fs-dlm-change-plock-interrupted-message-to-debug-aga.patch b/queue-5.15/fs-dlm-change-plock-interrupted-message-to-debug-aga.patch
new file mode 100644 (file)
index 0000000..5ad89e2
--- /dev/null
@@ -0,0 +1,46 @@
+From 48e2e686900a947d35fe23b15a6e200dad3cd3db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:05 -0400
+Subject: fs: dlm: change plock interrupted message to debug again
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit ea06d4cabf529eefbe7e89e3a8325f1f89355ccd ]
+
+This patch reverses the commit bcfad4265ced ("dlm: improve plock logging
+if interrupted") by moving it to debug level and notifying the user an op
+was removed.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f685d56a4f909..0d00ca2c44c71 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,7 +164,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+               spin_lock(&ops_lock);
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
+-              log_print("%s: wait interrupted %x %llx pid %d, op removed",
++              log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+                         __func__, ls->ls_global_id,
+                         (unsigned long long)number, op->info.pid);
+               dlm_release_plock_op(op);
+@@ -470,7 +470,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+               else
+                       wake_up(&recv_wq);
+       } else
+-              log_print("%s: no op %x %llx - may got interrupted?", __func__,
++              log_print("%s: no op %x %llx", __func__,
+                         info.fsid, (unsigned long long)info.number);
+       return count;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch b/queue-5.15/fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch
new file mode 100644 (file)
index 0000000..4088a24
--- /dev/null
@@ -0,0 +1,109 @@
+From e97a6229e6da7180e7e99fe12bf994fd85a08540 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 12:02:04 -0400
+Subject: fs: dlm: fix mismatch of plock results from userspace
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 57e2c2f2d94cfd551af91cedfa1af6d972487197 ]
+
+When a waiting plock request (F_SETLKW) is sent to userspace
+for processing (dlm_controld), the result is returned at a
+later time. That result could be incorrectly matched to a
+different waiting request in cases where the owner field is
+the same (e.g. different threads in a process.) This is fixed
+by comparing all the properties in the request and reply.
+
+The results for non-waiting plock requests are now matched
+based on list order because the results are returned in the
+same order they were sent.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 58 +++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 45 insertions(+), 13 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index fa8969c0a5f55..28735e8c5e206 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -405,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+               if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+                       list_del(&op->list);
+               else
+-                      list_move(&op->list, &recv_list);
++                      list_move_tail(&op->list, &recv_list);
+               memcpy(&info, &op->info, sizeof(info));
+       }
+       spin_unlock(&ops_lock);
+@@ -443,20 +443,52 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+       if (check_version(&info))
+               return -EINVAL;
++      /*
++       * The results for waiting ops (SETLKW) can be returned in any
++       * order, so match all fields to find the op.  The results for
++       * non-waiting ops are returned in the order that they were sent
++       * to userspace, so match the result with the first non-waiting op.
++       */
+       spin_lock(&ops_lock);
+-      list_for_each_entry(iter, &recv_list, list) {
+-              if (iter->info.fsid == info.fsid &&
+-                  iter->info.number == info.number &&
+-                  iter->info.owner == info.owner) {
+-                      list_del_init(&iter->list);
+-                      memcpy(&iter->info, &info, sizeof(info));
+-                      if (iter->data)
+-                              do_callback = 1;
+-                      else
+-                              iter->done = 1;
+-                      op = iter;
+-                      break;
++      if (info.wait) {
++              list_for_each_entry(iter, &recv_list, list) {
++                      if (iter->info.fsid == info.fsid &&
++                          iter->info.number == info.number &&
++                          iter->info.owner == info.owner &&
++                          iter->info.pid == info.pid &&
++                          iter->info.start == info.start &&
++                          iter->info.end == info.end &&
++                          iter->info.ex == info.ex &&
++                          iter->info.wait) {
++                              op = iter;
++                              break;
++                      }
+               }
++      } else {
++              list_for_each_entry(iter, &recv_list, list) {
++                      if (!iter->info.wait) {
++                              op = iter;
++                              break;
++                      }
++              }
++      }
++
++      if (op) {
++              /* Sanity check that op and info match. */
++              if (info.wait)
++                      WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
++              else
++                      WARN_ON(op->info.fsid != info.fsid ||
++                              op->info.number != info.number ||
++                              op->info.owner != info.owner ||
++                              op->info.optype != info.optype);
++
++              list_del_init(&op->list);
++              memcpy(&op->info, &info, sizeof(info));
++              if (op->data)
++                      do_callback = 1;
++              else
++                      op->done = 1;
+       }
+       spin_unlock(&ops_lock);
+-- 
+2.40.1
+
diff --git a/queue-5.15/fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch b/queue-5.15/fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch
new file mode 100644 (file)
index 0000000..28fd08c
--- /dev/null
@@ -0,0 +1,66 @@
+From 1af64111dbaa3622cf5e87b03040861f1772a5ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:08 -0400
+Subject: fs: dlm: use dlm_plock_info for do_unlock_close
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 4d413ae9ced4180c0e2114553c3a7560b509b0f8 ]
+
+This patch refactors do_unlock_close() by using only struct dlm_plock_info
+as a parameter.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 0d00ca2c44c71..fa8969c0a5f55 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -80,8 +80,7 @@ static void send_op(struct plock_op *op)
+    abandoned waiter.  So, we have to insert the unlock-close when the
+    lock call is interrupted. */
+-static void do_unlock_close(struct dlm_ls *ls, u64 number,
+-                          struct file *file, struct file_lock *fl)
++static void do_unlock_close(const struct dlm_plock_info *info)
+ {
+       struct plock_op *op;
+@@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
+               return;
+       op->info.optype         = DLM_PLOCK_OP_UNLOCK;
+-      op->info.pid            = fl->fl_pid;
+-      op->info.fsid           = ls->ls_global_id;
+-      op->info.number         = number;
++      op->info.pid            = info->pid;
++      op->info.fsid           = info->fsid;
++      op->info.number         = info->number;
+       op->info.start          = 0;
+       op->info.end            = OFFSET_MAX;
+-      if (fl->fl_lmops && fl->fl_lmops->lm_grant)
+-              op->info.owner  = (__u64) fl->fl_pid;
+-      else
+-              op->info.owner  = (__u64)(long) fl->fl_owner;
++      op->info.owner          = info->owner;
+       op->info.flags |= DLM_PLOCK_FL_CLOSE;
+       send_op(op);
+@@ -168,7 +164,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+                         __func__, ls->ls_global_id,
+                         (unsigned long long)number, op->info.pid);
+               dlm_release_plock_op(op);
+-              do_unlock_close(ls, number, file, fl);
++              do_unlock_close(&op->info);
+               goto out;
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.15/ice-fix-receive-buffer-size-miscalculation.patch b/queue-5.15/ice-fix-receive-buffer-size-miscalculation.patch
new file mode 100644 (file)
index 0000000..bc3673c
--- /dev/null
@@ -0,0 +1,50 @@
+From 5be47c57a161e8a36a173976283e7749fa570afd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Aug 2023 16:51:10 -0700
+Subject: ice: fix receive buffer size miscalculation
+
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+
+[ Upstream commit 10083aef784031fa9f06c19a1b182e6fad5338d9 ]
+
+The driver is misconfiguring the hardware for some values of MTU such that
+it could use multiple descriptors to receive a packet when it could have
+simply used one.
+
+Change the driver to use a round-up instead of the result of a shift, as
+the shift can truncate the lower bits of the size, and result in the
+problem noted above. It also aligns this driver with similar code in i40e.
+
+The insidiousness of this problem is that everything works with the wrong
+size, it's just not working as well as it could, as some MTU sizes end up
+using two or more descriptors, and there is no way to tell that is
+happening without looking at ice_trace or a bus analyzer.
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 533a953f15acb..09525dbeccfec 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -359,7 +359,8 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
+       /* Receive Packet Data Buffer Size.
+        * The Packet Data Buffer Size is defined in 128 byte units.
+        */
+-      rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
++      rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
++                                   BIT_ULL(ICE_RLAN_CTX_DBUF_S));
+       /* use 32 byte descriptors */
+       rlan_ctx.dsize = 1;
+-- 
+2.40.1
+
diff --git a/queue-5.15/igb-avoid-starting-unnecessary-workqueues.patch b/queue-5.15/igb-avoid-starting-unnecessary-workqueues.patch
new file mode 100644 (file)
index 0000000..b28990a
--- /dev/null
@@ -0,0 +1,91 @@
+From dc5c9745c1fa3f5c58b1d039c854feba042e3533 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Aug 2023 10:19:27 -0700
+Subject: igb: Avoid starting unnecessary workqueues
+
+From: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+
+[ Upstream commit b888c510f7b3d64ca75fc0f43b4a4bd1a611312f ]
+
+If ptp_clock_register() fails or CONFIG_PTP isn't enabled, avoid starting
+PTP related workqueues.
+
+In this way we can fix this:
+ BUG: unable to handle page fault for address: ffffc9000440b6f8
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 100000067 P4D 100000067 PUD 1001e0067 PMD 107dc5067 PTE 0
+ Oops: 0000 [#1] PREEMPT SMP
+ [...]
+ Workqueue: events igb_ptp_overflow_check
+ RIP: 0010:igb_rd32+0x1f/0x60
+ [...]
+ Call Trace:
+  igb_ptp_read_82580+0x20/0x50
+  timecounter_read+0x15/0x60
+  igb_ptp_overflow_check+0x1a/0x50
+  process_one_work+0x1cb/0x3c0
+  worker_thread+0x53/0x3f0
+  ? rescuer_thread+0x370/0x370
+  kthread+0x142/0x160
+  ? kthread_associate_blkcg+0xc0/0xc0
+  ret_from_fork+0x1f/0x30
+
+Fixes: 1f6e8178d685 ("igb: Prevent dropped Tx timestamps via work items and interrupts.")
+Fixes: d339b1331616 ("igb: add PTP Hardware Clock code")
+Signed-off-by: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+Tested-by: Arpana Arland <arpanax.arland@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20230821171927.2203644-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ptp.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 0011b15e678c3..9cdb7a856ab6c 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1260,18 +1260,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+               return;
+       }
+-      spin_lock_init(&adapter->tmreg_lock);
+-      INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+-      if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+-              INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+-                                igb_ptp_overflow_check);
+-
+-      adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+-      adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+-      igb_ptp_reset(adapter);
+-
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+                                               &adapter->pdev->dev);
+       if (IS_ERR(adapter->ptp_clock)) {
+@@ -1281,6 +1269,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+               dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+                        adapter->netdev->name);
+               adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++              spin_lock_init(&adapter->tmreg_lock);
++              INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++              if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++                      INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++                                        igb_ptp_overflow_check);
++
++              adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++              adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++              igb_ptp_reset(adapter);
+       }
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/igc-fix-the-typo-in-the-ptm-control-macro.patch b/queue-5.15/igc-fix-the-typo-in-the-ptm-control-macro.patch
new file mode 100644 (file)
index 0000000..e502d8d
--- /dev/null
@@ -0,0 +1,43 @@
+From f321e02183a4bc364b330cd1e6a6bdae735496fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Aug 2023 10:17:21 -0700
+Subject: igc: Fix the typo in the PTM Control macro
+
+From: Sasha Neftin <sasha.neftin@intel.com>
+
+[ Upstream commit de43975721b97283d5f17eea4228faddf08f2681 ]
+
+The IGC_PTM_CTRL_SHRT_CYC defines the time between two consecutive PTM
+requests. The bit resolution of this field is six bits. That bit five was
+missing in the mask. This patch comes to correct the typo in the
+IGC_PTM_CTRL_SHRT_CYC macro.
+
+Fixes: a90ec8483732 ("igc: Add support for PTP getcrosststamp()")
+Signed-off-by: Sasha Neftin <sasha.neftin@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://lore.kernel.org/r/20230821171721.2203572-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_defines.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index 60d0ca69ceca9..703b62c5f79b5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -539,7 +539,7 @@
+ #define IGC_PTM_CTRL_START_NOW        BIT(29) /* Start PTM Now */
+ #define IGC_PTM_CTRL_EN               BIT(30) /* Enable PTM */
+ #define IGC_PTM_CTRL_TRIG     BIT(31) /* PTM Cycle trigger */
+-#define IGC_PTM_CTRL_SHRT_CYC(usec)   (((usec) & 0x2f) << 2)
++#define IGC_PTM_CTRL_SHRT_CYC(usec)   (((usec) & 0x3f) << 2)
+ #define IGC_PTM_CTRL_PTM_TO(usec)     (((usec) & 0xff) << 8)
+ #define IGC_PTM_SHORT_CYC_DEFAULT     10  /* Default Short/interrupted cycle interval */
+-- 
+2.40.1
+
diff --git a/queue-5.15/ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch b/queue-5.15/ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch
new file mode 100644 (file)
index 0000000..e9adc6a
--- /dev/null
@@ -0,0 +1,90 @@
+From eca4703660e03198b481d6fbbcb12103318dc4be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 22:54:49 +0800
+Subject: ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
+
+From: Lu Wei <luwei32@huawei.com>
+
+[ Upstream commit 043d5f68d0ccdda91029b4b6dce7eeffdcfad281 ]
+
+There are two network devices(veth1 and veth3) in ns1, and ipvlan1 with
+L3S mode and ipvlan2 with L2 mode are created based on them as
+figure (1). In this case, ipvlan_register_nf_hook() will be called to
+register nf hook which is needed by ipvlans in L3S mode in ns1 and value
+of ipvl_nf_hook_refcnt is set to 1.
+
+(1)
+           ns1                           ns2
+      ------------                  ------------
+
+   veth1--ipvlan1 (L3S)
+
+   veth3--ipvlan2 (L2)
+
+(2)
+           ns1                           ns2
+      ------------                  ------------
+
+   veth1--ipvlan1 (L3S)
+
+         ipvlan2 (L2)                  veth3
+     |                                  |
+     |------->-------->--------->--------
+                    migrate
+
+When veth3 migrates from ns1 to ns2 as figure (2), veth3 will register in
+ns2 and calls call_netdevice_notifiers with NETDEV_REGISTER event:
+
+dev_change_net_namespace
+    call_netdevice_notifiers
+        ipvlan_device_event
+            ipvlan_migrate_l3s_hook
+                ipvlan_register_nf_hook(newnet)      (I)
+                ipvlan_unregister_nf_hook(oldnet)    (II)
+
+In function ipvlan_migrate_l3s_hook(), ipvl_nf_hook_refcnt in ns1 is not 0
+since veth1 with ipvlan1 still in ns1, (I) and (II) will be called to
+register nf_hook in ns2 and unregister nf_hook in ns1. As a result,
+ipvl_nf_hook_refcnt in ns1 is decreased incorrectly and this in ns2
+is increased incorrectly. When the second net namespace is removed, a
+reference count leak warning in ipvlan_ns_exit() will be triggered.
+
+This patch add a check before ipvlan_migrate_l3s_hook() is called. The
+warning can be triggered as follows:
+
+$ ip netns add ns1
+$ ip netns add ns2
+$ ip netns exec ns1 ip link add veth1 type veth peer name veth2
+$ ip netns exec ns1 ip link add veth3 type veth peer name veth4
+$ ip netns exec ns1 ip link add ipv1 link veth1 type ipvlan mode l3s
+$ ip netns exec ns1 ip link add ipv2 link veth3 type ipvlan mode l2
+$ ip netns exec ns1 ip link set veth3 netns ns2
+$ ip net del ns2
+
+Fixes: 3133822f5ac1 ("ipvlan: use pernet operations and restrict l3s hooks to master netns")
+Signed-off-by: Lu Wei <luwei32@huawei.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Link: https://lore.kernel.org/r/20230817145449.141827-1-luwei32@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 3f43c253adaca..c199f0b465cd0 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
+               write_pnet(&port->pnet, newnet);
+-              ipvlan_migrate_l3s_hook(oldnet, newnet);
++              if (port->mode == IPVLAN_MODE_L3S)
++                      ipvlan_migrate_l3s_hook(oldnet, newnet);
+               break;
+       }
+       case NETDEV_UNREGISTER:
+-- 
+2.40.1
+
diff --git a/queue-5.15/jbd2-fix-a-race-when-checking-checkpoint-buffer-busy.patch b/queue-5.15/jbd2-fix-a-race-when-checking-checkpoint-buffer-busy.patch
new file mode 100644 (file)
index 0000000..4278ff5
--- /dev/null
@@ -0,0 +1,150 @@
+From 7622192d54dcbc355085100740684998a5648c9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 21:59:27 +0800
+Subject: jbd2: fix a race when checking checkpoint buffer busy
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 46f881b5b1758dc4a35fba4a643c10717d0cf427 ]
+
+Before removing checkpoint buffer from the t_checkpoint_list, we have to
+check both BH_Dirty and BH_Lock bits together to distinguish buffers
+have not been or were being written back. But __cp_buffer_busy() checks
+them separately, it first check lock state and then check dirty, the
+window between these two checks could be raced by writing back
+procedure, which locks buffer and clears buffer dirty before I/O
+completes. So it cannot guarantee checkpointing buffers been written
+back to disk if some error happens later. Finally, it may clean
+checkpoint transactions and lead to inconsistent filesystem.
+
+jbd2_journal_forget() and __journal_try_to_free_buffer() also have the
+same problem (journal_unmap_buffer() escape from this issue since it's
+running under the buffer lock), so fix them through introducing a new
+helper to try holding the buffer lock and remove really clean buffer.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217490
+Cc: stable@vger.kernel.org
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230606135928.434610-6-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jbd2/checkpoint.c  | 38 +++++++++++++++++++++++++++++++++++---
+ fs/jbd2/transaction.c | 17 +++++------------
+ include/linux/jbd2.h  |  1 +
+ 3 files changed, 41 insertions(+), 15 deletions(-)
+
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index ab72aeb766a74..fc6989e7a8c51 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -376,11 +376,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+               jh = next_jh;
+               next_jh = jh->b_cpnext;
+-              if (!destroy && __cp_buffer_busy(jh))
+-                      continue;
++              if (destroy) {
++                      ret = __jbd2_journal_remove_checkpoint(jh);
++              } else {
++                      ret = jbd2_journal_try_remove_checkpoint(jh);
++                      if (ret < 0)
++                              continue;
++              }
+               nr_freed++;
+-              ret = __jbd2_journal_remove_checkpoint(jh);
+               if (ret) {
+                       *released = true;
+                       break;
+@@ -616,6 +620,34 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
+       return 1;
+ }
++/*
++ * Check the checkpoint buffer and try to remove it from the checkpoint
++ * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
++ * it frees the transaction, 0 otherwise.
++ *
++ * This function is called with j_list_lock held.
++ */
++int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
++{
++      struct buffer_head *bh = jh2bh(jh);
++
++      if (!trylock_buffer(bh))
++              return -EBUSY;
++      if (buffer_dirty(bh)) {
++              unlock_buffer(bh);
++              return -EBUSY;
++      }
++      unlock_buffer(bh);
++
++      /*
++       * Buffer is clean and the IO has finished (we held the buffer
++       * lock) so the checkpoint is done. We can safely remove the
++       * buffer from this transaction.
++       */
++      JBUFFER_TRACE(jh, "remove from checkpoint list");
++      return __jbd2_journal_remove_checkpoint(jh);
++}
++
+ /*
+  * journal_insert_checkpoint: put a committed buffer onto a checkpoint
+  * list so that we know when it is safe to clean the transaction out of
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index ce4a5ccadeff4..62e68c5b8ec3d 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1775,8 +1775,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
+                * Otherwise, if the buffer has been written to disk,
+                * it is safe to remove the checkpoint and drop it.
+                */
+-              if (!buffer_dirty(bh)) {
+-                      __jbd2_journal_remove_checkpoint(jh);
++              if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
+                       spin_unlock(&journal->j_list_lock);
+                       goto drop;
+               }
+@@ -2103,20 +2102,14 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
+       jh = bh2jh(bh);
+-      if (buffer_locked(bh) || buffer_dirty(bh))
+-              goto out;
+-
+       if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
+-              goto out;
++              return;
+       spin_lock(&journal->j_list_lock);
+-      if (jh->b_cp_transaction != NULL) {
+-              /* written-back checkpointed metadata buffer */
+-              JBUFFER_TRACE(jh, "remove from checkpoint list");
+-              __jbd2_journal_remove_checkpoint(jh);
+-      }
++      /* Remove written-back checkpointed metadata buffer */
++      if (jh->b_cp_transaction != NULL)
++              jbd2_journal_try_remove_checkpoint(jh);
+       spin_unlock(&journal->j_list_lock);
+-out:
+       return;
+ }
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index e6cfbcde96f29..ade8a6d7acff9 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1441,6 +1441,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
+ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
+ int __jbd2_journal_remove_checkpoint(struct journal_head *);
++int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
+ void jbd2_journal_destroy_checkpoint(journal_t *journal);
+ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+-- 
+2.40.1
+
diff --git a/queue-5.15/jbd2-remove-journal_clean_one_cp_list.patch b/queue-5.15/jbd2-remove-journal_clean_one_cp_list.patch
new file mode 100644 (file)
index 0000000..dde1a8f
--- /dev/null
@@ -0,0 +1,235 @@
+From 7bd25c0b46f8c5a58f57b7c650e5da5a0d9224da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 21:59:25 +0800
+Subject: jbd2: remove journal_clean_one_cp_list()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit b98dba273a0e47dbfade89c9af73c5b012a4eabb ]
+
+journal_clean_one_cp_list() and journal_shrink_one_cp_list() are almost
+the same, so merge them into journal_shrink_one_cp_list(), remove the
+nr_to_scan parameter, always scan and try to free the whole checkpoint
+list.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230606135928.434610-4-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 46f881b5b175 ("jbd2: fix a race when checking checkpoint buffer busy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jbd2/checkpoint.c        | 75 +++++++++----------------------------
+ include/trace/events/jbd2.h | 12 ++----
+ 2 files changed, 21 insertions(+), 66 deletions(-)
+
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index c1f543e86170a..ab72aeb766a74 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -349,50 +349,10 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ /* Checkpoint list management */
+-/*
+- * journal_clean_one_cp_list
+- *
+- * Find all the written-back checkpoint buffers in the given list and
+- * release them. If 'destroy' is set, clean all buffers unconditionally.
+- *
+- * Called with j_list_lock held.
+- * Returns 1 if we freed the transaction, 0 otherwise.
+- */
+-static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+-{
+-      struct journal_head *last_jh;
+-      struct journal_head *next_jh = jh;
+-
+-      if (!jh)
+-              return 0;
+-
+-      last_jh = jh->b_cpprev;
+-      do {
+-              jh = next_jh;
+-              next_jh = jh->b_cpnext;
+-
+-              if (!destroy && __cp_buffer_busy(jh))
+-                      return 0;
+-
+-              if (__jbd2_journal_remove_checkpoint(jh))
+-                      return 1;
+-              /*
+-               * This function only frees up some memory
+-               * if possible so we dont have an obligation
+-               * to finish processing. Bail out if preemption
+-               * requested:
+-               */
+-              if (need_resched())
+-                      return 0;
+-      } while (jh != last_jh);
+-
+-      return 0;
+-}
+-
+ /*
+  * journal_shrink_one_cp_list
+  *
+- * Find 'nr_to_scan' written-back checkpoint buffers in the given list
++ * Find all the written-back checkpoint buffers in the given list
+  * and try to release them. If the whole transaction is released, set
+  * the 'released' parameter. Return the number of released checkpointed
+  * buffers.
+@@ -400,15 +360,15 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+  * Called with j_list_lock held.
+  */
+ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+-                                              unsigned long *nr_to_scan,
+-                                              bool *released)
++                                              bool destroy, bool *released)
+ {
+       struct journal_head *last_jh;
+       struct journal_head *next_jh = jh;
+       unsigned long nr_freed = 0;
+       int ret;
+-      if (!jh || *nr_to_scan == 0)
++      *released = false;
++      if (!jh)
+               return 0;
+       last_jh = jh->b_cpprev;
+@@ -416,8 +376,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+               jh = next_jh;
+               next_jh = jh->b_cpnext;
+-              (*nr_to_scan)--;
+-              if (__cp_buffer_busy(jh))
++              if (!destroy && __cp_buffer_busy(jh))
+                       continue;
+               nr_freed++;
+@@ -429,7 +388,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+               if (need_resched())
+                       break;
+-      } while (jh != last_jh && *nr_to_scan);
++      } while (jh != last_jh);
+       return nr_freed;
+ }
+@@ -447,11 +406,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+                                                 unsigned long *nr_to_scan)
+ {
+       transaction_t *transaction, *last_transaction, *next_transaction;
+-      bool released;
++      bool __maybe_unused released;
+       tid_t first_tid = 0, last_tid = 0, next_tid = 0;
+       tid_t tid = 0;
+       unsigned long nr_freed = 0;
+-      unsigned long nr_scanned = *nr_to_scan;
++      unsigned long freed;
+ again:
+       spin_lock(&journal->j_list_lock);
+@@ -480,10 +439,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+               transaction = next_transaction;
+               next_transaction = transaction->t_cpnext;
+               tid = transaction->t_tid;
+-              released = false;
+-              nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+-                                                     nr_to_scan, &released);
++              freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
++                                                 false, &released);
++              nr_freed += freed;
++              (*nr_to_scan) -= min(*nr_to_scan, freed);
+               if (*nr_to_scan == 0)
+                       break;
+               if (need_resched() || spin_needbreak(&journal->j_list_lock))
+@@ -504,9 +464,8 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+       if (*nr_to_scan && next_tid)
+               goto again;
+ out:
+-      nr_scanned -= *nr_to_scan;
+       trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
+-                                        nr_freed, nr_scanned, next_tid);
++                                        nr_freed, next_tid);
+       return nr_freed;
+ }
+@@ -522,7 +481,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+       transaction_t *transaction, *last_transaction, *next_transaction;
+-      int ret;
++      bool released;
+       transaction = journal->j_checkpoint_transactions;
+       if (!transaction)
+@@ -533,8 +492,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+       do {
+               transaction = next_transaction;
+               next_transaction = transaction->t_cpnext;
+-              ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
+-                                              destroy);
++              journal_shrink_one_cp_list(transaction->t_checkpoint_list,
++                                         destroy, &released);
+               /*
+                * This function only frees up some memory if possible so we
+                * dont have an obligation to finish processing. Bail out if
+@@ -547,7 +506,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+                * avoids pointless scanning of transactions which still
+                * weren't checkpointed.
+                */
+-              if (!ret)
++              if (!released)
+                       return;
+       } while (transaction != last_transaction);
+ }
+diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
+index 29414288ea3e0..34ce197bd76e0 100644
+--- a/include/trace/events/jbd2.h
++++ b/include/trace/events/jbd2.h
+@@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit,
+ TRACE_EVENT(jbd2_shrink_checkpoint_list,
+       TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
+-               unsigned long nr_freed, unsigned long nr_scanned,
+-               tid_t next_tid),
++               unsigned long nr_freed, tid_t next_tid),
+-      TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
+-              nr_scanned, next_tid),
++      TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+@@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
+               __field(tid_t, tid)
+               __field(tid_t, last_tid)
+               __field(unsigned long, nr_freed)
+-              __field(unsigned long, nr_scanned)
+               __field(tid_t, next_tid)
+       ),
+@@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
+               __entry->tid            = tid;
+               __entry->last_tid       = last_tid;
+               __entry->nr_freed       = nr_freed;
+-              __entry->nr_scanned     = nr_scanned;
+               __entry->next_tid       = next_tid;
+       ),
+       TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
+-                "scanned %lu next transaction %u",
++                "next transaction %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->first_tid, __entry->tid, __entry->last_tid,
+-                __entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
++                __entry->nr_freed, __entry->next_tid)
+ );
+ #endif /* _TRACE_JBD2_H */
+-- 
+2.40.1
+
diff --git a/queue-5.15/jbd2-remove-t_checkpoint_io_list.patch b/queue-5.15/jbd2-remove-t_checkpoint_io_list.patch
new file mode 100644 (file)
index 0000000..e4c481b
--- /dev/null
@@ -0,0 +1,146 @@
+From 6820c2658f753a380af3f35f1da09f0b58763654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 21:59:24 +0800
+Subject: jbd2: remove t_checkpoint_io_list
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit be22255360f80d3af789daad00025171a65424a5 ]
+
+Since t_checkpoint_io_list stopped being used in jbd2_log_do_checkpoint()
+now, it's time to remove the whole t_checkpoint_io_list logic.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230606135928.434610-3-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 46f881b5b175 ("jbd2: fix a race when checking checkpoint buffer busy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jbd2/checkpoint.c | 42 ++----------------------------------------
+ fs/jbd2/commit.c     |  3 +--
+ include/linux/jbd2.h |  6 ------
+ 3 files changed, 3 insertions(+), 48 deletions(-)
+
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index d2aba55833f92..c1f543e86170a 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -27,7 +27,7 @@
+  *
+  * Called with j_list_lock held.
+  */
+-static inline void __buffer_unlink_first(struct journal_head *jh)
++static inline void __buffer_unlink(struct journal_head *jh)
+ {
+       transaction_t *transaction = jh->b_cp_transaction;
+@@ -40,23 +40,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh)
+       }
+ }
+-/*
+- * Unlink a buffer from a transaction checkpoint(io) list.
+- *
+- * Called with j_list_lock held.
+- */
+-static inline void __buffer_unlink(struct journal_head *jh)
+-{
+-      transaction_t *transaction = jh->b_cp_transaction;
+-
+-      __buffer_unlink_first(jh);
+-      if (transaction->t_checkpoint_io_list == jh) {
+-              transaction->t_checkpoint_io_list = jh->b_cpnext;
+-              if (transaction->t_checkpoint_io_list == jh)
+-                      transaction->t_checkpoint_io_list = NULL;
+-      }
+-}
+-
+ /*
+  * Check a checkpoint buffer could be release or not.
+  *
+@@ -505,15 +488,6 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+                       break;
+               if (need_resched() || spin_needbreak(&journal->j_list_lock))
+                       break;
+-              if (released)
+-                      continue;
+-
+-              nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list,
+-                                                     nr_to_scan, &released);
+-              if (*nr_to_scan == 0)
+-                      break;
+-              if (need_resched() || spin_needbreak(&journal->j_list_lock))
+-                      break;
+       } while (transaction != last_transaction);
+       if (transaction != last_transaction) {
+@@ -568,17 +542,6 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+                */
+               if (need_resched())
+                       return;
+-              if (ret)
+-                      continue;
+-              /*
+-               * It is essential that we are as careful as in the case of
+-               * t_checkpoint_list with removing the buffer from the list as
+-               * we can possibly see not yet submitted buffers on io_list
+-               */
+-              ret = journal_clean_one_cp_list(transaction->
+-                              t_checkpoint_io_list, destroy);
+-              if (need_resched())
+-                      return;
+               /*
+                * Stop scanning if we couldn't free the transaction. This
+                * avoids pointless scanning of transactions which still
+@@ -663,7 +626,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
+       jbd2_journal_put_journal_head(jh);
+       /* Is this transaction empty? */
+-      if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list)
++      if (transaction->t_checkpoint_list)
+               return 0;
+       /*
+@@ -755,7 +718,6 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
+       J_ASSERT(transaction->t_forget == NULL);
+       J_ASSERT(transaction->t_shadow_list == NULL);
+       J_ASSERT(transaction->t_checkpoint_list == NULL);
+-      J_ASSERT(transaction->t_checkpoint_io_list == NULL);
+       J_ASSERT(atomic_read(&transaction->t_updates) == 0);
+       J_ASSERT(journal->j_committing_transaction != transaction);
+       J_ASSERT(journal->j_running_transaction != transaction);
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index ac328e3321242..20294c1bbeab7 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -1184,8 +1184,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+       spin_lock(&journal->j_list_lock);
+       commit_transaction->t_state = T_FINISHED;
+       /* Check if the transaction can be dropped now that we are finished */
+-      if (commit_transaction->t_checkpoint_list == NULL &&
+-          commit_transaction->t_checkpoint_io_list == NULL) {
++      if (commit_transaction->t_checkpoint_list == NULL) {
+               __jbd2_journal_drop_transaction(journal, commit_transaction);
+               jbd2_journal_free_transaction(commit_transaction);
+       }
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index d63b8106796e2..e6cfbcde96f29 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -626,12 +626,6 @@ struct transaction_s
+        */
+       struct journal_head     *t_checkpoint_list;
+-      /*
+-       * Doubly-linked circular list of all buffers submitted for IO while
+-       * checkpointing. [j_list_lock]
+-       */
+-      struct journal_head     *t_checkpoint_io_list;
+-
+       /*
+        * Doubly-linked circular list of metadata buffers being
+        * shadowed by log IO.  The IO buffers on the iobuf list and
+-- 
+2.40.1
+
diff --git a/queue-5.15/mips-cpu-features-enable-octeon_cache-by-cpu_type.patch b/queue-5.15/mips-cpu-features-enable-octeon_cache-by-cpu_type.patch
new file mode 100644 (file)
index 0000000..4a88ab2
--- /dev/null
@@ -0,0 +1,56 @@
+From a05ab41bd691a0422b8bd9155eeaad039cb630f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Apr 2023 10:33:44 +0100
+Subject: MIPS: cpu-features: Enable octeon_cache by cpu_type
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit f641519409a73403ee6612b8648b95a688ab85c2 ]
+
+cpu_has_octeon_cache was tied to 0 for generic cpu-features,
+with this, a generic kernel built for octeon CPU won't boot.
+
+Just enable this flag by cpu_type. It won't hurt other platforms
+because compiler will eliminate the code path on other processors.
+
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Stable-dep-of: 5487a7b60695 ("MIPS: cpu-features: Use boot_cpu_type for CPU type based features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 3d71081afc55f..133385fe03c69 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -124,7 +124,24 @@
+ #define cpu_has_tx39_cache    __opt(MIPS_CPU_TX39_CACHE)
+ #endif
+ #ifndef cpu_has_octeon_cache
+-#define cpu_has_octeon_cache  0
++#define cpu_has_octeon_cache                                          \
++({                                                                    \
++      int __res;                                                      \
++                                                                      \
++      switch (current_cpu_type()) {                                   \
++      case CPU_CAVIUM_OCTEON:                                         \
++      case CPU_CAVIUM_OCTEON_PLUS:                                    \
++      case CPU_CAVIUM_OCTEON2:                                        \
++      case CPU_CAVIUM_OCTEON3:                                        \
++              __res = 1;                                              \
++              break;                                                  \
++                                                                      \
++      default:                                                        \
++              __res = 0;                                              \
++      }                                                               \
++                                                                      \
++      __res;                                                          \
++})
+ #endif
+ /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work.  */
+ #ifndef cpu_has_fpu
+-- 
+2.40.1
+
diff --git a/queue-5.15/mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch b/queue-5.15/mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch
new file mode 100644 (file)
index 0000000..ca61354
--- /dev/null
@@ -0,0 +1,51 @@
+From a3202566a6bafef9689051a09e61a1ace0ff50f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 13:51:22 +0800
+Subject: MIPS: cpu-features: Use boot_cpu_type for CPU type based features
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit 5487a7b60695a92cf998350e4beac17144c91fcd ]
+
+Some CPU feature macros were using current_cpu_type to mark feature
+availability.
+
+However current_cpu_type will use smp_processor_id, which is prohibited
+under preemptable context.
+
+Since those features are all uniform on all CPUs in a SMP system, use
+boot_cpu_type instead of current_cpu_type to fix preemptable kernel.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 133385fe03c69..e69833213e792 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -128,7 +128,7 @@
+ ({                                                                    \
+       int __res;                                                      \
+                                                                       \
+-      switch (current_cpu_type()) {                                   \
++      switch (boot_cpu_type()) {                                      \
+       case CPU_CAVIUM_OCTEON:                                         \
+       case CPU_CAVIUM_OCTEON_PLUS:                                    \
+       case CPU_CAVIUM_OCTEON2:                                        \
+@@ -368,7 +368,7 @@
+ ({                                                                    \
+       int __res;                                                      \
+                                                                       \
+-      switch (current_cpu_type()) {                                   \
++      switch (boot_cpu_type()) {                                      \
+       case CPU_M14KC:                                                 \
+       case CPU_74K:                                                   \
+       case CPU_1074K:                                                 \
+-- 
+2.40.1
+
diff --git a/queue-5.15/net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch b/queue-5.15/net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch
new file mode 100644 (file)
index 0000000..9662b94
--- /dev/null
@@ -0,0 +1,38 @@
+From ec410fe5543167fd45898ce690786c7c84f20342 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 13:12:21 +0800
+Subject: net: bcmgenet: Fix return value check for fixed_phy_register()
+
+From: Ruan Jinjie <ruanjinjie@huawei.com>
+
+[ Upstream commit 32bbe64a1386065ab2aef8ce8cae7c689d0add6e ]
+
+The fixed_phy_register() function returns error pointers and never
+returns NULL. Update the checks accordingly.
+
+Fixes: b0ba512e25d7 ("net: bcmgenet: enable driver to work without a device tree")
+Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Acked-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 8c800d9c11b78..bfe90cacbd073 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -571,7 +571,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+               };
+               phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+-              if (!phydev || IS_ERR(phydev)) {
++              if (IS_ERR(phydev)) {
+                       dev_err(kdev, "failed to register fixed PHY device\n");
+                       return -ENODEV;
+               }
+-- 
+2.40.1
+
diff --git a/queue-5.15/net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch b/queue-5.15/net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch
new file mode 100644 (file)
index 0000000..2eba279
--- /dev/null
@@ -0,0 +1,38 @@
+From 9496884ef94925c23f6e5cb706f0d639421e7991 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 13:12:20 +0800
+Subject: net: bgmac: Fix return value check for fixed_phy_register()
+
+From: Ruan Jinjie <ruanjinjie@huawei.com>
+
+[ Upstream commit 23a14488ea5882dea5851b65c9fce2127ee8fcad ]
+
+The fixed_phy_register() function returns error pointers and never
+returns NULL. Update the checks accordingly.
+
+Fixes: c25b23b8a387 ("bgmac: register fixed PHY for ARM BCM470X / BCM5301X chipsets")
+Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bgmac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index a9c99ac81730a..c691635cf4ebe 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
+       int err;
+       phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+-      if (!phy_dev || IS_ERR(phy_dev)) {
++      if (IS_ERR(phy_dev)) {
+               dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
+               return -ENODEV;
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.15/net-remove-bond_slave_has_mac_rcu.patch b/queue-5.15/net-remove-bond_slave_has_mac_rcu.patch
new file mode 100644 (file)
index 0000000..1c4b9fd
--- /dev/null
@@ -0,0 +1,47 @@
+From e45b5eefce802b39a134907a2aab16298e9b988a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jan 2022 11:10:59 -0800
+Subject: net: remove bond_slave_has_mac_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 8b0fdcdc3a7d44aff907f0103f5ffb86b12bfe71 ]
+
+No caller since v3.16.
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e74216b8def3 ("bonding: fix macvlan over alb bond support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bonding.h | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index e4453cf4f0171..6c90aca917edc 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -699,20 +699,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+       return NULL;
+ }
+-/* Caller must hold rcu_read_lock() for read */
+-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+-                                             const u8 *mac)
+-{
+-      struct list_head *iter;
+-      struct slave *tmp;
+-
+-      bond_for_each_slave_rcu(bond, tmp, iter)
+-              if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+-                      return tmp;
+-
+-      return NULL;
+-}
+-
+ /* Caller must hold rcu_read_lock() for read */
+ static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+ {
+-- 
+2.40.1
+
diff --git a/queue-5.15/net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch b/queue-5.15/net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch
new file mode 100644 (file)
index 0000000..28f1086
--- /dev/null
@@ -0,0 +1,138 @@
+From c56fc1f58cee78e7be0ffa24d3a8f74f9760bdc9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 06:12:31 -0400
+Subject: net/sched: fix a qdisc modification with ambiguous command request
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit da71714e359b64bd7aab3bd56ec53f307f058133 ]
+
+When replacing an existing root qdisc, with one that is of the same kind, the
+request boils down to essentially a parameterization change  i.e not one that
+requires allocation and grafting of a new qdisc. syzbot was able to create a
+scenario which resulted in a taprio qdisc replacing an existing taprio qdisc
+with a combination of NLM_F_CREATE, NLM_F_REPLACE and NLM_F_EXCL leading to
+create and graft scenario.
+The fix ensures that only when the qdisc kinds are different that we should
+allow a create and graft, otherwise it goes into the "change" codepath.
+
+While at it, fix the code and comments to improve readability.
+
+While syzbot was able to create the issue, it did not zone in on the root cause.
+Analysis from Vladimir Oltean <vladimir.oltean@nxp.com> helped narrow it down.
+
+v1->V2 changes:
+- remove "inline" function definition (Vladmir)
+- remove extrenous braces in branches (Vladmir)
+- change inline function names (Pedro)
+- Run tdc tests (Victor)
+v2->v3 changes:
+- dont break else/if (Simon)
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+a3618a167af2021433cd@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/20230816225759.g25x76kmgzya2gei@skbuf/T/
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Tested-by: Victor Nogueira <victor@mojatatu.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Reviewed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_api.c | 53 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 40 insertions(+), 13 deletions(-)
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 328db5e1b0eaf..fa79dbd3601fa 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1513,10 +1513,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+       return 0;
+ }
++static bool req_create_or_replace(struct nlmsghdr *n)
++{
++      return (n->nlmsg_flags & NLM_F_CREATE &&
++              n->nlmsg_flags & NLM_F_REPLACE);
++}
++
++static bool req_create_exclusive(struct nlmsghdr *n)
++{
++      return (n->nlmsg_flags & NLM_F_CREATE &&
++              n->nlmsg_flags & NLM_F_EXCL);
++}
++
++static bool req_change(struct nlmsghdr *n)
++{
++      return (!(n->nlmsg_flags & NLM_F_CREATE) &&
++              !(n->nlmsg_flags & NLM_F_REPLACE) &&
++              !(n->nlmsg_flags & NLM_F_EXCL));
++}
++
+ /*
+  * Create/change qdisc.
+  */
+-
+ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+                          struct netlink_ext_ack *extack)
+ {
+@@ -1613,27 +1631,35 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+                                *
+                                *   We know, that some child q is already
+                                *   attached to this parent and have choice:
+-                               *   either to change it or to create/graft new one.
++                               *   1) change it or 2) create/graft new one.
++                               *   If the requested qdisc kind is different
++                               *   than the existing one, then we choose graft.
++                               *   If they are the same then this is "change"
++                               *   operation - just let it fallthrough..
+                                *
+                                *   1. We are allowed to create/graft only
+-                               *   if CREATE and REPLACE flags are set.
++                               *   if the request is explicitly stating
++                               *   "please create if it doesn't exist".
+                                *
+-                               *   2. If EXCL is set, requestor wanted to say,
+-                               *   that qdisc tcm_handle is not expected
++                               *   2. If the request is to exclusive create
++                               *   then the qdisc tcm_handle is not expected
+                                *   to exist, so that we choose create/graft too.
+                                *
+                                *   3. The last case is when no flags are set.
++                               *   This will happen when for example tc
++                               *   utility issues a "change" command.
+                                *   Alas, it is sort of hole in API, we
+                                *   cannot decide what to do unambiguously.
+-                               *   For now we select create/graft, if
+-                               *   user gave KIND, which does not match existing.
++                               *   For now we select create/graft.
+                                */
+-                              if ((n->nlmsg_flags & NLM_F_CREATE) &&
+-                                  (n->nlmsg_flags & NLM_F_REPLACE) &&
+-                                  ((n->nlmsg_flags & NLM_F_EXCL) ||
+-                                   (tca[TCA_KIND] &&
+-                                    nla_strcmp(tca[TCA_KIND], q->ops->id))))
+-                                      goto create_n_graft;
++                              if (tca[TCA_KIND] &&
++                                  nla_strcmp(tca[TCA_KIND], q->ops->id)) {
++                                      if (req_create_or_replace(n) ||
++                                          req_create_exclusive(n))
++                                              goto create_n_graft;
++                                      else if (req_change(n))
++                                              goto create_n_graft2;
++                              }
+                       }
+               }
+       } else {
+@@ -1667,6 +1693,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+               NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
+               return -ENOENT;
+       }
++create_n_graft2:
+       if (clid == TC_H_INGRESS) {
+               if (dev_ingress_queue(dev)) {
+                       q = qdisc_create(dev, dev_ingress_queue(dev), p,
+-- 
+2.40.1
+
diff --git a/queue-5.15/net-validate-veth-and-vxcan-peer-ifindexes.patch b/queue-5.15/net-validate-veth-and-vxcan-peer-ifindexes.patch
new file mode 100644 (file)
index 0000000..334559a
--- /dev/null
@@ -0,0 +1,137 @@
+From b8aad250c0640d0d41d356346dfdbb18e36f6e49 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 18:26:02 -0700
+Subject: net: validate veth and vxcan peer ifindexes
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit f534f6581ec084fe94d6759f7672bd009794b07e ]
+
+veth and vxcan need to make sure the ifindexes of the peer
+are not negative, core does not validate this.
+
+Using iproute2 with user-space-level checking removed:
+
+Before:
+
+  # ./ip link add index 10 type veth peer index -1
+  # ip link show
+  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
+    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+  2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
+    link/ether 52:54:00:74:b2:03 brd ff:ff:ff:ff:ff:ff
+  10: veth1@veth0: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+    link/ether 8a:90:ff:57:6d:5d brd ff:ff:ff:ff:ff:ff
+  -1: veth0@veth1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+    link/ether ae:ed:18:e6:fa:7f brd ff:ff:ff:ff:ff:ff
+
+Now:
+
+  $ ./ip link add index 10 type veth peer index -1
+  Error: ifindex can't be negative.
+
+This problem surfaced in net-next because an explicit WARN()
+was added, the root cause is older.
+
+Fixes: e6f8f1a739b6 ("veth: Allow to create peer link with given ifindex")
+Fixes: a8f820a380a2 ("can: add Virtual CAN Tunnel driver (vxcan)")
+Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/vxcan.c |  7 +------
+ drivers/net/veth.c      |  5 +----
+ include/net/rtnetlink.h |  4 ++--
+ net/core/rtnetlink.c    | 22 ++++++++++++++++++----
+ 4 files changed, 22 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index be5566168d0f3..afd9060c5421c 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
+               nla_peer = data[VXCAN_INFO_PEER];
+               ifmp = nla_data(nla_peer);
+-              err = rtnl_nla_parse_ifla(peer_tb,
+-                                        nla_data(nla_peer) +
+-                                        sizeof(struct ifinfomsg),
+-                                        nla_len(nla_peer) -
+-                                        sizeof(struct ifinfomsg),
+-                                        NULL);
++              err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+               if (err < 0)
+                       return err;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 41cb9179e8b79..45ee44f66e77d 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1654,10 +1654,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
+               nla_peer = data[VETH_INFO_PEER];
+               ifmp = nla_data(nla_peer);
+-              err = rtnl_nla_parse_ifla(peer_tb,
+-                                        nla_data(nla_peer) + sizeof(struct ifinfomsg),
+-                                        nla_len(nla_peer) - sizeof(struct ifinfomsg),
+-                                        NULL);
++              err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+               if (err < 0)
+                       return err;
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 9f48733bfd21c..a2a74e0e5c494 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -175,8 +175,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ int rtnl_delete_link(struct net_device *dev);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+-                      struct netlink_ext_ack *exterr);
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++                           struct netlink_ext_ack *exterr);
+ struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index b055e196f5306..03dd8dc9e1425 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2173,13 +2173,27 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+       return err;
+ }
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+-                      struct netlink_ext_ack *exterr)
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++                           struct netlink_ext_ack *exterr)
+ {
+-      return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
++      const struct ifinfomsg *ifmp;
++      const struct nlattr *attrs;
++      size_t len;
++
++      ifmp = nla_data(nla_peer);
++      attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
++      len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
++
++      if (ifmp->ifi_index < 0) {
++              NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
++                                  "ifindex can't be negative");
++              return -EINVAL;
++      }
++
++      return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
+                                   exterr);
+ }
+-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+-- 
+2.40.1
+
diff --git a/queue-5.15/netfilter-nf_tables-fix-out-of-memory-error-handling.patch b/queue-5.15/netfilter-nf_tables-fix-out-of-memory-error-handling.patch
new file mode 100644 (file)
index 0000000..599c94c
--- /dev/null
@@ -0,0 +1,65 @@
+From 81fd0e155456022ac06e52039b8a34180a367e6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 19:49:52 +0200
+Subject: netfilter: nf_tables: fix out of memory error handling
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 5e1be4cdc98c989d5387ce94ff15b5ad06a5b681 ]
+
+Several instances of pipapo_resize() don't propagate allocation failures,
+this causes a crash when fault injection is enabled for gfp_kernel slabs.
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_pipapo.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 32cfd0a84b0e2..8c16681884b7e 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+ static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
+                        int mask_bits)
+ {
+-      int rule = f->rules++, group, ret, bit_offset = 0;
++      int rule = f->rules, group, ret, bit_offset = 0;
+-      ret = pipapo_resize(f, f->rules - 1, f->rules);
++      ret = pipapo_resize(f, f->rules, f->rules + 1);
+       if (ret)
+               return ret;
++      f->rules++;
++
+       for (group = 0; group < f->groups; group++) {
+               int i, v;
+               u8 mask;
+@@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f,
+                       step++;
+                       if (step >= len) {
+                               if (!masks) {
+-                                      pipapo_insert(f, base, 0);
++                                      err = pipapo_insert(f, base, 0);
++                                      if (err < 0)
++                                              return err;
+                                       masks = 1;
+                               }
+                               goto out;
+@@ -1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+               else
+                       ret = pipapo_expand(f, start, end, f->groups * f->bb);
++              if (ret < 0)
++                      return ret;
++
+               if (f->bsize > bsize_max)
+                       bsize_max = f->bsize;
+-- 
+2.40.1
+
diff --git a/queue-5.15/netfilter-nf_tables-flush-pending-destroy-work-befor.patch b/queue-5.15/netfilter-nf_tables-flush-pending-destroy-work-befor.patch
new file mode 100644 (file)
index 0000000..077dec9
--- /dev/null
@@ -0,0 +1,44 @@
+From de7d796c5ffa6c3d3aa2c37eaca966c9f955368c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:13:31 +0200
+Subject: netfilter: nf_tables: flush pending destroy work before netlink
+ notifier
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 2c9f0293280e258606e54ed2b96fa71498432eae ]
+
+Destroy work waits for the RCU grace period then it releases the objects
+with no mutex held. All releases objects follow this path for
+transactions, therefore, order is guaranteed and references to top-level
+objects in the hierarchy remain valid.
+
+However, netlink notifier might interfere with pending destroy work.
+rcu_barrier() is not correct because objects are not release via RCU
+callback. Flush destroy work before releasing objects from netlink
+notifier path.
+
+Fixes: d4bc8271db21 ("netfilter: nf_tables: netlink notifier might race to release objects")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1e2d1e4bdb74d..d84da11aaee5c 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10303,7 +10303,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+       deleted = 0;
+       mutex_lock(&nft_net->commit_mutex);
+       if (!list_empty(&nf_tables_destroy_list))
+-              rcu_barrier();
++              nf_tables_trans_destroy_flush_work();
+ again:
+       list_for_each_entry(table, &nft_net->tables, list) {
+               if (nft_table_has_owner(table) &&
+-- 
+2.40.1
+
diff --git a/queue-5.15/nfsv4-fix-out-path-in-__nfs4_get_acl_uncached.patch b/queue-5.15/nfsv4-fix-out-path-in-__nfs4_get_acl_uncached.patch
new file mode 100644 (file)
index 0000000..2aabb9a
--- /dev/null
@@ -0,0 +1,46 @@
+From c909d8ca1540286544381831cf0c93572377c047 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jul 2023 14:59:30 +0300
+Subject: NFSv4: fix out path in __nfs4_get_acl_uncached
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit f4e89f1a6dab4c063fc1e823cc9dddc408ff40cf ]
+
+Another highly rare error case when a page allocating loop (inside
+__nfs4_get_acl_uncached, this time) is not properly unwound on error.
+Since pages array is allocated being uninitialized, need to free only
+lower array indices. NULL checks were useful before commit 62a1573fcf84
+("NFSv4 fix acl retrieval over krb5i/krb5p mounts") when the array had
+been initialized to zero on stack.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 62a1573fcf84 ("NFSv4 fix acl retrieval over krb5i/krb5p mounts")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index b1ec9b5d06e58..31bf3e9dc7a56 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5964,9 +5964,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ out_ok:
+       ret = res.acl_len;
+ out_free:
+-      for (i = 0; i < npages; i++)
+-              if (pages[i])
+-                      __free_page(pages[i]);
++      while (--i >= 0)
++              __free_page(pages[i]);
+       if (res.acl_scratch)
+               __free_page(res.acl_scratch);
+       kfree(pages);
+-- 
+2.40.1
+
diff --git a/queue-5.15/nfsv4.2-fix-error-handling-in-nfs42_proc_getxattr.patch b/queue-5.15/nfsv4.2-fix-error-handling-in-nfs42_proc_getxattr.patch
new file mode 100644 (file)
index 0000000..34a3f4f
--- /dev/null
@@ -0,0 +1,51 @@
+From 47e442ce1bebc7ec8ddf9679b0176e1d8c60cfd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jul 2023 14:58:58 +0300
+Subject: NFSv4.2: fix error handling in nfs42_proc_getxattr
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit 4e3733fd2b0f677faae21cf838a43faf317986d3 ]
+
+There is a slight issue with error handling code inside
+nfs42_proc_getxattr(). If page allocating loop fails then we free the
+failing page array element which is NULL but __free_page() can't deal with
+NULL args.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: a1f26739ccdc ("NFSv4.2: improve page handling for GETXATTR")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs42proc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index da94bf2afd070..bc07012741cb4 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -1339,7 +1339,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
+       for (i = 0; i < np; i++) {
+               pages[i] = alloc_page(GFP_KERNEL);
+               if (!pages[i]) {
+-                      np = i + 1;
+                       err = -ENOMEM;
+                       goto out;
+               }
+@@ -1363,8 +1362,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
+       } while (exception.retry);
+ out:
+-      while (--np >= 0)
+-              __free_page(pages[np]);
++      while (--i >= 0)
++              __free_page(pages[i]);
+       kfree(pages);
+       return err;
+-- 
+2.40.1
+
diff --git a/queue-5.15/octeontx2-af-sdp-fix-receive-link-config.patch b/queue-5.15/octeontx2-af-sdp-fix-receive-link-config.patch
new file mode 100644 (file)
index 0000000..1894a3f
--- /dev/null
@@ -0,0 +1,45 @@
+From c63d6c793d5fa9ded3b1d0d0fb93581fba0f9219 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 12:00:06 +0530
+Subject: octeontx2-af: SDP: fix receive link config
+
+From: Hariprasad Kelam <hkelam@marvell.com>
+
+[ Upstream commit 05f3d5bc23524bed6f043dfe6b44da687584f9fb ]
+
+On SDP interfaces, frame oversize and undersize errors are
+observed as driver is not considering packet sizes of all
+subscribers of the link before updating the link config.
+
+This patch fixes the same.
+
+Fixes: 9b7dd87ac071 ("octeontx2-af: Support to modify min/max allowed packet lengths")
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Link: https://lore.kernel.org/r/20230817063006.10366-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index dee2f2086bb5d..f5922d63e33e4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -4013,9 +4013,10 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+       if (link < 0)
+               return NIX_AF_ERR_RX_LINK_INVALID;
+-      nix_find_link_frs(rvu, req, pcifunc);
+ linkcfg:
++      nix_find_link_frs(rvu, req, pcifunc);
++
+       cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+       cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+       if (req->update_minlen)
+-- 
+2.40.1
+
diff --git a/queue-5.15/pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch b/queue-5.15/pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch
new file mode 100644 (file)
index 0000000..899fb19
--- /dev/null
@@ -0,0 +1,83 @@
+From 921dd1a08b58b8b02460ee2c9e019432972de2c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 21:15:57 +0200
+Subject: PCI: acpiphp: Reassign resources on bridge if necessary
+
+From: Igor Mammedov <imammedo@redhat.com>
+
+[ Upstream commit 40613da52b13fb21c5566f10b287e0ca8c12c4e9 ]
+
+When using ACPI PCI hotplug, hotplugging a device with large BARs may fail
+if bridge windows programmed by firmware are not large enough.
+
+Reproducer:
+  $ qemu-kvm -monitor stdio -M q35  -m 4G \
+      -global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=on \
+      -device id=rp1,pcie-root-port,bus=pcie.0,chassis=4 \
+      disk_image
+
+ wait till linux guest boots, then hotplug device:
+   (qemu) device_add qxl,bus=rp1
+
+ hotplug on guest side fails with:
+   pci 0000:01:00.0: [1b36:0100] type 00 class 0x038000
+   pci 0000:01:00.0: reg 0x10: [mem 0x00000000-0x03ffffff]
+   pci 0000:01:00.0: reg 0x14: [mem 0x00000000-0x03ffffff]
+   pci 0000:01:00.0: reg 0x18: [mem 0x00000000-0x00001fff]
+   pci 0000:01:00.0: reg 0x1c: [io  0x0000-0x001f]
+   pci 0000:01:00.0: BAR 0: no space for [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 0: failed to assign [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 1: no space for [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 1: failed to assign [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 2: assigned [mem 0xfe800000-0xfe801fff]
+   pci 0000:01:00.0: BAR 3: assigned [io  0x1000-0x101f]
+   qxl 0000:01:00.0: enabling device (0000 -> 0003)
+   Unable to create vram_mapping
+   qxl: probe of 0000:01:00.0 failed with error -12
+
+However when using native PCIe hotplug
+  '-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off'
+it works fine, since kernel attempts to reassign unused resources.
+
+Use the same machinery as native PCIe hotplug to (re)assign resources.
+
+Link: https://lore.kernel.org/r/20230424191557.2464760-1-imammedo@redhat.com
+Signed-off-by: Igor Mammedov <imammedo@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Rafael J. Wysocki <rafael@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/hotplug/acpiphp_glue.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index f031302ad4019..44c0b025f09e1 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -489,7 +489,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+                               acpiphp_native_scan_bridge(dev);
+               }
+       } else {
+-              LIST_HEAD(add_list);
+               int max, pass;
+               acpiphp_rescan_slot(slot);
+@@ -503,12 +502,10 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+                               if (pass && dev->subordinate) {
+                                       check_hotplug_bridge(slot, dev);
+                                       pcibios_resource_survey_bus(dev->subordinate);
+-                                      __pci_bus_size_bridges(dev->subordinate,
+-                                                             &add_list);
+                               }
+                       }
+               }
+-              __pci_bus_assign_resources(bus, &add_list, NULL);
++              pci_assign_unassigned_bridge_resources(bus->self);
+       }
+       acpiphp_sanitize_bus(bus);
+-- 
+2.40.1
+
diff --git a/queue-5.15/rtnetlink-reject-negative-ifindexes-in-rtm_newlink.patch b/queue-5.15/rtnetlink-reject-negative-ifindexes-in-rtm_newlink.patch
new file mode 100644 (file)
index 0000000..205d04b
--- /dev/null
@@ -0,0 +1,68 @@
+From 7d013e33341cce09c699638000c63de6f76892bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Aug 2023 09:43:48 +0300
+Subject: rtnetlink: Reject negative ifindexes in RTM_NEWLINK
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 30188bd7838c16a98a520db1fe9df01ffc6ed368 ]
+
+Negative ifindexes are illegal, but the kernel does not validate the
+ifindex in the ancillary header of RTM_NEWLINK messages, resulting in
+the kernel generating a warning [1] when such an ifindex is specified.
+
+Fix by rejecting negative ifindexes.
+
+[1]
+WARNING: CPU: 0 PID: 5031 at net/core/dev.c:9593 dev_index_reserve+0x1a2/0x1c0 net/core/dev.c:9593
+[...]
+Call Trace:
+ <TASK>
+ register_netdevice+0x69a/0x1490 net/core/dev.c:10081
+ br_dev_newlink+0x27/0x110 net/bridge/br_netlink.c:1552
+ rtnl_newlink_create net/core/rtnetlink.c:3471 [inline]
+ __rtnl_newlink+0x115e/0x18c0 net/core/rtnetlink.c:3688
+ rtnl_newlink+0x67/0xa0 net/core/rtnetlink.c:3701
+ rtnetlink_rcv_msg+0x439/0xd30 net/core/rtnetlink.c:6427
+ netlink_rcv_skb+0x16b/0x440 net/netlink/af_netlink.c:2545
+ netlink_unicast_kernel net/netlink/af_netlink.c:1342 [inline]
+ netlink_unicast+0x536/0x810 net/netlink/af_netlink.c:1368
+ netlink_sendmsg+0x93c/0xe40 net/netlink/af_netlink.c:1910
+ sock_sendmsg_nosec net/socket.c:728 [inline]
+ sock_sendmsg+0xd9/0x180 net/socket.c:751
+ ____sys_sendmsg+0x6ac/0x940 net/socket.c:2538
+ ___sys_sendmsg+0x135/0x1d0 net/socket.c:2592
+ __sys_sendmsg+0x117/0x1e0 net/socket.c:2621
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Fixes: 38f7b870d4a6 ("[RTNETLINK]: Link creation API")
+Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20230823064348.2252280-1-idosch@nvidia.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/rtnetlink.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 7c1a2fd7d9532..1b71e5c582bbc 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3318,6 +3318,9 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+       if (ifm->ifi_index > 0) {
+               link_specified = true;
+               dev = __dev_get_by_index(net, ifm->ifi_index);
++      } else if (ifm->ifi_index < 0) {
++              NL_SET_ERR_MSG(extack, "ifindex can't be negative");
++              return -EINVAL;
+       } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
+               link_specified = true;
+               dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
+-- 
+2.40.1
+
diff --git a/queue-5.15/rtnetlink-return-enodev-when-ifname-does-not-exist-a.patch b/queue-5.15/rtnetlink-return-enodev-when-ifname-does-not-exist-a.patch
new file mode 100644 (file)
index 0000000..e86de30
--- /dev/null
@@ -0,0 +1,96 @@
+From 00b5193b40885e41ac9e38a7b43a6c50abb5c3c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Apr 2022 18:53:27 +0200
+Subject: rtnetlink: return ENODEV when ifname does not exist and group is
+ given
+
+From: Florent Fourcot <florent.fourcot@wifirst.fr>
+
+[ Upstream commit ef2a7c9065cea4e3fbc0390e82d05141abbccd7f ]
+
+When the interface does not exist, and a group is given, the given
+parameters are being set to all interfaces of the given group. The given
+IFNAME/ALT_IF_NAME are being ignored in that case.
+
+That can be dangerous since a typo (or a deleted interface) can produce
+weird side effects for caller:
+
+Case 1:
+
+ IFLA_IFNAME=valid_interface
+ IFLA_GROUP=1
+ MTU=1234
+
+Case 1 will update MTU and group of the given interface "valid_interface".
+
+Case 2:
+
+ IFLA_IFNAME=doesnotexist
+ IFLA_GROUP=1
+ MTU=1234
+
+Case 2 will update MTU of all interfaces in group 1. IFLA_IFNAME is
+ignored in this case
+
+This behaviour is not consistent and dangerous. In order to fix this issue,
+we now return ENODEV when the given IFNAME does not exist.
+
+Signed-off-by: Florent Fourcot <florent.fourcot@wifirst.fr>
+Signed-off-by: Brian Baboch <brian.baboch@wifirst.fr>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 30188bd7838c ("rtnetlink: Reject negative ifindexes in RTM_NEWLINK")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/rtnetlink.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 03dd8dc9e1425..7c1a2fd7d9532 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3294,6 +3294,7 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+       struct ifinfomsg *ifm;
+       char ifname[IFNAMSIZ];
+       struct nlattr **data;
++      bool link_specified;
+       int err;
+ #ifdef CONFIG_MODULES
+@@ -3314,12 +3315,16 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+               ifname[0] = '\0';
+       ifm = nlmsg_data(nlh);
+-      if (ifm->ifi_index > 0)
++      if (ifm->ifi_index > 0) {
++              link_specified = true;
+               dev = __dev_get_by_index(net, ifm->ifi_index);
+-      else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
++      } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
++              link_specified = true;
+               dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
+-      else
++      } else {
++              link_specified = false;
+               dev = NULL;
++      }
+       master_dev = NULL;
+       m_ops = NULL;
+@@ -3422,7 +3427,12 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+       }
+       if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+-              if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
++              /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
++               * or it's for a group
++              */
++              if (link_specified)
++                      return -ENODEV;
++              if (tb[IFLA_GROUP])
+                       return rtnl_group_changelink(skb, net,
+                                               nla_get_u32(tb[IFLA_GROUP]),
+                                               ifm, extack, tb);
+-- 
+2.40.1
+
index 20944b76ca81dc9c27c6b61f644a6677a788bf92..c5b27fe7be2979334d2e8a8ae9c9cc9b56c196c9 100644 (file)
@@ -1 +1,45 @@
 objtool-x86-fix-srso-mess.patch
+nfsv4.2-fix-error-handling-in-nfs42_proc_getxattr.patch
+nfsv4-fix-out-path-in-__nfs4_get_acl_uncached.patch
+xprtrdma-remap-receive-buffers-after-a-reconnect.patch
+pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch
+dlm-improve-plock-logging-if-interrupted.patch
+dlm-replace-usage-of-found-with-dedicated-list-itera.patch
+fs-dlm-add-pid-to-debug-log.patch
+fs-dlm-change-plock-interrupted-message-to-debug-aga.patch
+fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch
+fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch
+mips-cpu-features-enable-octeon_cache-by-cpu_type.patch
+mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch
+fbdev-improve-performance-of-sys_imageblit.patch
+fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch
+fbdev-fix-potential-oob-read-in-fast_imageblit.patch
+alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch
+jbd2-remove-t_checkpoint_io_list.patch
+jbd2-remove-journal_clean_one_cp_list.patch
+jbd2-fix-a-race-when-checking-checkpoint-buffer-busy.patch
+exfat-remove-argument-sector-from-exfat_get_dentry.patch
+exfat-support-dynamic-allocate-bh-for-exfat_entry_se.patch
+can-raw-fix-receiver-memory-leak.patch
+drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch
+drm-amd-display-check-tg-is-non-null-before-checking.patch
+can-raw-fix-lockdep-issue-in-raw_release.patch
+tracing-fix-cpu-buffers-unavailable-due-to-record_di.patch
+tracing-fix-memleak-due-to-race-between-current_trac.patch
+octeontx2-af-sdp-fix-receive-link-config.patch
+sock-annotate-data-races-around-prot-memory_pressure.patch
+dccp-annotate-data-races-in-dccp_poll.patch
+ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch
+net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch
+net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch
+net-validate-veth-and-vxcan-peer-ifindexes.patch
+ice-fix-receive-buffer-size-miscalculation.patch
+igb-avoid-starting-unnecessary-workqueues.patch
+igc-fix-the-typo-in-the-ptm-control-macro.patch
+net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch
+netfilter-nf_tables-flush-pending-destroy-work-befor.patch
+netfilter-nf_tables-fix-out-of-memory-error-handling.patch
+rtnetlink-return-enodev-when-ifname-does-not-exist-a.patch
+rtnetlink-reject-negative-ifindexes-in-rtm_newlink.patch
+net-remove-bond_slave_has_mac_rcu.patch
+bonding-fix-macvlan-over-alb-bond-support.patch
diff --git a/queue-5.15/sock-annotate-data-races-around-prot-memory_pressure.patch b/queue-5.15/sock-annotate-data-races-around-prot-memory_pressure.patch
new file mode 100644 (file)
index 0000000..26fce96
--- /dev/null
@@ -0,0 +1,82 @@
+From 1f895d170f09af4791dd7243e692630ea1c3943d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:51:32 +0000
+Subject: sock: annotate data-races around prot->memory_pressure
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 76f33296d2e09f63118db78125c95ef56df438e9 ]
+
+*prot->memory_pressure is read/written locklessly, we need
+to add proper annotations.
+
+A recent commit added a new race, it is time to audit all accesses.
+
+Fixes: 2d0c88e84e48 ("sock: Fix misuse of sk_under_memory_pressure()")
+Fixes: 4d93df0abd50 ("[SCTP]: Rewrite of sctp buffer management code")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Abel Wu <wuyun.abel@bytedance.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Link: https://lore.kernel.org/r/20230818015132.2699348-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 7 ++++---
+ net/sctp/socket.c  | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 6b12b62417e08..640bd7a367779 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1259,6 +1259,7 @@ struct proto {
+       /*
+        * Pressure flag: try to collapse.
+        * Technical note: it is used by multiple contexts non atomically.
++       * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+        * All the __sk_mem_schedule() is of this nature: accounting
+        * is strict, actions are advisory and have some latency.
+        */
+@@ -1384,7 +1385,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+ {
+       return sk->sk_prot->memory_pressure &&
+-              !!*sk->sk_prot->memory_pressure;
++              !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+@@ -1396,7 +1397,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+           mem_cgroup_under_socket_pressure(sk->sk_memcg))
+               return true;
+-      return !!*sk->sk_prot->memory_pressure;
++      return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+ static inline long
+@@ -1454,7 +1455,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+       if (!prot->memory_pressure)
+               return false;
+-      return !!*prot->memory_pressure;
++      return !!READ_ONCE(*prot->memory_pressure);
+ }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index f10ad80fd6948..717e2f60370b3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated;
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+-      sctp_memory_pressure = 1;
++      WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.15/tracing-fix-cpu-buffers-unavailable-due-to-record_di.patch b/queue-5.15/tracing-fix-cpu-buffers-unavailable-due-to-record_di.patch
new file mode 100644 (file)
index 0000000..223aaaa
--- /dev/null
@@ -0,0 +1,73 @@
+From ed62cb20c8509994b2c4a0075c07e862dfc2f535 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Aug 2023 11:38:15 +0800
+Subject: tracing: Fix cpu buffers unavailable due to 'record_disabled' missed
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit b71645d6af10196c46cbe3732de2ea7d36b3ff6d ]
+
+Trace ring buffer can no longer record anything after executing
+following commands at the shell prompt:
+
+  # cd /sys/kernel/tracing
+  # cat tracing_cpumask
+  fff
+  # echo 0 > tracing_cpumask
+  # echo 1 > snapshot
+  # echo fff > tracing_cpumask
+  # echo 1 > tracing_on
+  # echo "hello world" > trace_marker
+  -bash: echo: write error: Bad file descriptor
+
+The root cause is that:
+  1. After `echo 0 > tracing_cpumask`, 'record_disabled' of cpu buffers
+     in 'tr->array_buffer.buffer' became 1 (see tracing_set_cpumask());
+  2. After `echo 1 > snapshot`, 'tr->array_buffer.buffer' is swapped
+     with 'tr->max_buffer.buffer', then the 'record_disabled' became 0
+     (see update_max_tr());
+  3. After `echo fff > tracing_cpumask`, the 'record_disabled' become -1;
+Then array_buffer and max_buffer are both unavailable due to value of
+'record_disabled' is not 0.
+
+To fix it, enable or disable both array_buffer and max_buffer at the same
+time in tracing_set_cpumask().
+
+Link: https://lkml.kernel.org/r/20230805033816.3284594-2-zhengyejian1@huawei.com
+
+Cc: <mhiramat@kernel.org>
+Cc: <vnagarnaik@google.com>
+Cc: <shuah@kernel.org>
+Fixes: 71babb2705e2 ("tracing: change CPU ring buffer state from tracing_cpumask")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d4c381f06b7b2..edc58133ed5ed 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5171,11 +5171,17 @@ int tracing_set_cpumask(struct trace_array *tr,
+                               !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+                       atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+                       ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
++#ifdef CONFIG_TRACER_MAX_TRACE
++                      ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
++#endif
+               }
+               if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
+                               cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+                       atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+                       ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
++#ifdef CONFIG_TRACER_MAX_TRACE
++                      ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
++#endif
+               }
+       }
+       arch_spin_unlock(&tr->max_lock);
+-- 
+2.40.1
+
diff --git a/queue-5.15/tracing-fix-memleak-due-to-race-between-current_trac.patch b/queue-5.15/tracing-fix-memleak-due-to-race-between-current_trac.patch
new file mode 100644 (file)
index 0000000..2c15b85
--- /dev/null
@@ -0,0 +1,122 @@
+From 7674da215d2493394c20750014d5464911645f14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 20:55:39 +0800
+Subject: tracing: Fix memleak due to race between current_tracer and trace
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit eecb91b9f98d6427d4af5fdb8f108f52572a39e7 ]
+
+Kmemleak report a leak in graph_trace_open():
+
+  unreferenced object 0xffff0040b95f4a00 (size 128):
+    comm "cat", pid 204981, jiffies 4301155872 (age 99771.964s)
+    hex dump (first 32 bytes):
+      e0 05 e7 b4 ab 7d 00 00 0b 00 01 00 00 00 00 00 .....}..........
+      f4 00 01 10 00 a0 ff ff 00 00 00 00 65 00 10 00 ............e...
+    backtrace:
+      [<000000005db27c8b>] kmem_cache_alloc_trace+0x348/0x5f0
+      [<000000007df90faa>] graph_trace_open+0xb0/0x344
+      [<00000000737524cd>] __tracing_open+0x450/0xb10
+      [<0000000098043327>] tracing_open+0x1a0/0x2a0
+      [<00000000291c3876>] do_dentry_open+0x3c0/0xdc0
+      [<000000004015bcd6>] vfs_open+0x98/0xd0
+      [<000000002b5f60c9>] do_open+0x520/0x8d0
+      [<00000000376c7820>] path_openat+0x1c0/0x3e0
+      [<00000000336a54b5>] do_filp_open+0x14c/0x324
+      [<000000002802df13>] do_sys_openat2+0x2c4/0x530
+      [<0000000094eea458>] __arm64_sys_openat+0x130/0x1c4
+      [<00000000a71d7881>] el0_svc_common.constprop.0+0xfc/0x394
+      [<00000000313647bf>] do_el0_svc+0xac/0xec
+      [<000000002ef1c651>] el0_svc+0x20/0x30
+      [<000000002fd4692a>] el0_sync_handler+0xb0/0xb4
+      [<000000000c309c35>] el0_sync+0x160/0x180
+
+The root cause is described as follows:
+
+  __tracing_open() {  // 1. File 'trace' is being opened;
+    ...
+    *iter->trace = *tr->current_trace;  // 2. Tracer 'function_graph' is
+                                        //    currently set;
+    ...
+    iter->trace->open(iter);  // 3. Call graph_trace_open() here,
+                              //    and memory are allocated in it;
+    ...
+  }
+
+  s_start() {  // 4. The opened file is being read;
+    ...
+    *iter->trace = *tr->current_trace;  // 5. If tracer is switched to
+                                        //    'nop' or others, then memory
+                                        //    in step 3 are leaked!!!
+    ...
+  }
+
+To fix it, in s_start(), close tracer before switching then reopen the
+new tracer after switching. And some tracers like 'wakeup' may not update
+'iter->private' in some cases when reopen, then it should be cleared
+to avoid being mistakenly closed again.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230817125539.1646321-1-zhengyejian1@huawei.com
+
+Fixes: d7350c3f4569 ("tracing/core: make the read callbacks reentrants")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c              | 9 ++++++++-
+ kernel/trace/trace_irqsoff.c      | 3 ++-
+ kernel/trace/trace_sched_wakeup.c | 2 ++
+ 3 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index edc58133ed5ed..8769cd18f622f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4108,8 +4108,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+        * will point to the same string as current_trace->name.
+        */
+       mutex_lock(&trace_types_lock);
+-      if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++      if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++              /* Close iter->trace before switching to the new current tracer */
++              if (iter->trace->close)
++                      iter->trace->close(iter);
+               *iter->trace = *tr->current_trace;
++              /* Reopen the new current tracer */
++              if (iter->trace->open)
++                      iter->trace->open(iter);
++      }
+       mutex_unlock(&trace_types_lock);
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 590b3d51afae9..ba37f768e2f27 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+       if (is_graph(iter->tr))
+               graph_trace_open(iter);
+-
++      else
++              iter->private = NULL;
+ }
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 2402de520eca7..b239bfaa51ae8 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+       if (is_graph(iter->tr))
+               graph_trace_open(iter);
++      else
++              iter->private = NULL;
+ }
+ static void wakeup_trace_close(struct trace_iterator *iter)
+-- 
+2.40.1
+
diff --git a/queue-5.15/xprtrdma-remap-receive-buffers-after-a-reconnect.patch b/queue-5.15/xprtrdma-remap-receive-buffers-after-a-reconnect.patch
new file mode 100644 (file)
index 0000000..6d73982
--- /dev/null
@@ -0,0 +1,60 @@
+From dbf73477dd9ee74b240350d6442be500fbcfd98f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jul 2023 14:18:29 -0400
+Subject: xprtrdma: Remap Receive buffers after a reconnect
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 895cedc1791916e8a98864f12b656702fad0bb67 ]
+
+On server-initiated disconnect, rpcrdma_xprt_disconnect() was DMA-
+unmapping the Receive buffers, but rpcrdma_post_recvs() neglected
+to remap them after a new connection had been established. The
+result was immediate failure of the new connection with the Receives
+flushing with LOCAL_PROT_ERR.
+
+Fixes: 671c450b6fe0 ("xprtrdma: Fix oops in Receive handler after device removal")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtrdma/verbs.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 507ba8b799920..41095a278f798 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -962,9 +962,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+       if (!rep->rr_rdmabuf)
+               goto out_free;
+-      if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
+-              goto out_free_regbuf;
+-
+       rep->rr_cid.ci_completion_id =
+               atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);
+@@ -983,8 +980,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+       spin_unlock(&buf->rb_lock);
+       return rep;
+-out_free_regbuf:
+-      rpcrdma_regbuf_free(rep->rr_rdmabuf);
+ out_free:
+       kfree(rep);
+ out:
+@@ -1391,6 +1386,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
+                       rep = rpcrdma_rep_create(r_xprt, temp);
+               if (!rep)
+                       break;
++              if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
++                      rpcrdma_rep_put(buf, rep);
++                      break;
++              }
+               rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
+               trace_xprtrdma_post_recv(rep);
+-- 
+2.40.1
+