]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.6
authorSasha Levin <sashal@kernel.org>
Sun, 16 Jun 2024 02:01:15 +0000 (22:01 -0400)
committerSasha Levin <sashal@kernel.org>
Sun, 16 Jun 2024 02:01:15 +0000 (22:01 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
81 files changed:
queue-6.6/af_unix-annodate-data-races-around-sk-sk_state-for-w.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-race-of-net-unx.sysctl_max_dgr.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-race-of-sk-sk_shutdown-in-sk_d.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_in.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch-2156 [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-races-around-sk-sk_sndbuf.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-se.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-un.patch [new file with mode: 0644]
queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-un.patch-11288 [new file with mode: 0644]
queue-6.6/af_unix-set-sk-sk_state-under-unix_state_lock-for-tr.patch [new file with mode: 0644]
queue-6.6/af_unix-use-skb_queue_empty_lockless-in-unix_release.patch [new file with mode: 0644]
queue-6.6/af_unix-use-skb_queue_len_lockless-in-sk_diag_show_r.patch [new file with mode: 0644]
queue-6.6/af_unix-use-unix_recvq_full_lockless-in-unix_stream_.patch [new file with mode: 0644]
queue-6.6/ax25-fix-refcount-imbalance-on-inbound-connections.patch [new file with mode: 0644]
queue-6.6/ax25-replace-kfree-in-ax25_dev_free-with-ax25_dev_pu.patch [new file with mode: 0644]
queue-6.6/bpf-fix-a-potential-use-after-free-in-bpf_link_free.patch [new file with mode: 0644]
queue-6.6/bpf-fix-multi-uprobe-pid-filtering-logic.patch [new file with mode: 0644]
queue-6.6/bpf-optimize-the-free-of-inner-map.patch [new file with mode: 0644]
queue-6.6/bpf-set-run-context-for-rawtp-test_run-callback.patch [new file with mode: 0644]
queue-6.6/bpf-store-ref_ctr_offsets-values-in-bpf_uprobe-array.patch [new file with mode: 0644]
queue-6.6/eventfs-update-all-the-eventfs_inodes-from-the-event.patch [new file with mode: 0644]
queue-6.6/ext4-avoid-overflow-when-setting-values-via-sysfs.patch [new file with mode: 0644]
queue-6.6/ext4-fix-slab-out-of-bounds-in-ext4_mb_find_good_gro.patch [new file with mode: 0644]
queue-6.6/ext4-refactor-out-ext4_generic_attr_show.patch [new file with mode: 0644]
queue-6.6/firmware-qcom_scm-disable-clocks-if-qcom_scm_bw_enab.patch [new file with mode: 0644]
queue-6.6/ice-add-flag-to-distinguish-reset-from-.ndo_bpf-in-x.patch [new file with mode: 0644]
queue-6.6/ice-fix-iteration-of-tlvs-in-preserved-fields-area.patch [new file with mode: 0644]
queue-6.6/ice-remove-af_xdp_zc_qps-bitmap.patch [new file with mode: 0644]
queue-6.6/ipv6-fix-possible-race-in-__fib6_drop_pcpu_from.patch [new file with mode: 0644]
queue-6.6/ipv6-ioam-block-bh-from-ioam6_output.patch [new file with mode: 0644]
queue-6.6/ipv6-sr-block-bh-in-seg6_output_core-and-seg6_input_.patch [new file with mode: 0644]
queue-6.6/irqchip-riscv-intc-allow-large-non-standard-interrup.patch [new file with mode: 0644]
queue-6.6/irqchip-riscv-intc-introduce-andes-hart-level-interr.patch [new file with mode: 0644]
queue-6.6/irqchip-riscv-intc-prevent-memory-leak-when-riscv_in.patch [new file with mode: 0644]
queue-6.6/ksmbd-use-rwsem-instead-of-rwlock-for-lease-break.patch [new file with mode: 0644]
queue-6.6/kvm-sev-do-not-intercept-accesses-to-msr_ia32_xss-fo.patch [new file with mode: 0644]
queue-6.6/kvm-sev-es-delegate-lbr-virtualization-to-the-proces.patch [new file with mode: 0644]
queue-6.6/kvm-sev-es-disallow-sev-es-guests-when-x86_feature_l.patch [new file with mode: 0644]
queue-6.6/memory-failure-use-a-folio-in-me_huge_page.patch [new file with mode: 0644]
queue-6.6/mm-memory-failure-fix-handling-of-dissolved-but-not-.patch [new file with mode: 0644]
queue-6.6/mptcp-count-close-wait-sockets-for-mptcp_mib_currest.patch [new file with mode: 0644]
queue-6.6/net-ethtool-fix-the-error-condition-in-ethtool_get_p.patch [new file with mode: 0644]
queue-6.6/net-mlx5-always-stop-health-timer-during-driver-remo.patch [new file with mode: 0644]
queue-6.6/net-mlx5-fix-tainted-pointer-delete-is-case-of-flow-.patch [new file with mode: 0644]
queue-6.6/net-mlx5-stop-waiting-for-pci-if-pci-channel-is-offl.patch [new file with mode: 0644]
queue-6.6/net-ncsi-fix-the-multi-thread-manner-of-ncsi-driver.patch [new file with mode: 0644]
queue-6.6/net-ncsi-simplify-kconfig-dts-control-flow.patch [new file with mode: 0644]
queue-6.6/net-phy-micrel-fix-ksz9477-phy-issues-after-suspend-.patch [new file with mode: 0644]
queue-6.6/net-phy-micrel-ksz8061-fix-errata-solution-not-takin.patch [new file with mode: 0644]
queue-6.6/net-sched-sch_multiq-fix-possible-oob-write-in-multi.patch [new file with mode: 0644]
queue-6.6/net-sched-taprio-always-validate-tca_taprio_attr_pri.patch [new file with mode: 0644]
queue-6.6/net-smc-avoid-overwriting-when-adjusting-sock-bufsiz.patch [new file with mode: 0644]
queue-6.6/net-tls-fix-marking-packets-as-decrypted.patch [new file with mode: 0644]
queue-6.6/net-wwan-iosm-fix-tainted-pointer-delete-is-case-of-.patch [new file with mode: 0644]
queue-6.6/nilfs2-fix-nilfs_empty_dir-misjudgment-and-long-loop.patch [new file with mode: 0644]
queue-6.6/nilfs2-return-the-mapped-address-from-nilfs_get_page.patch [new file with mode: 0644]
queue-6.6/octeontx2-af-always-allocate-pf-entries-from-low-pri.patch [new file with mode: 0644]
queue-6.6/ptp-fix-error-message-on-failed-pin-verification.patch [new file with mode: 0644]
queue-6.6/risc-v-kvm-fix-incorrect-reg_subtype-labels-in-kvm_r.patch [new file with mode: 0644]
queue-6.6/risc-v-kvm-no-need-to-use-mask-when-hart-index-bit-i.patch [new file with mode: 0644]
queue-6.6/scsi-ufs-mcq-fix-error-output-and-clean-up-ufshcd_mc.patch [new file with mode: 0644]
queue-6.6/selftests-mm-compaction_test-fix-bogus-test-success-.patch [new file with mode: 0644]
queue-6.6/selftests-mm-conform-test-to-tap-format-output.patch [new file with mode: 0644]
queue-6.6/selftests-mm-log-a-consistent-test-name-for-check_co.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/tcp-count-close-wait-sockets-for-tcp_mib_currestab.patch [new file with mode: 0644]
queue-6.6/vmxnet3-disable-rx-data-ring-on-dma-allocation-failu.patch [new file with mode: 0644]
queue-6.6/vxlan-fix-regression-when-dropping-packets-due-to-in.patch [new file with mode: 0644]
queue-6.6/wifi-cfg80211-fully-move-wiphy-work-to-unbound-workq.patch [new file with mode: 0644]
queue-6.6/wifi-cfg80211-lock-wiphy-in-cfg80211_get_station.patch [new file with mode: 0644]
queue-6.6/wifi-cfg80211-pmsr-use-correct-nla_get_ux-functions.patch [new file with mode: 0644]
queue-6.6/wifi-iwlwifi-dbg_ini-move-iwl_dbg_tlv_free-outside-o.patch [new file with mode: 0644]
queue-6.6/wifi-iwlwifi-mvm-check-n_ssids-before-accessing-the-.patch [new file with mode: 0644]
queue-6.6/wifi-iwlwifi-mvm-don-t-initialize-csa_work-twice.patch [new file with mode: 0644]
queue-6.6/wifi-iwlwifi-mvm-don-t-read-past-the-mfuart-notifcat.patch [new file with mode: 0644]
queue-6.6/wifi-iwlwifi-mvm-revert-gen2-tx-a-mpdu-size-to-64.patch [new file with mode: 0644]
queue-6.6/wifi-iwlwifi-mvm-set-properly-mac-header.patch [new file with mode: 0644]
queue-6.6/wifi-mac80211-correctly-parse-spatial-reuse-paramete.patch [new file with mode: 0644]
queue-6.6/wifi-mac80211-fix-deadlock-in-ieee80211_sta_ps_deliv.patch [new file with mode: 0644]
queue-6.6/wifi-mac80211-mesh-fix-leak-of-mesh_preq_queue-objec.patch [new file with mode: 0644]

diff --git a/queue-6.6/af_unix-annodate-data-races-around-sk-sk_state-for-w.patch b/queue-6.6/af_unix-annodate-data-races-around-sk-sk_state-for-w.patch
new file mode 100644 (file)
index 0000000..7c6aac2
--- /dev/null
@@ -0,0 +1,88 @@
+From 7396984c57611b2d5fa829452d2a13999a574330 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:28 -0700
+Subject: af_unix: Annodate data-races around sk->sk_state for writers.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 942238f9735a4a4ebf8274b218d9a910158941d1 ]
+
+sk->sk_state is changed under unix_state_lock(), but it's read locklessly
+in many places.
+
+This patch adds WRITE_ONCE() on the writer side.
+
+We will add READ_ONCE() to the lockless readers in the following patches.
+
+Fixes: 83301b5367a9 ("af_unix: Set TCP_ESTABLISHED for datagram sockets too")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 348f9e34f6696..bd2af62f58605 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -617,7 +617,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+       u->path.dentry = NULL;
+       u->path.mnt = NULL;
+       state = sk->sk_state;
+-      sk->sk_state = TCP_CLOSE;
++      WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+       skpair = unix_peer(sk);
+       unix_peer(sk) = NULL;
+@@ -739,7 +739,8 @@ static int unix_listen(struct socket *sock, int backlog)
+       if (backlog > sk->sk_max_ack_backlog)
+               wake_up_interruptible_all(&u->peer_wait);
+       sk->sk_max_ack_backlog  = backlog;
+-      sk->sk_state            = TCP_LISTEN;
++      WRITE_ONCE(sk->sk_state, TCP_LISTEN);
++
+       /* set credentials so connect can copy them */
+       init_peercred(sk);
+       err = 0;
+@@ -1411,7 +1412,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+               if (err)
+                       goto out_unlock;
+-              sk->sk_state = other->sk_state = TCP_ESTABLISHED;
++              WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
++              WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
+       } else {
+               /*
+                *      1003.1g breaking connected state with AF_UNSPEC
+@@ -1428,7 +1430,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+               unix_peer(sk) = other;
+               if (!other)
+-                      sk->sk_state = TCP_CLOSE;
++                      WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+               unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+               unix_state_double_unlock(sk, other);
+@@ -1644,7 +1646,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+       copy_peercred(sk, other);
+       sock->state     = SS_CONNECTED;
+-      sk->sk_state    = TCP_ESTABLISHED;
++      WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+       sock_hold(newsk);
+       smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
+@@ -2027,7 +2029,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+                       unix_peer(sk) = NULL;
+                       unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+-                      sk->sk_state = TCP_CLOSE;
++                      WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+                       unix_state_unlock(sk);
+                       unix_dgram_disconnected(sk, other);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-race-of-net-unx.sysctl_max_dgr.patch b/queue-6.6/af_unix-annotate-data-race-of-net-unx.sysctl_max_dgr.patch
new file mode 100644 (file)
index 0000000..7eebec2
--- /dev/null
@@ -0,0 +1,38 @@
+From 0c145926e227ed63ed75060455dfe2b5c50ff4f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:37 -0700
+Subject: af_unix: Annotate data-race of net->unx.sysctl_max_dgram_qlen.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit bd9f2d05731f6a112d0c7391a0d537bfc588dbe6 ]
+
+net->unx.sysctl_max_dgram_qlen is exposed as a sysctl knob and can be
+changed concurrently.
+
+Let's use READ_ONCE() in unix_create1().
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 4640497c29da4..2b35c517be718 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -990,7 +990,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
+       sk->sk_hash             = unix_unbound_hash(sk);
+       sk->sk_allocation       = GFP_KERNEL_ACCOUNT;
+       sk->sk_write_space      = unix_write_space;
+-      sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
++      sk->sk_max_ack_backlog  = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
+       sk->sk_destruct         = unix_sock_destructor;
+       u = unix_sk(sk);
+       u->inflight = 0;
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-race-of-sk-sk_shutdown-in-sk_d.patch b/queue-6.6/af_unix-annotate-data-race-of-sk-sk_shutdown-in-sk_d.patch
new file mode 100644 (file)
index 0000000..4acd7ea
--- /dev/null
@@ -0,0 +1,37 @@
+From 77bbbdb45d0c9fb6b4366fcd23f3e79327a4415e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:41 -0700
+Subject: af_unix: Annotate data-race of sk->sk_shutdown in sk_diag_fill().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit efaf24e30ec39ebbea9112227485805a48b0ceb1 ]
+
+While dumping sockets via UNIX_DIAG, we do not hold unix_state_lock().
+
+Let's use READ_ONCE() to read sk->sk_shutdown.
+
+Fixes: e4e541a84863 ("sock-diag: Report shutdown for inet and unix sockets (v2)")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/diag.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index fc56244214c30..1de7500b41b61 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+           sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+               goto out_nlmsg_trim;
+-      if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
++      if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
+               goto out_nlmsg_trim;
+       if ((req->udiag_show & UDIAG_SHOW_UID) &&
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_in.patch b/queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_in.patch
new file mode 100644 (file)
index 0000000..6165b0a
--- /dev/null
@@ -0,0 +1,50 @@
+From ae32731906e3462330643d827d6fb88c170bf792 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:29 -0700
+Subject: af_unix: Annotate data-race of sk->sk_state in unix_inq_len().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 3a0f38eb285c8c2eead4b3230c7ac2983707599d ]
+
+ioctl(SIOCINQ) calls unix_inq_len() that checks sk->sk_state first
+and returns -EINVAL if it's TCP_LISTEN.
+
+Then, for SOCK_STREAM sockets, unix_inq_len() returns the number of
+bytes in recvq.
+
+However, unix_inq_len() does not hold unix_state_lock(), and the
+concurrent listen() might change the state after checking sk->sk_state.
+
+If the race occurs, 0 is returned for the listener, instead of -EINVAL,
+because the length of skb with embryo is 0.
+
+We could hold unix_state_lock() in unix_inq_len(), but it's overkill
+given the result is true for pre-listen() TCP_CLOSE state.
+
+So, let's use READ_ONCE() for sk->sk_state in unix_inq_len().
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index bd2af62f58605..8d0918a112a9d 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2983,7 +2983,7 @@ long unix_inq_len(struct sock *sk)
+       struct sk_buff *skb;
+       long amount = 0;
+-      if (sk->sk_state == TCP_LISTEN)
++      if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
+               return -EINVAL;
+       spin_lock(&sk->sk_receive_queue.lock);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch b/queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch
new file mode 100644 (file)
index 0000000..247bd10
--- /dev/null
@@ -0,0 +1,60 @@
+From 9ca458fd70435e9c3615e46bf42aa05e30144e1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:31 -0700
+Subject: af_unix: Annotate data-race of sk->sk_state in unix_stream_connect().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit a9bf9c7dc6a5899c01cb8f6e773a66315a5cd4b7 ]
+
+As small optimisation, unix_stream_connect() prefetches the client's
+sk->sk_state without unix_state_lock() and checks if it's TCP_CLOSE.
+
+Later, sk->sk_state is checked again under unix_state_lock().
+
+Let's use READ_ONCE() for the first check and TCP_CLOSE directly for
+the second check.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 4a43091c95419..53d67d540a574 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1491,7 +1491,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+       struct sk_buff *skb = NULL;
+       long timeo;
+       int err;
+-      int st;
+       err = unix_validate_addr(sunaddr, addr_len);
+       if (err)
+@@ -1577,9 +1576,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+          Well, and we have to recheck the state after socket locked.
+        */
+-      st = sk->sk_state;
+-
+-      switch (st) {
++      switch (READ_ONCE(sk->sk_state)) {
+       case TCP_CLOSE:
+               /* This is ok... continue with connect */
+               break;
+@@ -1594,7 +1591,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+       unix_state_lock_nested(sk, U_LOCK_SECOND);
+-      if (sk->sk_state != st) {
++      if (sk->sk_state != TCP_CLOSE) {
+               unix_state_unlock(sk);
+               unix_state_unlock(other);
+               sock_put(other);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch-2156 b/queue-6.6/af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch-2156
new file mode 100644 (file)
index 0000000..6e6889f
--- /dev/null
@@ -0,0 +1,39 @@
+From bf16a061d15cca7a40cde9ac0dc06f4d9452d3a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:34 -0700
+Subject: af_unix: Annotate data-race of sk->sk_state in
+ unix_stream_read_skb().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit af4c733b6b1aded4dc808fafece7dfe6e9d2ebb3 ]
+
+unix_stream_read_skb() is called from sk->sk_data_ready() context
+where unix_state_lock() is not held.
+
+Let's use READ_ONCE() there.
+
+Fixes: 77462de14a43 ("af_unix: Add read_sock for stream socket types")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index dfa013283f478..2299a464c602e 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2630,7 +2630,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+-      if (unlikely(sk->sk_state != TCP_ESTABLISHED))
++      if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
+               return -ENOTCONN;
+       return unix_read_skb(sk, recv_actor);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-races-around-sk-sk_sndbuf.patch b/queue-6.6/af_unix-annotate-data-races-around-sk-sk_sndbuf.patch
new file mode 100644 (file)
index 0000000..46ce515
--- /dev/null
@@ -0,0 +1,57 @@
+From 690aaeb9c70d88aa1165eec8297415ca0cf130e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:36 -0700
+Subject: af_unix: Annotate data-races around sk->sk_sndbuf.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit b0632e53e0da8054e36bc973f0eec69d30f1b7c6 ]
+
+sk_setsockopt() changes sk->sk_sndbuf under lock_sock(), but it's
+not used in af_unix.c.
+
+Let's use READ_ONCE() to read sk->sk_sndbuf in unix_writable(),
+unix_dgram_sendmsg(), and unix_stream_sendmsg().
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2299a464c602e..4640497c29da4 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -534,7 +534,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+ static int unix_writable(const struct sock *sk, unsigned char state)
+ {
+       return state != TCP_LISTEN &&
+-             (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
++              (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
+ }
+ static void unix_write_space(struct sock *sk)
+@@ -1944,7 +1944,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+       }
+       err = -EMSGSIZE;
+-      if (len > sk->sk_sndbuf - 32)
++      if (len > READ_ONCE(sk->sk_sndbuf) - 32)
+               goto out;
+       if (len > SKB_MAX_ALLOC) {
+@@ -2223,7 +2223,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+                                                  &err, 0);
+               } else {
+                       /* Keep two messages in the pipe so it schedules better */
+-                      size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
++                      size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
+                       /* allow fallback to order-0 allocations */
+                       size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-se.patch b/queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-se.patch
new file mode 100644 (file)
index 0000000..e781ee6
--- /dev/null
@@ -0,0 +1,72 @@
+From ba60cfb238d5a856abaf9d6ebde0b0b5be1bfd03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:33 -0700
+Subject: af_unix: Annotate data-races around sk->sk_state in sendmsg() and
+ recvmsg().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 8a34d4e8d9742a24f74998f45a6a98edd923319b ]
+
+The following functions read sk->sk_state locklessly and proceed only if
+the state is TCP_ESTABLISHED.
+
+  * unix_stream_sendmsg
+  * unix_stream_read_generic
+  * unix_seqpacket_sendmsg
+  * unix_seqpacket_recvmsg
+
+Let's use READ_ONCE() there.
+
+Fixes: a05d2ad1c1f3 ("af_unix: Only allow recv on connected seqpacket sockets.")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 53d67d540a574..dfa013283f478 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2202,7 +2202,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+       }
+       if (msg->msg_namelen) {
+-              err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
++              err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+               goto out_err;
+       } else {
+               err = -ENOTCONN;
+@@ -2316,7 +2316,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
+       if (err)
+               return err;
+-      if (sk->sk_state != TCP_ESTABLISHED)
++      if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+               return -ENOTCONN;
+       if (msg->msg_namelen)
+@@ -2330,7 +2330,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+       struct sock *sk = sock->sk;
+-      if (sk->sk_state != TCP_ESTABLISHED)
++      if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+               return -ENOTCONN;
+       return unix_dgram_recvmsg(sock, msg, size, flags);
+@@ -2654,7 +2654,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+       size_t size = state->size;
+       unsigned int last_len;
+-      if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++      if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
+               err = -EINVAL;
+               goto out;
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-un.patch b/queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-un.patch
new file mode 100644 (file)
index 0000000..6628a3b
--- /dev/null
@@ -0,0 +1,128 @@
+From 96f29f372dfc8cc6f4de99a692013e957b8089e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:30 -0700
+Subject: af_unix: Annotate data-races around sk->sk_state in
+ unix_write_space() and poll().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit eb0718fb3e97ad0d6f4529b810103451c90adf94 ]
+
+unix_poll() and unix_dgram_poll() read sk->sk_state locklessly and
+calls unix_writable() which also reads sk->sk_state without holding
+unix_state_lock().
+
+Let's use READ_ONCE() in unix_poll() and unix_dgram_poll() and pass
+it to unix_writable().
+
+While at it, we remove TCP_SYN_SENT check in unix_dgram_poll() as
+that state does not exist for AF_UNIX socket since the code was added.
+
+Fixes: 1586a5877db9 ("af_unix: do not report POLLOUT on listeners")
+Fixes: 3c73419c09a5 ("af_unix: fix 'poll for write'/ connected DGRAM sockets")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 25 ++++++++++++-------------
+ 1 file changed, 12 insertions(+), 13 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 8d0918a112a9d..4a43091c95419 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -531,9 +531,9 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+       return 0;
+ }
+-static int unix_writable(const struct sock *sk)
++static int unix_writable(const struct sock *sk, unsigned char state)
+ {
+-      return sk->sk_state != TCP_LISTEN &&
++      return state != TCP_LISTEN &&
+              (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ }
+@@ -542,7 +542,7 @@ static void unix_write_space(struct sock *sk)
+       struct socket_wq *wq;
+       rcu_read_lock();
+-      if (unix_writable(sk)) {
++      if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
+               wq = rcu_dereference(sk->sk_wq);
+               if (skwq_has_sleeper(wq))
+                       wake_up_interruptible_sync_poll(&wq->wait,
+@@ -3095,12 +3095,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
+ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+ {
+       struct sock *sk = sock->sk;
++      unsigned char state;
+       __poll_t mask;
+       u8 shutdown;
+       sock_poll_wait(file, sock, wait);
+       mask = 0;
+       shutdown = READ_ONCE(sk->sk_shutdown);
++      state = READ_ONCE(sk->sk_state);
+       /* exceptional events? */
+       if (READ_ONCE(sk->sk_err))
+@@ -3122,14 +3124,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
+       /* Connection-based need to check for termination and startup */
+       if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+-          sk->sk_state == TCP_CLOSE)
++          state == TCP_CLOSE)
+               mask |= EPOLLHUP;
+       /*
+        * we set writable also when the other side has shut down the
+        * connection. This prevents stuck sockets.
+        */
+-      if (unix_writable(sk))
++      if (unix_writable(sk, state))
+               mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+       return mask;
+@@ -3140,12 +3142,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ {
+       struct sock *sk = sock->sk, *other;
+       unsigned int writable;
++      unsigned char state;
+       __poll_t mask;
+       u8 shutdown;
+       sock_poll_wait(file, sock, wait);
+       mask = 0;
+       shutdown = READ_ONCE(sk->sk_shutdown);
++      state = READ_ONCE(sk->sk_state);
+       /* exceptional events? */
+       if (READ_ONCE(sk->sk_err) ||
+@@ -3165,19 +3169,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+               mask |= EPOLLIN | EPOLLRDNORM;
+       /* Connection-based need to check for termination and startup */
+-      if (sk->sk_type == SOCK_SEQPACKET) {
+-              if (sk->sk_state == TCP_CLOSE)
+-                      mask |= EPOLLHUP;
+-              /* connection hasn't started yet? */
+-              if (sk->sk_state == TCP_SYN_SENT)
+-                      return mask;
+-      }
++      if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
++              mask |= EPOLLHUP;
+       /* No write status requested, avoid expensive OUT tests. */
+       if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+               return mask;
+-      writable = unix_writable(sk);
++      writable = unix_writable(sk, state);
+       if (writable) {
+               unix_state_lock(sk);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-un.patch-11288 b/queue-6.6/af_unix-annotate-data-races-around-sk-sk_state-in-un.patch-11288
new file mode 100644 (file)
index 0000000..ef27550
--- /dev/null
@@ -0,0 +1,71 @@
+From 87d9c159f3e859a47af2df9f05481b8d4086b95e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:35 -0700
+Subject: af_unix: Annotate data-races around sk->sk_state in UNIX_DIAG.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 0aa3be7b3e1f8f997312cc4705f8165e02806f8f ]
+
+While dumping AF_UNIX sockets via UNIX_DIAG, sk->sk_state is read
+locklessly.
+
+Let's use READ_ONCE() there.
+
+Note that the result could be inconsistent if the socket is dumped
+during the state change.  This is common for other SOCK_DIAG and
+similar interfaces.
+
+Fixes: c9da99e6475f ("unix_diag: Fixup RQLEN extension report")
+Fixes: 2aac7a2cb0d9 ("unix_diag: Pending connections IDs NLA")
+Fixes: 45a96b9be6ec ("unix_diag: Dumping all sockets core")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/diag.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 3438b7af09af5..9151c72e742fc 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+       u32 *buf;
+       int i;
+-      if (sk->sk_state == TCP_LISTEN) {
++      if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+               spin_lock(&sk->sk_receive_queue.lock);
+               attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+@@ -103,7 +103,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+ {
+       struct unix_diag_rqlen rql;
+-      if (sk->sk_state == TCP_LISTEN) {
++      if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+               rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+               rql.udiag_wqueue = sk->sk_max_ack_backlog;
+       } else {
+@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+       rep = nlmsg_data(nlh);
+       rep->udiag_family = AF_UNIX;
+       rep->udiag_type = sk->sk_type;
+-      rep->udiag_state = sk->sk_state;
++      rep->udiag_state = READ_ONCE(sk->sk_state);
+       rep->pad = 0;
+       rep->udiag_ino = sk_ino;
+       sock_diag_save_cookie(sk, rep->udiag_cookie);
+@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+               sk_for_each(sk, &net->unx.table.buckets[slot]) {
+                       if (num < s_num)
+                               goto next;
+-                      if (!(req->udiag_states & (1 << sk->sk_state)))
++                      if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
+                               goto next;
+                       if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+                                        NETLINK_CB(cb->skb).portid,
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-set-sk-sk_state-under-unix_state_lock-for-tr.patch b/queue-6.6/af_unix-set-sk-sk_state-under-unix_state_lock-for-tr.patch
new file mode 100644 (file)
index 0000000..d572c25
--- /dev/null
@@ -0,0 +1,90 @@
+From e4131c30c30e955025d38f3eddc42b96a9b59f23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:27 -0700
+Subject: af_unix: Set sk->sk_state under unix_state_lock() for truly
+ disconencted peer.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 26bfb8b57063f52b867f9b6c8d1742fcb5bd656c ]
+
+When a SOCK_DGRAM socket connect()s to another socket, the both sockets'
+sk->sk_state are changed to TCP_ESTABLISHED so that we can register them
+to BPF SOCKMAP.
+
+When the socket disconnects from the peer by connect(AF_UNSPEC), the state
+is set back to TCP_CLOSE.
+
+Then, the peer's state is also set to TCP_CLOSE, but the update is done
+locklessly and unconditionally.
+
+Let's say socket A connect()ed to B, B connect()ed to C, and A disconnects
+from B.
+
+After the first two connect()s, all three sockets' sk->sk_state are
+TCP_ESTABLISHED:
+
+  $ ss -xa
+  Netid State  Recv-Q Send-Q  Local Address:Port  Peer Address:PortProcess
+  u_dgr ESTAB  0      0       @A 641              * 642
+  u_dgr ESTAB  0      0       @B 642              * 643
+  u_dgr ESTAB  0      0       @C 643              * 0
+
+And after the disconnect, B's state is TCP_CLOSE even though it's still
+connected to C and C's state is TCP_ESTABLISHED.
+
+  $ ss -xa
+  Netid State  Recv-Q Send-Q  Local Address:Port  Peer Address:PortProcess
+  u_dgr UNCONN 0      0       @A 641              * 0
+  u_dgr UNCONN 0      0       @B 642              * 643
+  u_dgr ESTAB  0      0       @C 643              * 0
+
+In this case, we cannot register B to SOCKMAP.
+
+So, when a socket disconnects from the peer, we should not set TCP_CLOSE to
+the peer if the peer is connected to yet another socket, and this must be
+done under unix_state_lock().
+
+Note that we use WRITE_ONCE() for sk->sk_state as there are many lockless
+readers.  These data-races will be fixed in the following patches.
+
+Fixes: 83301b5367a9 ("af_unix: Set TCP_ESTABLISHED for datagram sockets too")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d01314dc86ecb..348f9e34f6696 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -571,7 +571,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
+                       sk_error_report(other);
+               }
+       }
+-      other->sk_state = TCP_CLOSE;
+ }
+ static void unix_sock_destructor(struct sock *sk)
+@@ -1434,8 +1433,15 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+               unix_state_double_unlock(sk, other);
+-              if (other != old_peer)
++              if (other != old_peer) {
+                       unix_dgram_disconnected(sk, old_peer);
++
++                      unix_state_lock(old_peer);
++                      if (!unix_peer(old_peer))
++                              WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
++                      unix_state_unlock(old_peer);
++              }
++
+               sock_put(old_peer);
+       } else {
+               unix_peer(sk) = other;
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-use-skb_queue_empty_lockless-in-unix_release.patch b/queue-6.6/af_unix-use-skb_queue_empty_lockless-in-unix_release.patch
new file mode 100644 (file)
index 0000000..e337c2d
--- /dev/null
@@ -0,0 +1,44 @@
+From 8b8be27125e368ab6283a839d16c5082142886e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:39 -0700
+Subject: af_unix: Use skb_queue_empty_lockless() in unix_release_sock().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 83690b82d228b3570565ebd0b41873933238b97f ]
+
+If the socket type is SOCK_STREAM or SOCK_SEQPACKET, unix_release_sock()
+checks the length of the peer socket's recvq under unix_state_lock().
+
+However, unix_stream_read_generic() calls skb_unlink() after releasing
+the lock.  Also, for SOCK_SEQPACKET, __skb_try_recv_datagram() unlinks
+skb without unix_state_lock().
+
+Thues, unix_state_lock() does not protect qlen.
+
+Let's use skb_queue_empty_lockless() in unix_release_sock().
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index ea68472847cae..e6395647558af 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -632,7 +632,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+                       unix_state_lock(skpair);
+                       /* No more writes */
+                       WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+-                      if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
++                      if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
+                               WRITE_ONCE(skpair->sk_err, ECONNRESET);
+                       unix_state_unlock(skpair);
+                       skpair->sk_state_change(skpair);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-use-skb_queue_len_lockless-in-sk_diag_show_r.patch b/queue-6.6/af_unix-use-skb_queue_len_lockless-in-sk_diag_show_r.patch
new file mode 100644 (file)
index 0000000..a6b07e9
--- /dev/null
@@ -0,0 +1,41 @@
+From 37088efe544f1bc7ccb2891d6235a1127d7cccb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:40 -0700
+Subject: af_unix: Use skb_queue_len_lockless() in sk_diag_show_rqlen().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 5d915e584d8408211d4567c22685aae8820bfc55 ]
+
+We can dump the socket queue length via UNIX_DIAG by specifying
+UDIAG_SHOW_RQLEN.
+
+If sk->sk_state is TCP_LISTEN, we return the recv queue length,
+but here we do not hold recvq lock.
+
+Let's use skb_queue_len_lockless() in sk_diag_show_rqlen().
+
+Fixes: c9da99e6475f ("unix_diag: Fixup RQLEN extension report")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/diag.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 9151c72e742fc..fc56244214c30 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -104,7 +104,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+       struct unix_diag_rqlen rql;
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+-              rql.udiag_rqueue = sk->sk_receive_queue.qlen;
++              rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
+               rql.udiag_wqueue = sk->sk_max_ack_backlog;
+       } else {
+               rql.udiag_rqueue = (u32) unix_inq_len(sk);
+-- 
+2.43.0
+
diff --git a/queue-6.6/af_unix-use-unix_recvq_full_lockless-in-unix_stream_.patch b/queue-6.6/af_unix-use-unix_recvq_full_lockless-in-unix_stream_.patch
new file mode 100644 (file)
index 0000000..888c7f8
--- /dev/null
@@ -0,0 +1,72 @@
+From 245b65fb304bb39a7fc47d9c337a404606ae6779 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 09:52:38 -0700
+Subject: af_unix: Use unix_recvq_full_lockless() in unix_stream_connect().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 45d872f0e65593176d880ec148f41ad7c02e40a7 ]
+
+Once sk->sk_state is changed to TCP_LISTEN, it never changes.
+
+unix_accept() takes advantage of this characteristics; it does not
+hold the listener's unix_state_lock() and only acquires recvq lock
+to pop one skb.
+
+It means unix_state_lock() does not prevent the queue length from
+changing in unix_stream_connect().
+
+Thus, we need to use unix_recvq_full_lockless() to avoid data-race.
+
+Now we remove unix_recvq_full() as no one uses it.
+
+Note that we can remove READ_ONCE() for sk->sk_max_ack_backlog in
+unix_recvq_full_lockless() because of the following reasons:
+
+  (1) For SOCK_DGRAM, it is a written-once field in unix_create1()
+
+  (2) For SOCK_STREAM and SOCK_SEQPACKET, it is changed under the
+      listener's unix_state_lock() in unix_listen(), and we hold
+      the lock in unix_stream_connect()
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 10 ++--------
+ 1 file changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2b35c517be718..ea68472847cae 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -222,15 +222,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+       return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+-static inline int unix_recvq_full(const struct sock *sk)
+-{
+-      return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+-}
+-
+ static inline int unix_recvq_full_lockless(const struct sock *sk)
+ {
+-      return skb_queue_len_lockless(&sk->sk_receive_queue) >
+-              READ_ONCE(sk->sk_max_ack_backlog);
++      return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+ struct sock *unix_peer_get(struct sock *s)
+@@ -1551,7 +1545,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+       if (other->sk_shutdown & RCV_SHUTDOWN)
+               goto out_unlock;
+-      if (unix_recvq_full(other)) {
++      if (unix_recvq_full_lockless(other)) {
+               err = -EAGAIN;
+               if (!timeo)
+                       goto out_unlock;
+-- 
+2.43.0
+
diff --git a/queue-6.6/ax25-fix-refcount-imbalance-on-inbound-connections.patch b/queue-6.6/ax25-fix-refcount-imbalance-on-inbound-connections.patch
new file mode 100644 (file)
index 0000000..b71ea7d
--- /dev/null
@@ -0,0 +1,93 @@
+From 26b7c87d3ee390beec9e9518b48acb19a4ab195b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 17:02:43 -0400
+Subject: ax25: Fix refcount imbalance on inbound connections
+
+From: Lars Kellogg-Stedman <lars@oddbit.com>
+
+[ Upstream commit 3c34fb0bd4a4237592c5ecb5b2e2531900c55774 ]
+
+When releasing a socket in ax25_release(), we call netdev_put() to
+decrease the refcount on the associated ax.25 device. However, the
+execution path for accepting an incoming connection never calls
+netdev_hold(). This imbalance leads to refcount errors, and ultimately
+to kernel crashes.
+
+A typical call trace for the above situation will start with one of the
+following errors:
+
+    refcount_t: decrement hit 0; leaking memory.
+    refcount_t: underflow; use-after-free.
+
+And will then have a trace like:
+
+    Call Trace:
+    <TASK>
+    ? show_regs+0x64/0x70
+    ? __warn+0x83/0x120
+    ? refcount_warn_saturate+0xb2/0x100
+    ? report_bug+0x158/0x190
+    ? prb_read_valid+0x20/0x30
+    ? handle_bug+0x3e/0x70
+    ? exc_invalid_op+0x1c/0x70
+    ? asm_exc_invalid_op+0x1f/0x30
+    ? refcount_warn_saturate+0xb2/0x100
+    ? refcount_warn_saturate+0xb2/0x100
+    ax25_release+0x2ad/0x360
+    __sock_release+0x35/0xa0
+    sock_close+0x19/0x20
+    [...]
+
+On reboot (or any attempt to remove the interface), the kernel gets
+stuck in an infinite loop:
+
+    unregister_netdevice: waiting for ax0 to become free. Usage count = 0
+
+This patch corrects these issues by ensuring that we call netdev_hold()
+and ax25_dev_hold() for new connections in ax25_accept(). This makes the
+logic leading to ax25_accept() match the logic for ax25_bind(): in both
+cases we increment the refcount, which is ultimately decremented in
+ax25_release().
+
+Fixes: 9fd75b66b8f6 ("ax25: Fix refcount leaks caused by ax25_cb_del()")
+Signed-off-by: Lars Kellogg-Stedman <lars@oddbit.com>
+Tested-by: Duoming Zhou <duoming@zju.edu.cn>
+Tested-by: Dan Cross <crossd@gmail.com>
+Tested-by: Chris Maness <christopher.maness@gmail.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/20240529210242.3346844-2-lars@oddbit.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ax25/af_ax25.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 9d11d26e46c0e..26a3095bec462 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ {
+       struct sk_buff *skb;
+       struct sock *newsk;
++      ax25_dev *ax25_dev;
+       DEFINE_WAIT(wait);
+       struct sock *sk;
++      ax25_cb *ax25;
+       int err = 0;
+       if (sock->state != SS_UNCONNECTED)
+@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+       kfree_skb(skb);
+       sk_acceptq_removed(sk);
+       newsock->state = SS_CONNECTED;
++      ax25 = sk_to_ax25(newsk);
++      ax25_dev = ax25->ax25_dev;
++      netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
++      ax25_dev_hold(ax25_dev);
+ out:
+       release_sock(sk);
+-- 
+2.43.0
+
diff --git a/queue-6.6/ax25-replace-kfree-in-ax25_dev_free-with-ax25_dev_pu.patch b/queue-6.6/ax25-replace-kfree-in-ax25_dev_free-with-ax25_dev_pu.patch
new file mode 100644 (file)
index 0000000..8f7e6b6
--- /dev/null
@@ -0,0 +1,39 @@
+From 76d3a341a96f5901a339ca0575679b60541ab59b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 13:17:33 +0800
+Subject: ax25: Replace kfree() in ax25_dev_free() with ax25_dev_put()
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit 166fcf86cd34e15c7f383eda4642d7a212393008 ]
+
+The object "ax25_dev" is managed by reference counting. Thus it should
+not be directly released by kfree(), replace with ax25_dev_put().
+
+Fixes: d01ffb9eee4a ("ax25: add refcount in ax25_dev to avoid UAF bugs")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/20240530051733.11416-1-duoming@zju.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ax25/ax25_dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index c9d55b99a7a57..67ae6b8c52989 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -193,7 +193,7 @@ void __exit ax25_dev_free(void)
+       list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
+               netdev_put(s->dev, &s->dev_tracker);
+               list_del(&s->list);
+-              kfree(s);
++              ax25_dev_put(s);
+       }
+       spin_unlock_bh(&ax25_dev_lock);
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/bpf-fix-a-potential-use-after-free-in-bpf_link_free.patch b/queue-6.6/bpf-fix-a-potential-use-after-free-in-bpf_link_free.patch
new file mode 100644 (file)
index 0000000..a7108f8
--- /dev/null
@@ -0,0 +1,74 @@
+From 52314dfb675481eed7aa68602187d9ed773a904f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Jun 2024 11:27:03 -0700
+Subject: bpf: Fix a potential use-after-free in bpf_link_free()
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit 2884dc7d08d98a89d8d65121524bb7533183a63a ]
+
+After commit 1a80dbcb2dba, bpf_link can be freed by
+link->ops->dealloc_deferred, but the code still tests and uses
+link->ops->dealloc afterward, which leads to a use-after-free as
+reported by syzbot. Actually, one of them should be sufficient, so
+just call one of them instead of both. Also add a WARN_ON() in case
+of any problematic implementation.
+
+Fixes: 1a80dbcb2dba ("bpf: support deferring bpf_link dealloc to after RCU grace period")
+Reported-by: syzbot+1989ee16d94720836244@syzkaller.appspotmail.com
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/bpf/20240602182703.207276-1-xiyou.wangcong@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/syscall.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index e9a68c6043ce5..65df92f5b1922 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2830,6 +2830,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
+ void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+                  const struct bpf_link_ops *ops, struct bpf_prog *prog)
+ {
++      WARN_ON(ops->dealloc && ops->dealloc_deferred);
+       atomic64_set(&link->refcnt, 1);
+       link->type = type;
+       link->id = 0;
+@@ -2888,16 +2889,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
+ /* bpf_link_free is guaranteed to be called from process context */
+ static void bpf_link_free(struct bpf_link *link)
+ {
++      const struct bpf_link_ops *ops = link->ops;
+       bool sleepable = false;
+       bpf_link_free_id(link->id);
+       if (link->prog) {
+               sleepable = link->prog->aux->sleepable;
+               /* detach BPF program, clean up used resources */
+-              link->ops->release(link);
++              ops->release(link);
+               bpf_prog_put(link->prog);
+       }
+-      if (link->ops->dealloc_deferred) {
++      if (ops->dealloc_deferred) {
+               /* schedule BPF link deallocation; if underlying BPF program
+                * is sleepable, we need to first wait for RCU tasks trace
+                * sync, then go through "classic" RCU grace period
+@@ -2906,9 +2908,8 @@ static void bpf_link_free(struct bpf_link *link)
+                       call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+               else
+                       call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
+-      }
+-      if (link->ops->dealloc)
+-              link->ops->dealloc(link);
++      } else if (ops->dealloc)
++              ops->dealloc(link);
+ }
+ static void bpf_link_put_deferred(struct work_struct *work)
+-- 
+2.43.0
+
diff --git a/queue-6.6/bpf-fix-multi-uprobe-pid-filtering-logic.patch b/queue-6.6/bpf-fix-multi-uprobe-pid-filtering-logic.patch
new file mode 100644 (file)
index 0000000..5de9ffb
--- /dev/null
@@ -0,0 +1,87 @@
+From e64fb4a765c5dfb1cd0b26f2df392c89a00ff04d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 09:33:57 -0700
+Subject: bpf: fix multi-uprobe PID filtering logic
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit 46ba0e49b64232adac35a2bc892f1710c5b0fb7f ]
+
+Current implementation of PID filtering logic for multi-uprobes in
+uprobe_prog_run() is filtering down to exact *thread*, while the intent
+for PID filtering it to filter by *process* instead. The check in
+uprobe_prog_run() also differs from the analogous one in
+uprobe_multi_link_filter() for some reason. The latter is correct,
+checking task->mm, not the task itself.
+
+Fix the check in uprobe_prog_run() to perform the same task->mm check.
+
+While doing this, we also update get_pid_task() use to use PIDTYPE_TGID
+type of lookup, given the intent is to get a representative task of an
+entire process. This doesn't change behavior, but seems more logical. It
+would hold task group leader task now, not any random thread task.
+
+Last but not least, given multi-uprobe support is half-broken due to
+this PID filtering logic (depending on whether PID filtering is
+important or not), we need to make it easy for user space consumers
+(including libbpf) to easily detect whether PID filtering logic was
+already fixed.
+
+We do it here by adding an early check on passed pid parameter. If it's
+negative (and so has no chance of being a valid PID), we return -EINVAL.
+Previous behavior would eventually return -ESRCH ("No process found"),
+given there can't be any process with negative PID. This subtle change
+won't make any practical change in behavior, but will allow applications
+to detect PID filtering fixes easily. Libbpf fixes take advantage of
+this in the next patch.
+
+Cc: stable@vger.kernel.org
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Fixes: b733eeade420 ("bpf: Add pid filter support for uprobe_multi link")
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20240521163401.3005045-2-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/bpf_trace.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 8edbafe0d4cdf..cc29bf49f7159 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -3099,7 +3099,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+       struct bpf_run_ctx *old_run_ctx;
+       int err = 0;
+-      if (link->task && current != link->task)
++      if (link->task && current->mm != link->task->mm)
+               return 0;
+       if (sleepable)
+@@ -3200,8 +3200,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+       upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
+       uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
+       cnt = attr->link_create.uprobe_multi.cnt;
++      pid = attr->link_create.uprobe_multi.pid;
+-      if (!upath || !uoffsets || !cnt)
++      if (!upath || !uoffsets || !cnt || pid < 0)
+               return -EINVAL;
+       if (cnt > MAX_UPROBE_MULTI_CNT)
+               return -E2BIG;
+@@ -3225,10 +3226,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+               goto error_path_put;
+       }
+-      pid = attr->link_create.uprobe_multi.pid;
+       if (pid) {
+               rcu_read_lock();
+-              task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
++              task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
+               rcu_read_unlock();
+               if (!task) {
+                       err = -ESRCH;
+-- 
+2.43.0
+
diff --git a/queue-6.6/bpf-optimize-the-free-of-inner-map.patch b/queue-6.6/bpf-optimize-the-free-of-inner-map.patch
new file mode 100644 (file)
index 0000000..6774ada
--- /dev/null
@@ -0,0 +1,146 @@
+From 6b63e0c5b1b4a84478ad049cb4dc08ab2ab9057a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Dec 2023 22:04:23 +0800
+Subject: bpf: Optimize the free of inner map
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit af66bfd3c8538ed21cf72af18426fc4a408665cf ]
+
+When removing the inner map from the outer map, the inner map will be
+freed after one RCU grace period and one RCU tasks trace grace
+period, so it is certain that the bpf program, which may access the
+inner map, has exited before the inner map is freed.
+
+However there is no need to wait for one RCU tasks trace grace period if
+the outer map is only accessed by non-sleepable program. So adding
+sleepable_refcnt in bpf_map and increasing sleepable_refcnt when adding
+the outer map into env->used_maps for sleepable program. Although the
+max number of bpf program is INT_MAX - 1, the number of bpf programs
+which are being loaded may be greater than INT_MAX, so using atomic64_t
+instead of atomic_t for sleepable_refcnt. When removing the inner map
+from the outer map, using sleepable_refcnt to decide whether or not a
+RCU tasks trace grace period is needed before freeing the inner map.
+
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Link: https://lore.kernel.org/r/20231204140425.1480317-6-houtao@huaweicloud.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: 2884dc7d08d9 ("bpf: Fix a potential use-after-free in bpf_link_free()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h     |  2 ++
+ kernel/bpf/core.c       |  4 ++++
+ kernel/bpf/map_in_map.c | 14 +++++++++-----
+ kernel/bpf/syscall.c    |  8 ++++++++
+ kernel/bpf/verifier.c   |  4 +++-
+ 5 files changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 2ebb5d4d43dc6..e4cd28c38b825 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -296,6 +296,8 @@ struct bpf_map {
+       bool bypass_spec_v1;
+       bool frozen; /* write-once; write-protected by freeze_mutex */
+       bool free_after_mult_rcu_gp;
++      bool free_after_rcu_gp;
++      atomic64_t sleepable_refcnt;
+       s64 __percpu *elem_count;
+ };
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 1333273a71ded..05445a4d55181 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2673,12 +2673,16 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+                         struct bpf_map **used_maps, u32 len)
+ {
+       struct bpf_map *map;
++      bool sleepable;
+       u32 i;
++      sleepable = aux->sleepable;
+       for (i = 0; i < len; i++) {
+               map = used_maps[i];
+               if (map->ops->map_poke_untrack)
+                       map->ops->map_poke_untrack(map, aux);
++              if (sleepable)
++                      atomic64_dec(&map->sleepable_refcnt);
+               bpf_map_put(map);
+       }
+ }
+diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
+index 3248ff5d81617..8ef269e66ba50 100644
+--- a/kernel/bpf/map_in_map.c
++++ b/kernel/bpf/map_in_map.c
+@@ -131,12 +131,16 @@ void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
+       struct bpf_map *inner_map = ptr;
+-      /* The inner map may still be used by both non-sleepable and sleepable
+-       * bpf program, so free it after one RCU grace period and one tasks
+-       * trace RCU grace period.
++      /* Defer the freeing of inner map according to the sleepable attribute
++       * of bpf program which owns the outer map, so unnecessary waiting for
++       * RCU tasks trace grace period can be avoided.
+        */
+-      if (need_defer)
+-              WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
++      if (need_defer) {
++              if (atomic64_read(&map->sleepable_refcnt))
++                      WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
++              else
++                      WRITE_ONCE(inner_map->free_after_rcu_gp, true);
++      }
+       bpf_map_put(inner_map);
+ }
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index e886157a9efbb..e9a68c6043ce5 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -753,8 +753,11 @@ void bpf_map_put(struct bpf_map *map)
+               /* bpf_map_free_id() must be called first */
+               bpf_map_free_id(map);
++              WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
+               if (READ_ONCE(map->free_after_mult_rcu_gp))
+                       call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
++              else if (READ_ONCE(map->free_after_rcu_gp))
++                      call_rcu(&map->rcu, bpf_map_free_rcu_gp);
+               else
+                       bpf_map_free_in_work(map);
+       }
+@@ -5358,6 +5361,11 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
+               goto out_unlock;
+       }
++      /* The bpf program will not access the bpf map, but for the sake of
++       * simplicity, increase sleepable_refcnt for sleepable program as well.
++       */
++      if (prog->aux->sleepable)
++              atomic64_inc(&map->sleepable_refcnt);
+       memcpy(used_maps_new, used_maps_old,
+              sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
+       used_maps_new[prog->aux->used_map_cnt] = map;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 24d7a32f1710e..ec0464c075bb4 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -17732,10 +17732,12 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
+                               return -E2BIG;
+                       }
++                      if (env->prog->aux->sleepable)
++                              atomic64_inc(&map->sleepable_refcnt);
+                       /* hold the map. If the program is rejected by verifier,
+                        * the map will be released by release_maps() or it
+                        * will be used by the valid program until it's unloaded
+-                       * and all maps are released in free_used_maps()
++                       * and all maps are released in bpf_free_used_maps()
+                        */
+                       bpf_map_inc(map);
+-- 
+2.43.0
+
diff --git a/queue-6.6/bpf-set-run-context-for-rawtp-test_run-callback.patch b/queue-6.6/bpf-set-run-context-for-rawtp-test_run-callback.patch
new file mode 100644 (file)
index 0000000..5114d12
--- /dev/null
@@ -0,0 +1,52 @@
+From 16792e4767e068eb6bdcbe4aa1378f9c556bf2bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 17:00:24 +0200
+Subject: bpf: Set run context for rawtp test_run callback
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit d0d1df8ba18abc57f28fb3bc053b2bf319367f2c ]
+
+syzbot reported crash when rawtp program executed through the
+test_run interface calls bpf_get_attach_cookie helper or any
+other helper that touches task->bpf_ctx pointer.
+
+Setting the run context (task->bpf_ctx pointer) for test_run
+callback.
+
+Fixes: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
+Reported-by: syzbot+3ab78ff125b7979e45f9@syzkaller.appspotmail.com
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Closes: https://syzkaller.appspot.com/bug?extid=3ab78ff125b7979e45f9
+Link: https://lore.kernel.org/bpf/20240604150024.359247-1-jolsa@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bpf/test_run.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 478ee7aba85f3..12a2934b28ffb 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -707,10 +707,16 @@ static void
+ __bpf_prog_test_run_raw_tp(void *data)
+ {
+       struct bpf_raw_tp_test_run_info *info = data;
++      struct bpf_trace_run_ctx run_ctx = {};
++      struct bpf_run_ctx *old_run_ctx;
++
++      old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+       rcu_read_lock();
+       info->retval = bpf_prog_run(info->prog, info->ctx);
+       rcu_read_unlock();
++
++      bpf_reset_run_ctx(old_run_ctx);
+ }
+ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+-- 
+2.43.0
+
diff --git a/queue-6.6/bpf-store-ref_ctr_offsets-values-in-bpf_uprobe-array.patch b/queue-6.6/bpf-store-ref_ctr_offsets-values-in-bpf_uprobe-array.patch
new file mode 100644 (file)
index 0000000..17c45a4
--- /dev/null
@@ -0,0 +1,89 @@
+From efa5a4c1a704aced85d41f838cb02edade141d99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Nov 2023 20:31:26 +0100
+Subject: bpf: Store ref_ctr_offsets values in bpf_uprobe array
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit 4930b7f53a298533bc31d7540b6ea8b79a000331 ]
+
+We will need to return ref_ctr_offsets values through link_info
+interface in following change, so we need to keep them around.
+
+Storing ref_ctr_offsets values directly into bpf_uprobe array.
+
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/bpf/20231125193130.834322-3-jolsa@kernel.org
+Stable-dep-of: 2884dc7d08d9 ("bpf: Fix a potential use-after-free in bpf_link_free()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/bpf_trace.c | 14 +++-----------
+ 1 file changed, 3 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 1e79084a9d9d2..8edbafe0d4cdf 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -3030,6 +3030,7 @@ struct bpf_uprobe_multi_link;
+ struct bpf_uprobe {
+       struct bpf_uprobe_multi_link *link;
+       loff_t offset;
++      unsigned long ref_ctr_offset;
+       u64 cookie;
+       struct uprobe_consumer consumer;
+ };
+@@ -3169,7 +3170,6 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ {
+       struct bpf_uprobe_multi_link *link = NULL;
+       unsigned long __user *uref_ctr_offsets;
+-      unsigned long *ref_ctr_offsets = NULL;
+       struct bpf_link_primer link_primer;
+       struct bpf_uprobe *uprobes = NULL;
+       struct task_struct *task = NULL;
+@@ -3244,18 +3244,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+       if (!uprobes || !link)
+               goto error_free;
+-      if (uref_ctr_offsets) {
+-              ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
+-              if (!ref_ctr_offsets)
+-                      goto error_free;
+-      }
+-
+       for (i = 0; i < cnt; i++) {
+               if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
+                       err = -EFAULT;
+                       goto error_free;
+               }
+-              if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
++              if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
+                       err = -EFAULT;
+                       goto error_free;
+               }
+@@ -3286,7 +3280,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+       for (i = 0; i < cnt; i++) {
+               err = uprobe_register_refctr(d_real_inode(link->path.dentry),
+                                            uprobes[i].offset,
+-                                           ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
++                                           uprobes[i].ref_ctr_offset,
+                                            &uprobes[i].consumer);
+               if (err) {
+                       bpf_uprobe_unregister(&path, uprobes, i);
+@@ -3298,11 +3292,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+       if (err)
+               goto error_free;
+-      kvfree(ref_ctr_offsets);
+       return bpf_link_settle(&link_primer);
+ error_free:
+-      kvfree(ref_ctr_offsets);
+       kvfree(uprobes);
+       kfree(link);
+       if (task)
+-- 
+2.43.0
+
diff --git a/queue-6.6/eventfs-update-all-the-eventfs_inodes-from-the-event.patch b/queue-6.6/eventfs-update-all-the-eventfs_inodes-from-the-event.patch
new file mode 100644 (file)
index 0000000..df18f17
--- /dev/null
@@ -0,0 +1,119 @@
+From 37f214b9a41fc5430db2183e843cdbbc64a8daf9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 01:14:28 -0400
+Subject: eventfs: Update all the eventfs_inodes from the events descriptor
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 340f0c7067a95281ad13734f8225f49c6cf52067 ]
+
+The change to update the permissions of the eventfs_inode had the
+misconception that using the tracefs_inode would find all the
+eventfs_inodes that have been updated and reset them on remount.
+The problem with this approach is that the eventfs_inodes are freed when
+they are no longer used (basically the reason the eventfs system exists).
+When they are freed, the updated eventfs_inodes are not reset on a remount
+because their tracefs_inodes have been freed.
+
+Instead, since the events directory eventfs_inode always has a
+tracefs_inode pointing to it (it is not freed when finished), and the
+events directory has a link to all its children, have the
+eventfs_remount() function only operate on the events eventfs_inode and
+have it descend into its children updating their uid and gids.
+
+Link: https://lore.kernel.org/all/CAK7LNARXgaWw3kH9JgrnH4vK6fr8LDkNKf3wq8NhMWJrVwJyVQ@mail.gmail.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20240523051539.754424703@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Fixes: baa23a8d4360d ("tracefs: Reset permissions on remount if permissions are options")
+Reported-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/tracefs/event_inode.c | 51 ++++++++++++++++++++++++++++++----------
+ 1 file changed, 39 insertions(+), 12 deletions(-)
+
+diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
+index b521e904a7ce9..b406bb3430f3d 100644
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -305,33 +305,60 @@ static const struct file_operations eventfs_file_operations = {
+       .llseek         = generic_file_llseek,
+ };
+-/*
+- * On a remount of tracefs, if UID or GID options are set, then
+- * the mount point inode permissions should be used.
+- * Reset the saved permission flags appropriately.
+- */
+-void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
++static void eventfs_set_attrs(struct eventfs_inode *ei, bool update_uid, kuid_t uid,
++                            bool update_gid, kgid_t gid, int level)
+ {
+-      struct eventfs_inode *ei = ti->private;
++      struct eventfs_inode *ei_child;
+-      if (!ei)
++      /* Update events/<system>/<event> */
++      if (WARN_ON_ONCE(level > 3))
+               return;
+-      if (update_uid)
++      if (update_uid) {
+               ei->attr.mode &= ~EVENTFS_SAVE_UID;
++              ei->attr.uid = uid;
++      }
+-      if (update_gid)
++      if (update_gid) {
+               ei->attr.mode &= ~EVENTFS_SAVE_GID;
++              ei->attr.gid = gid;
++      }
++
++      list_for_each_entry(ei_child, &ei->children, list) {
++              eventfs_set_attrs(ei_child, update_uid, uid, update_gid, gid, level + 1);
++      }
+       if (!ei->entry_attrs)
+               return;
+       for (int i = 0; i < ei->nr_entries; i++) {
+-              if (update_uid)
++              if (update_uid) {
+                       ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID;
+-              if (update_gid)
++                      ei->entry_attrs[i].uid = uid;
++              }
++              if (update_gid) {
+                       ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID;
++                      ei->entry_attrs[i].gid = gid;
++              }
+       }
++
++}
++
++/*
++ * On a remount of tracefs, if UID or GID options are set, then
++ * the mount point inode permissions should be used.
++ * Reset the saved permission flags appropriately.
++ */
++void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
++{
++      struct eventfs_inode *ei = ti->private;
++
++      /* Only the events directory does the updates */
++      if (!ei || !ei->is_events || ei->is_freed)
++              return;
++
++      eventfs_set_attrs(ei, update_uid, ti->vfs_inode.i_uid,
++                        update_gid, ti->vfs_inode.i_gid, 0);
+ }
+ /* Return the evenfs_inode of the "events" directory */
+-- 
+2.43.0
+
diff --git a/queue-6.6/ext4-avoid-overflow-when-setting-values-via-sysfs.patch b/queue-6.6/ext4-avoid-overflow-when-setting-values-via-sysfs.patch
new file mode 100644 (file)
index 0000000..8e8ce7d
--- /dev/null
@@ -0,0 +1,80 @@
+From 9fd5aac986651796cd9c05d89eb56da4b1700499 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Mar 2024 19:33:17 +0800
+Subject: ext4: avoid overflow when setting values via sysfs
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 9e8e819f8f272c4e5dcd0bd6c7450e36481ed139 ]
+
+When setting values of type unsigned int through sysfs, we use kstrtoul()
+to parse it and then truncate part of it as the final set value, when the
+set value is greater than UINT_MAX, the set value will not match what we
+see because of the truncation. As follows:
+
+  $ echo 4294967296 > /sys/fs/ext4/sda/mb_max_linear_groups
+  $ cat /sys/fs/ext4/sda/mb_max_linear_groups
+    0
+
+So we use kstrtouint() to parse the attr_pointer_ui type to avoid the
+inconsistency described above. In addition, a judgment is added to avoid
+setting s_resv_clusters less than 0.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20240319113325.3110393-2-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 13df4d44a3aa ("ext4: fix slab-out-of-bounds in ext4_mb_find_good_group_avg_frag_lists()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/sysfs.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 6d332dff79ddc..ca820620b9742 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -104,7 +104,7 @@ static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
+       int ret;
+       ret = kstrtoull(skip_spaces(buf), 0, &val);
+-      if (ret || val >= clusters)
++      if (ret || val >= clusters || (s64)val < 0)
+               return -EINVAL;
+       atomic64_set(&sbi->s_resv_clusters, val);
+@@ -451,7 +451,8 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+                                               s_kobj);
+       struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
+       void *ptr = calc_ptr(a, sbi);
+-      unsigned long t;
++      unsigned int t;
++      unsigned long lt;
+       int ret;
+       switch (a->attr_id) {
+@@ -460,7 +461,7 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+       case attr_pointer_ui:
+               if (!ptr)
+                       return 0;
+-              ret = kstrtoul(skip_spaces(buf), 0, &t);
++              ret = kstrtouint(skip_spaces(buf), 0, &t);
+               if (ret)
+                       return ret;
+               if (a->attr_ptr == ptr_ext4_super_block_offset)
+@@ -471,10 +472,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+       case attr_pointer_ul:
+               if (!ptr)
+                       return 0;
+-              ret = kstrtoul(skip_spaces(buf), 0, &t);
++              ret = kstrtoul(skip_spaces(buf), 0, &lt);
+               if (ret)
+                       return ret;
+-              *((unsigned long *) ptr) = t;
++              *((unsigned long *) ptr) = lt;
+               return len;
+       case attr_inode_readahead:
+               return inode_readahead_blks_store(sbi, buf, len);
+-- 
+2.43.0
+
diff --git a/queue-6.6/ext4-fix-slab-out-of-bounds-in-ext4_mb_find_good_gro.patch b/queue-6.6/ext4-fix-slab-out-of-bounds-in-ext4_mb_find_good_gro.patch
new file mode 100644 (file)
index 0000000..14b20de
--- /dev/null
@@ -0,0 +1,148 @@
+From 5b3aec93e827c7c6217daec225d3881015346967 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Mar 2024 19:33:20 +0800
+Subject: ext4: fix slab-out-of-bounds in
+ ext4_mb_find_good_group_avg_frag_lists()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 13df4d44a3aaabe61cd01d277b6ee23ead2a5206 ]
+
+We can trigger a slab-out-of-bounds with the following commands:
+
+    mkfs.ext4 -F /dev/$disk 10G
+    mount /dev/$disk /tmp/test
+    echo 2147483647 > /sys/fs/ext4/$disk/mb_group_prealloc
+    echo test > /tmp/test/file && sync
+
+==================================================================
+BUG: KASAN: slab-out-of-bounds in ext4_mb_find_good_group_avg_frag_lists+0x8a/0x200 [ext4]
+Read of size 8 at addr ffff888121b9d0f0 by task kworker/u2:0/11
+CPU: 0 PID: 11 Comm: kworker/u2:0 Tainted: GL 6.7.0-next-20240118 #521
+Call Trace:
+ dump_stack_lvl+0x2c/0x50
+ kasan_report+0xb6/0xf0
+ ext4_mb_find_good_group_avg_frag_lists+0x8a/0x200 [ext4]
+ ext4_mb_regular_allocator+0x19e9/0x2370 [ext4]
+ ext4_mb_new_blocks+0x88a/0x1370 [ext4]
+ ext4_ext_map_blocks+0x14f7/0x2390 [ext4]
+ ext4_map_blocks+0x569/0xea0 [ext4]
+ ext4_do_writepages+0x10f6/0x1bc0 [ext4]
+[...]
+==================================================================
+
+The flow of issue triggering is as follows:
+
+// Set s_mb_group_prealloc to 2147483647 via sysfs
+ext4_mb_new_blocks
+  ext4_mb_normalize_request
+    ext4_mb_normalize_group_request
+      ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc
+  ext4_mb_regular_allocator
+    ext4_mb_choose_next_group
+      ext4_mb_choose_next_group_best_avail
+        mb_avg_fragment_size_order
+          order = fls(len) - 2 = 29
+        ext4_mb_find_good_group_avg_frag_lists
+          frag_list = &sbi->s_mb_avg_fragment_size[order]
+          if (list_empty(frag_list)) // Trigger SOOB!
+
+At 4k block size, the length of the s_mb_avg_fragment_size list is 14,
+but an oversized s_mb_group_prealloc is set, causing slab-out-of-bounds
+to be triggered by an attempt to access an element at index 29.
+
+Add a new attr_id attr_clusters_in_group with values in the range
+[0, sbi->s_clusters_per_group] and declare mb_group_prealloc as
+that type to fix the issue. In addition avoid returning an order
+from mb_avg_fragment_size_order() greater than MB_NUM_ORDERS(sb)
+and reduce some useless loops.
+
+Fixes: 7e170922f06b ("ext4: Add allocation criteria 1.5 (CR1_5)")
+CC: stable@vger.kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240319113325.3110393-5-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c |  4 ++++
+ fs/ext4/sysfs.c   | 13 ++++++++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index aadfeb0f5b7f3..f55ab800a7539 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -831,6 +831,8 @@ static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
+               return 0;
+       if (order == MB_NUM_ORDERS(sb))
+               order--;
++      if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
++              order = MB_NUM_ORDERS(sb) - 1;
+       return order;
+ }
+@@ -1008,6 +1010,8 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
+        * goal length.
+        */
+       order = fls(ac->ac_g_ex.fe_len) - 1;
++      if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
++              order = MB_NUM_ORDERS(ac->ac_sb);
+       min_order = order - sbi->s_mb_best_avail_max_trim_order;
+       if (min_order < 0)
+               min_order = 0;
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 295ea9a32de91..ca66e33f61815 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -29,6 +29,7 @@ typedef enum {
+       attr_trigger_test_error,
+       attr_first_error_time,
+       attr_last_error_time,
++      attr_clusters_in_group,
+       attr_feature,
+       attr_pointer_ui,
+       attr_pointer_ul,
+@@ -207,13 +208,14 @@ EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+                ext4_sb_info, s_inode_readahead_blks);
++EXT4_ATTR_OFFSET(mb_group_prealloc, 0644, clusters_in_group,
++               ext4_sb_info, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+ EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+-EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+ EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
+@@ -376,6 +378,7 @@ static ssize_t ext4_generic_attr_show(struct ext4_attr *a,
+       switch (a->attr_id) {
+       case attr_inode_readahead:
++      case attr_clusters_in_group:
+       case attr_pointer_ui:
+               if (a->attr_ptr == ptr_ext4_super_block_offset)
+                       return sysfs_emit(buf, "%u\n", le32_to_cpup(ptr));
+@@ -459,6 +462,14 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+               else
+                       *((unsigned int *) ptr) = t;
+               return len;
++      case attr_clusters_in_group:
++              ret = kstrtouint(skip_spaces(buf), 0, &t);
++              if (ret)
++                      return ret;
++              if (t > sbi->s_clusters_per_group)
++                      return -EINVAL;
++              *((unsigned int *) ptr) = t;
++              return len;
+       case attr_pointer_ul:
+               if (!ptr)
+                       return 0;
+-- 
+2.43.0
+
diff --git a/queue-6.6/ext4-refactor-out-ext4_generic_attr_show.patch b/queue-6.6/ext4-refactor-out-ext4_generic_attr_show.patch
new file mode 100644 (file)
index 0000000..30d2b98
--- /dev/null
@@ -0,0 +1,132 @@
+From ab049cbb779321b16f125be5e86889cc6c96892f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Mar 2024 19:33:19 +0800
+Subject: ext4: refactor out ext4_generic_attr_show()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 57341fe3179c7694c92dcf99e7f836cee4c800dd ]
+
+Refactor out the function ext4_generic_attr_show() to handle the reading
+of values of various common types, with no functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20240319113325.3110393-4-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 13df4d44a3aa ("ext4: fix slab-out-of-bounds in ext4_mb_find_good_group_avg_frag_lists()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/sysfs.c | 74 +++++++++++++++++++++----------------------------
+ 1 file changed, 32 insertions(+), 42 deletions(-)
+
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index ca820620b9742..295ea9a32de91 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -366,13 +366,42 @@ static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
+ #define print_tstamp(buf, es, tstamp) \
+       __print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi)
++static ssize_t ext4_generic_attr_show(struct ext4_attr *a,
++                                    struct ext4_sb_info *sbi, char *buf)
++{
++      void *ptr = calc_ptr(a, sbi);
++
++      if (!ptr)
++              return 0;
++
++      switch (a->attr_id) {
++      case attr_inode_readahead:
++      case attr_pointer_ui:
++              if (a->attr_ptr == ptr_ext4_super_block_offset)
++                      return sysfs_emit(buf, "%u\n", le32_to_cpup(ptr));
++              return sysfs_emit(buf, "%u\n", *((unsigned int *) ptr));
++      case attr_pointer_ul:
++              return sysfs_emit(buf, "%lu\n", *((unsigned long *) ptr));
++      case attr_pointer_u8:
++              return sysfs_emit(buf, "%u\n", *((unsigned char *) ptr));
++      case attr_pointer_u64:
++              if (a->attr_ptr == ptr_ext4_super_block_offset)
++                      return sysfs_emit(buf, "%llu\n", le64_to_cpup(ptr));
++              return sysfs_emit(buf, "%llu\n", *((unsigned long long *) ptr));
++      case attr_pointer_string:
++              return sysfs_emit(buf, "%.*s\n", a->attr_size, (char *) ptr);
++      case attr_pointer_atomic:
++              return sysfs_emit(buf, "%d\n", atomic_read((atomic_t *) ptr));
++      }
++      return 0;
++}
++
+ static ssize_t ext4_attr_show(struct kobject *kobj,
+                             struct attribute *attr, char *buf)
+ {
+       struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
+                                               s_kobj);
+       struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
+-      void *ptr = calc_ptr(a, sbi);
+       switch (a->attr_id) {
+       case attr_delayed_allocation_blocks:
+@@ -391,45 +420,6 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
+               return sysfs_emit(buf, "%llu\n",
+                               (unsigned long long)
+                       percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
+-      case attr_inode_readahead:
+-      case attr_pointer_ui:
+-              if (!ptr)
+-                      return 0;
+-              if (a->attr_ptr == ptr_ext4_super_block_offset)
+-                      return sysfs_emit(buf, "%u\n",
+-                                      le32_to_cpup(ptr));
+-              else
+-                      return sysfs_emit(buf, "%u\n",
+-                                      *((unsigned int *) ptr));
+-      case attr_pointer_ul:
+-              if (!ptr)
+-                      return 0;
+-              return sysfs_emit(buf, "%lu\n",
+-                              *((unsigned long *) ptr));
+-      case attr_pointer_u8:
+-              if (!ptr)
+-                      return 0;
+-              return sysfs_emit(buf, "%u\n",
+-                              *((unsigned char *) ptr));
+-      case attr_pointer_u64:
+-              if (!ptr)
+-                      return 0;
+-              if (a->attr_ptr == ptr_ext4_super_block_offset)
+-                      return sysfs_emit(buf, "%llu\n",
+-                                      le64_to_cpup(ptr));
+-              else
+-                      return sysfs_emit(buf, "%llu\n",
+-                                      *((unsigned long long *) ptr));
+-      case attr_pointer_string:
+-              if (!ptr)
+-                      return 0;
+-              return sysfs_emit(buf, "%.*s\n", a->attr_size,
+-                              (char *) ptr);
+-      case attr_pointer_atomic:
+-              if (!ptr)
+-                      return 0;
+-              return sysfs_emit(buf, "%d\n",
+-                              atomic_read((atomic_t *) ptr));
+       case attr_feature:
+               return sysfs_emit(buf, "supported\n");
+       case attr_first_error_time:
+@@ -438,9 +428,9 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
+               return print_tstamp(buf, sbi->s_es, s_last_error_time);
+       case attr_journal_task:
+               return journal_task_show(sbi, buf);
++      default:
++              return ext4_generic_attr_show(a, sbi, buf);
+       }
+-
+-      return 0;
+ }
+ static ssize_t ext4_attr_store(struct kobject *kobj,
+-- 
+2.43.0
+
diff --git a/queue-6.6/firmware-qcom_scm-disable-clocks-if-qcom_scm_bw_enab.patch b/queue-6.6/firmware-qcom_scm-disable-clocks-if-qcom_scm_bw_enab.patch
new file mode 100644 (file)
index 0000000..49ab0dd
--- /dev/null
@@ -0,0 +1,98 @@
+From 3b0bc94bf5ada503debb1d6a2e20004f57177d8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Mar 2024 14:14:53 +0100
+Subject: firmware: qcom_scm: disable clocks if qcom_scm_bw_enable() fails
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit 0c50b7fcf2773b4853e83fc15aba1a196ba95966 ]
+
+There are several functions which are calling qcom_scm_bw_enable()
+then returns immediately if the call fails and leaves the clocks
+enabled.
+
+Change the code of these functions to disable clocks when the
+qcom_scm_bw_enable() call fails. This also fixes a possible dma
+buffer leak in the qcom_scm_pas_init_image() function.
+
+Compile tested only due to lack of hardware with interconnect
+support.
+
+Cc: stable@vger.kernel.org
+Fixes: 65b7ebda5028 ("firmware: qcom_scm: Add bw voting support to the SCM interface")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Link: https://lore.kernel.org/r/20240304-qcom-scm-disable-clk-v1-1-b36e51577ca1@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/qcom_scm.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index ff7c155239e31..7af59985f1c1f 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -498,13 +498,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+       ret = qcom_scm_bw_enable();
+       if (ret)
+-              return ret;
++              goto disable_clk;
+       desc.args[1] = mdata_phys;
+       ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+       qcom_scm_bw_disable();
++
++disable_clk:
+       qcom_scm_clk_disable();
+ out:
+@@ -566,10 +567,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+       ret = qcom_scm_bw_enable();
+       if (ret)
+-              return ret;
++              goto disable_clk;
+       ret = qcom_scm_call(__scm->dev, &desc, &res);
+       qcom_scm_bw_disable();
++
++disable_clk:
+       qcom_scm_clk_disable();
+       return ret ? : res.result[0];
+@@ -601,10 +604,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
+       ret = qcom_scm_bw_enable();
+       if (ret)
+-              return ret;
++              goto disable_clk;
+       ret = qcom_scm_call(__scm->dev, &desc, &res);
+       qcom_scm_bw_disable();
++
++disable_clk:
+       qcom_scm_clk_disable();
+       return ret ? : res.result[0];
+@@ -635,11 +640,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
+       ret = qcom_scm_bw_enable();
+       if (ret)
+-              return ret;
++              goto disable_clk;
+       ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+       qcom_scm_bw_disable();
++
++disable_clk:
+       qcom_scm_clk_disable();
+       return ret ? : res.result[0];
+-- 
+2.43.0
+
diff --git a/queue-6.6/ice-add-flag-to-distinguish-reset-from-.ndo_bpf-in-x.patch b/queue-6.6/ice-add-flag-to-distinguish-reset-from-.ndo_bpf-in-x.patch
new file mode 100644 (file)
index 0000000..3ede438
--- /dev/null
@@ -0,0 +1,181 @@
+From bb63f5d12a4d58d790b4227255eaff353a6b217e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 14:42:33 -0700
+Subject: ice: add flag to distinguish reset from .ndo_bpf in XDP rings config
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 744d197162c2070a6045a71e2666ed93a57cc65d ]
+
+Commit 6624e780a577 ("ice: split ice_vsi_setup into smaller functions")
+has placed ice_vsi_free_q_vectors() after ice_destroy_xdp_rings() in
+the rebuild process. The behaviour of the XDP rings config functions is
+context-dependent, so the change of order has led to
+ice_destroy_xdp_rings() doing additional work and removing XDP prog, when
+it was supposed to be preserved.
+
+Also, dependency on the PF state reset flags creates an additional,
+fortunately less common problem:
+
+* PFR is requested e.g. by tx_timeout handler
+* .ndo_bpf() is asked to delete the program, calls ice_destroy_xdp_rings(),
+  but reset flag is set, so rings are destroyed without deleting the
+  program
+* ice_vsi_rebuild tries to delete non-existent XDP rings, because the
+  program is still on the VSI
+* system crashes
+
+With a similar race, when requested to attach a program,
+ice_prepare_xdp_rings() can actually skip setting the program in the VSI
+and nevertheless report success.
+
+Instead of reverting to the old order of function calls, add an enum
+argument to both ice_prepare_xdp_rings() and ice_destroy_xdp_rings() in
+order to distinguish between calls from rebuild and .ndo_bpf().
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Reviewed-by: Igor Bagnucki <igor.bagnucki@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-4-e3563aa89b0c@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h      | 11 +++++++++--
+ drivers/net/ethernet/intel/ice/ice_lib.c  |  5 +++--
+ drivers/net/ethernet/intel/ice/ice_main.c | 22 ++++++++++++----------
+ 3 files changed, 24 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index cf00eaa3e9955..c7962f322db2d 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -892,9 +892,16 @@ int ice_down(struct ice_vsi *vsi);
+ int ice_down_up(struct ice_vsi *vsi);
+ int ice_vsi_cfg_lan(struct ice_vsi *vsi);
+ struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
++
++enum ice_xdp_cfg {
++      ICE_XDP_CFG_FULL,       /* Fully apply new config in .ndo_bpf() */
++      ICE_XDP_CFG_PART,       /* Save/use part of config in VSI rebuild */
++};
++
+ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
+-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
++int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
++                        enum ice_xdp_cfg cfg_type);
++int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+ int
+ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+            u32 flags);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 5a7ba0355d338..13ca3342a0cea 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2462,7 +2462,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+                       ret = ice_vsi_determine_xdp_res(vsi);
+                       if (ret)
+                               goto unroll_vector_base;
+-                      ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
++                      ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
++                                                  ICE_XDP_CFG_PART);
+                       if (ret)
+                               goto unroll_vector_base;
+               }
+@@ -2613,7 +2614,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
+               /* return value check can be skipped here, it always returns
+                * 0 if reset is in progress
+                */
+-              ice_destroy_xdp_rings(vsi);
++              ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
+       ice_vsi_clear_rings(vsi);
+       ice_vsi_free_q_vectors(vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 8ebb6517f6b96..5d71febdcd4dd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2657,10 +2657,12 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+  * @vsi: VSI to bring up Tx rings used by XDP
+  * @prog: bpf program that will be assigned to VSI
++ * @cfg_type: create from scratch or restore the existing configuration
+  *
+  * Return 0 on success and negative value on error
+  */
+-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
++int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
++                        enum ice_xdp_cfg cfg_type)
+ {
+       u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+       int xdp_rings_rem = vsi->num_xdp_txq;
+@@ -2736,7 +2738,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+        * taken into account at the end of ice_vsi_rebuild, where
+        * ice_cfg_vsi_lan is being called
+        */
+-      if (ice_is_reset_in_progress(pf->state))
++      if (cfg_type == ICE_XDP_CFG_PART)
+               return 0;
+       /* tell the Tx scheduler that right now we have
+@@ -2788,22 +2790,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ /**
+  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+  * @vsi: VSI to remove XDP rings
++ * @cfg_type: disable XDP permanently or allow it to be restored later
+  *
+  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+  * resources
+  */
+-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
++int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
+ {
+       u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+       struct ice_pf *pf = vsi->back;
+       int i, v_idx;
+       /* q_vectors are freed in reset path so there's no point in detaching
+-       * rings; in case of rebuild being triggered not from reset bits
+-       * in pf->state won't be set, so additionally check first q_vector
+-       * against NULL
++       * rings
+        */
+-      if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
++      if (cfg_type == ICE_XDP_CFG_PART)
+               goto free_qmap;
+       ice_for_each_q_vector(vsi, v_idx) {
+@@ -2844,7 +2845,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+       if (static_key_enabled(&ice_xdp_locking_key))
+               static_branch_dec(&ice_xdp_locking_key);
+-      if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
++      if (cfg_type == ICE_XDP_CFG_PART)
+               return 0;
+       ice_vsi_assign_bpf_prog(vsi, NULL);
+@@ -2955,7 +2956,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+               if (xdp_ring_err) {
+                       NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+               } else {
+-                      xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
++                      xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
++                                                           ICE_XDP_CFG_FULL);
+                       if (xdp_ring_err)
+                               NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+               }
+@@ -2966,7 +2968,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+                       NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
+       } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+               xdp_features_clear_redirect_target(vsi->netdev);
+-              xdp_ring_err = ice_destroy_xdp_rings(vsi);
++              xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
+               if (xdp_ring_err)
+                       NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+               /* reallocate Rx queues that were used for zero-copy */
+-- 
+2.43.0
+
diff --git a/queue-6.6/ice-fix-iteration-of-tlvs-in-preserved-fields-area.patch b/queue-6.6/ice-fix-iteration-of-tlvs-in-preserved-fields-area.patch
new file mode 100644 (file)
index 0000000..6fcc76b
--- /dev/null
@@ -0,0 +1,124 @@
+From ffd967a3e5e55bed80c4577e4afdb9c868aaf7e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 14:42:30 -0700
+Subject: ice: fix iteration of TLVs in Preserved Fields Area
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 03e4a092be8ce3de7c1baa7ae14e68b64e3ea644 ]
+
+The ice_get_pfa_module_tlv() function iterates over the Type-Length-Value
+structures in the Preserved Fields Area (PFA) of the NVM. This is used by
+the driver to access data such as the Part Board Assembly identifier.
+
+The function uses simple logic to iterate over the PFA. First, the pointer
+to the PFA in the NVM is read. Then the total length of the PFA is read
+from the first word.
+
+A pointer to the first TLV is initialized, and a simple loop iterates over
+each TLV. The pointer is moved forward through the NVM until it exceeds the
+PFA area.
+
+The logic seems sound, but it is missing a key detail. The Preserved
+Fields Area length includes one additional final word. This is documented
+in the device data sheet as a dummy word which contains 0xFFFF. All NVMs
+have this extra word.
+
+If the driver tries to scan for a TLV that is not in the PFA, it will read
+past the size of the PFA. It reads and interprets the last dummy word of
+the PFA as a TLV with type 0xFFFF. It then reads the word following the PFA
+as a length.
+
+The PFA resides within the Shadow RAM portion of the NVM, which is
+relatively small. All of its offsets are within a 16-bit size. The PFA
+pointer and TLV pointer are stored by the driver as 16-bit values.
+
+In almost all cases, the word following the PFA will be such that
+interpreting it as a length will result in 16-bit arithmetic overflow. Once
+overflowed, the new next_tlv value is now below the maximum offset of the
+PFA. Thus, the driver will continue to iterate the data as TLVs. In the
+worst case, the driver hits on a sequence of reads which loop back to
+reading the same offsets in an endless loop.
+
+To fix this, we need to correct the loop iteration check to account for
+this extra word at the end of the PFA. This alone is sufficient to resolve
+the known cases of this issue in the field. However, it is plausible that
+an NVM could be misconfigured or have corrupt data which results in the
+same kind of overflow. Protect against this by using check_add_overflow
+when calculating both the maximum offset of the TLVs, and when calculating
+the next_tlv offset at the end of each loop iteration. This ensures that
+the driver will not get stuck in an infinite loop when scanning the PFA.
+
+Fixes: e961b679fb0b ("ice: add board identifier info to devlink .info_get")
+Co-developed-by: Paul Greenwalt <paul.greenwalt@intel.com>
+Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-1-e3563aa89b0c@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_nvm.c | 28 ++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
+index f6f52a2480662..2fb43cded572c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
++++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
+@@ -441,8 +441,7 @@ int
+ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+                      u16 module_type)
+ {
+-      u16 pfa_len, pfa_ptr;
+-      u16 next_tlv;
++      u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
+       int status;
+       status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+@@ -455,11 +454,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+               ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+               return status;
+       }
++
++      /* The Preserved Fields Area contains a sequence of Type-Length-Value
++       * structures which define its contents. The PFA length includes all
++       * of the TLVs, plus the initial length word itself, *and* one final
++       * word at the end after all of the TLVs.
++       */
++      if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
++              dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
++                       pfa_ptr, pfa_len);
++              return -EINVAL;
++      }
++
+       /* Starting with first TLV after PFA length, iterate through the list
+        * of TLVs to find the requested one.
+        */
+       next_tlv = pfa_ptr + 1;
+-      while (next_tlv < pfa_ptr + pfa_len) {
++      while (next_tlv < max_tlv) {
+               u16 tlv_sub_module_type;
+               u16 tlv_len;
+@@ -483,10 +494,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+                       }
+                       return -EINVAL;
+               }
+-              /* Check next TLV, i.e. current TLV pointer + length + 2 words
+-               * (for current TLV's type and length)
+-               */
+-              next_tlv = next_tlv + tlv_len + 2;
++
++              if (check_add_overflow(next_tlv, 2, &next_tlv) ||
++                  check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
++                      dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
++                               tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
++                      return -EINVAL;
++              }
+       }
+       /* Module does not exist */
+       return -ENOENT;
+-- 
+2.43.0
+
diff --git a/queue-6.6/ice-remove-af_xdp_zc_qps-bitmap.patch b/queue-6.6/ice-remove-af_xdp_zc_qps-bitmap.patch
new file mode 100644 (file)
index 0000000..e04823f
--- /dev/null
@@ -0,0 +1,170 @@
+From 6c9a25771b74ed890a3f3f7b3510360e4067c49d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 14:42:32 -0700
+Subject: ice: remove af_xdp_zc_qps bitmap
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit adbf5a42341f6ea038d3626cd4437d9f0ad0b2dd ]
+
+Referenced commit has introduced a bitmap to distinguish between ZC and
+copy-mode AF_XDP queues, because xsk_get_pool_from_qid() does not do this
+for us.
+
+The bitmap would be especially useful when restoring previous state after
+rebuild, if only it was not reallocated in the process. This leads to e.g.
+xdpsock dying after changing number of queues.
+
+Instead of preserving the bitmap during the rebuild, remove it completely
+and distinguish between ZC and copy-mode queues based on the presence of
+a device associated with the pool.
+
+Fixes: e102db780e1c ("ice: track AF_XDP ZC enabled queues in bitmap")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-3-e3563aa89b0c@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h     | 32 ++++++++++++++++--------
+ drivers/net/ethernet/intel/ice/ice_lib.c |  8 ------
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 13 +++++-----
+ 3 files changed, 27 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 5022b036ca4f9..cf00eaa3e9955 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -407,7 +407,6 @@ struct ice_vsi {
+       struct ice_tc_cfg tc_cfg;
+       struct bpf_prog *xdp_prog;
+       struct ice_tx_ring **xdp_rings;  /* XDP ring array */
+-      unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
+       u16 num_xdp_txq;                 /* Used XDP queues */
+       u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+@@ -714,6 +713,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
+       ring->flags |= ICE_TX_FLAGS_RING_XDP;
+ }
++/**
++ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
++ * @vsi: pointer to VSI
++ * @qid: index of a queue to look at XSK buff pool presence
++ *
++ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
++ * attached and configured as zero-copy, NULL otherwise.
++ */
++static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
++                                                      u16 qid)
++{
++      struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++
++      if (!ice_is_xdp_ena_vsi(vsi))
++              return NULL;
++
++      return (pool && pool->dev) ? pool : NULL;
++}
++
+ /**
+  * ice_xsk_pool - get XSK buffer pool bound to a ring
+  * @ring: Rx ring to use
+@@ -726,10 +744,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+       struct ice_vsi *vsi = ring->vsi;
+       u16 qid = ring->q_index;
+-      if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+-              return NULL;
+-
+-      return xsk_get_pool_from_qid(vsi->netdev, qid);
++      return ice_get_xp_from_qid(vsi, qid);
+ }
+ /**
+@@ -754,12 +769,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
+       if (!ring)
+               return;
+-      if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+-              ring->xsk_pool = NULL;
+-              return;
+-      }
+-
+-      ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++      ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+ }
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 2004120a58acd..5a7ba0355d338 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
+       if (!vsi->q_vectors)
+               goto err_vectors;
+-      vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+-      if (!vsi->af_xdp_zc_qps)
+-              goto err_zc_qps;
+-
+       return 0;
+-err_zc_qps:
+-      devm_kfree(dev, vsi->q_vectors);
+ err_vectors:
+       devm_kfree(dev, vsi->rxq_map);
+ err_rxq_map:
+@@ -321,8 +315,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
+       dev = ice_pf_to_dev(pf);
+-      bitmap_free(vsi->af_xdp_zc_qps);
+-      vsi->af_xdp_zc_qps = NULL;
+       /* free the ring and vector containers */
+       devm_kfree(dev, vsi->q_vectors);
+       vsi->q_vectors = NULL;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 7bd71660011e4..f53566cb6bfbd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -289,7 +289,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
+       if (!pool)
+               return -EINVAL;
+-      clear_bit(qid, vsi->af_xdp_zc_qps);
+       xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
+       return 0;
+@@ -320,8 +319,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+       if (err)
+               return err;
+-      set_bit(qid, vsi->af_xdp_zc_qps);
+-
+       return 0;
+ }
+@@ -369,11 +366,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+ {
+       struct ice_rx_ring *rx_ring;
+-      unsigned long q;
++      uint i;
++
++      ice_for_each_rxq(vsi, i) {
++              rx_ring = vsi->rx_rings[i];
++              if (!rx_ring->xsk_pool)
++                      continue;
+-      for_each_set_bit(q, vsi->af_xdp_zc_qps,
+-                       max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+-              rx_ring = vsi->rx_rings[q];
+               if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+                       return -ENOMEM;
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.6/ipv6-fix-possible-race-in-__fib6_drop_pcpu_from.patch b/queue-6.6/ipv6-fix-possible-race-in-__fib6_drop_pcpu_from.patch
new file mode 100644 (file)
index 0000000..dbbae73
--- /dev/null
@@ -0,0 +1,130 @@
+From 80172505be30e9eb896e3331991e19c0f692dc2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 19:35:49 +0000
+Subject: ipv6: fix possible race in __fib6_drop_pcpu_from()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b01e1c030770ff3b4fe37fc7cc6bca03f594133f ]
+
+syzbot found a race in __fib6_drop_pcpu_from() [1]
+
+If compiler reads more than once (*ppcpu_rt),
+second read could read NULL, if another cpu clears
+the value in rt6_get_pcpu_route().
+
+Add a READ_ONCE() to prevent this race.
+
+Also add rcu_read_lock()/rcu_read_unlock() because
+we rely on RCU protection while dereferencing pcpu_rt.
+
+[1]
+
+Oops: general protection fault, probably for non-canonical address 0xdffffc0000000012: 0000 [#1] PREEMPT SMP KASAN PTI
+KASAN: null-ptr-deref in range [0x0000000000000090-0x0000000000000097]
+CPU: 0 PID: 7543 Comm: kworker/u8:17 Not tainted 6.10.0-rc1-syzkaller-00013-g2bfcfd584ff5 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024
+Workqueue: netns cleanup_net
+ RIP: 0010:__fib6_drop_pcpu_from.part.0+0x10a/0x370 net/ipv6/ip6_fib.c:984
+Code: f8 48 c1 e8 03 80 3c 28 00 0f 85 16 02 00 00 4d 8b 3f 4d 85 ff 74 31 e8 74 a7 fa f7 49 8d bf 90 00 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 0f 85 1e 02 00 00 49 8b 87 90 00 00 00 48 8b 0c 24 48
+RSP: 0018:ffffc900040df070 EFLAGS: 00010206
+RAX: 0000000000000012 RBX: 0000000000000001 RCX: ffffffff89932e16
+RDX: ffff888049dd1e00 RSI: ffffffff89932d7c RDI: 0000000000000091
+RBP: dffffc0000000000 R08: 0000000000000005 R09: 0000000000000007
+R10: 0000000000000001 R11: 0000000000000006 R12: ffff88807fa080b8
+R13: fffffbfff1a9a07d R14: ffffed100ff41022 R15: 0000000000000001
+FS:  0000000000000000(0000) GS:ffff8880b9200000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000001b32c26000 CR3: 000000005d56e000 CR4: 00000000003526f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+  __fib6_drop_pcpu_from net/ipv6/ip6_fib.c:966 [inline]
+  fib6_drop_pcpu_from net/ipv6/ip6_fib.c:1027 [inline]
+  fib6_purge_rt+0x7f2/0x9f0 net/ipv6/ip6_fib.c:1038
+  fib6_del_route net/ipv6/ip6_fib.c:1998 [inline]
+  fib6_del+0xa70/0x17b0 net/ipv6/ip6_fib.c:2043
+  fib6_clean_node+0x426/0x5b0 net/ipv6/ip6_fib.c:2205
+  fib6_walk_continue+0x44f/0x8d0 net/ipv6/ip6_fib.c:2127
+  fib6_walk+0x182/0x370 net/ipv6/ip6_fib.c:2175
+  fib6_clean_tree+0xd7/0x120 net/ipv6/ip6_fib.c:2255
+  __fib6_clean_all+0x100/0x2d0 net/ipv6/ip6_fib.c:2271
+  rt6_sync_down_dev net/ipv6/route.c:4906 [inline]
+  rt6_disable_ip+0x7ed/0xa00 net/ipv6/route.c:4911
+  addrconf_ifdown.isra.0+0x117/0x1b40 net/ipv6/addrconf.c:3855
+  addrconf_notify+0x223/0x19e0 net/ipv6/addrconf.c:3778
+  notifier_call_chain+0xb9/0x410 kernel/notifier.c:93
+  call_netdevice_notifiers_info+0xbe/0x140 net/core/dev.c:1992
+  call_netdevice_notifiers_extack net/core/dev.c:2030 [inline]
+  call_netdevice_notifiers net/core/dev.c:2044 [inline]
+  dev_close_many+0x333/0x6a0 net/core/dev.c:1585
+  unregister_netdevice_many_notify+0x46d/0x19f0 net/core/dev.c:11193
+  unregister_netdevice_many net/core/dev.c:11276 [inline]
+  default_device_exit_batch+0x85b/0xae0 net/core/dev.c:11759
+  ops_exit_list+0x128/0x180 net/core/net_namespace.c:178
+  cleanup_net+0x5b7/0xbf0 net/core/net_namespace.c:640
+  process_one_work+0x9fb/0x1b60 kernel/workqueue.c:3231
+  process_scheduled_works kernel/workqueue.c:3312 [inline]
+  worker_thread+0x6c8/0xf70 kernel/workqueue.c:3393
+  kthread+0x2c1/0x3a0 kernel/kthread.c:389
+  ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:147
+  ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+
+Fixes: d52d3997f843 ("ipv6: Create percpu rt6_info")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/r/20240604193549.981839-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ip6_fib.c | 6 +++++-
+ net/ipv6/route.c   | 1 +
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 8184076a3924e..4356806b52bd5 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -961,6 +961,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+       if (!fib6_nh->rt6i_pcpu)
+               return;
++      rcu_read_lock();
+       /* release the reference to this fib entry from
+        * all of its cached pcpu routes
+        */
+@@ -969,7 +970,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+               struct rt6_info *pcpu_rt;
+               ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+-              pcpu_rt = *ppcpu_rt;
++
++              /* Paired with xchg() in rt6_get_pcpu_route() */
++              pcpu_rt = READ_ONCE(*ppcpu_rt);
+               /* only dropping the 'from' reference if the cached route
+                * is using 'match'. The cached pcpu_rt->from only changes
+@@ -983,6 +986,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+                       fib6_info_release(from);
+               }
+       }
++      rcu_read_unlock();
+ }
+ struct fib6_nh_pcpu_arg {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index c48eaa7c23401..0a37f04177337 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1399,6 +1399,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
+               struct rt6_info *prev, **p;
+               p = this_cpu_ptr(res->nh->rt6i_pcpu);
++              /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
+               prev = xchg(p, NULL);
+               if (prev) {
+                       dst_dev_put(&prev->dst);
+-- 
+2.43.0
+
diff --git a/queue-6.6/ipv6-ioam-block-bh-from-ioam6_output.patch b/queue-6.6/ipv6-ioam-block-bh-from-ioam6_output.patch
new file mode 100644 (file)
index 0000000..44e806e
--- /dev/null
@@ -0,0 +1,65 @@
+From 1628e1a1ec5d0f3d344f1c5d26c4241ef6d2f1d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 13:26:32 +0000
+Subject: ipv6: ioam: block BH from ioam6_output()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2fe40483ec257de2a0d819ef88e3e76c7e261319 ]
+
+As explained in commit 1378817486d6 ("tipc: block BH
+before using dst_cache"), net/core/dst_cache.c
+helpers need to be called with BH disabled.
+
+Disabling preemption in ioam6_output() is not good enough,
+because ioam6_output() is called from process context,
+lwtunnel_output() only uses rcu_read_lock().
+
+We might be interrupted by a softirq, re-enter ioam6_output()
+and corrupt dst_cache data structures.
+
+Fix the race by using local_bh_disable() instead of
+preempt_disable().
+
+Fixes: 8cb3bf8bff3c ("ipv6: ioam: Add support for the ip6ip6 encapsulation")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Justin Iurman <justin.iurman@uliege.be>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Link: https://lore.kernel.org/r/20240531132636.2637995-2-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ioam6_iptunnel.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index f6f5b83dd954d..a5cfc5b0b206b 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -351,9 +351,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+               goto drop;
+       if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+-              preempt_disable();
++              local_bh_disable();
+               dst = dst_cache_get(&ilwt->cache);
+-              preempt_enable();
++              local_bh_enable();
+               if (unlikely(!dst)) {
+                       struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -373,9 +373,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+                               goto drop;
+                       }
+-                      preempt_disable();
++                      local_bh_disable();
+                       dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
+-                      preempt_enable();
++                      local_bh_enable();
+               }
+               skb_dst_drop(skb);
+-- 
+2.43.0
+
diff --git a/queue-6.6/ipv6-sr-block-bh-in-seg6_output_core-and-seg6_input_.patch b/queue-6.6/ipv6-sr-block-bh-in-seg6_output_core-and-seg6_input_.patch
new file mode 100644 (file)
index 0000000..537e655
--- /dev/null
@@ -0,0 +1,94 @@
+From ae57cd72b4e762c57a13f83b368164b9cdfad1e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 13:26:34 +0000
+Subject: ipv6: sr: block BH in seg6_output_core() and seg6_input_core()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit c0b98ac1cc104f48763cdb27b1e9ac25fd81fc90 ]
+
+As explained in commit 1378817486d6 ("tipc: block BH
+before using dst_cache"), net/core/dst_cache.c
+helpers need to be called with BH disabled.
+
+Disabling preemption in seg6_output_core() is not good enough,
+because seg6_output_core() is called from process context,
+lwtunnel_output() only uses rcu_read_lock().
+
+We might be interrupted by a softirq, re-enter seg6_output_core()
+and corrupt dst_cache data structures.
+
+Fix the race by using local_bh_disable() instead of
+preempt_disable().
+
+Apply a similar change in seg6_input_core().
+
+Fixes: fa79581ea66c ("ipv6: sr: fix several BUGs when preemption is enabled")
+Fixes: 6c8702c60b88 ("ipv6: sr: add support for SRH encapsulation and injection with lwtunnels")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: David Lebrun <dlebrun@google.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Link: https://lore.kernel.org/r/20240531132636.2637995-4-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_iptunnel.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index a75df2ec8db0d..098632adc9b5a 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+       slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+-      preempt_disable();
++      local_bh_disable();
+       dst = dst_cache_get(&slwt->cache);
+-      preempt_enable();
+       if (!dst) {
+               ip6_route_input(skb);
+               dst = skb_dst(skb);
+               if (!dst->error) {
+-                      preempt_disable();
+                       dst_cache_set_ip6(&slwt->cache, dst,
+                                         &ipv6_hdr(skb)->saddr);
+-                      preempt_enable();
+               }
+       } else {
+               skb_dst_drop(skb);
+               skb_dst_set(skb, dst);
+       }
++      local_bh_enable();
+       err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+       if (unlikely(err))
+@@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+       slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+-      preempt_disable();
++      local_bh_disable();
+       dst = dst_cache_get(&slwt->cache);
+-      preempt_enable();
++      local_bh_enable();
+       if (unlikely(!dst)) {
+               struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+                       goto drop;
+               }
+-              preempt_disable();
++              local_bh_disable();
+               dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+-              preempt_enable();
++              local_bh_enable();
+       }
+       skb_dst_drop(skb);
+-- 
+2.43.0
+
diff --git a/queue-6.6/irqchip-riscv-intc-allow-large-non-standard-interrup.patch b/queue-6.6/irqchip-riscv-intc-allow-large-non-standard-interrup.patch
new file mode 100644 (file)
index 0000000..a1e124f
--- /dev/null
@@ -0,0 +1,98 @@
+From eed7836bce48533294afc37ef0c2453a6e9384b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Feb 2024 16:39:38 +0800
+Subject: irqchip/riscv-intc: Allow large non-standard interrupt number
+
+From: Yu Chien Peter Lin <peterlin@andestech.com>
+
+[ Upstream commit 96303bcb401c21dc1426d8d9bb1fc74aae5c02a9 ]
+
+Currently, the implementation of the RISC-V INTC driver uses the
+interrupt cause as the hardware interrupt number, with a maximum of
+64 interrupts. However, the platform can expand the interrupt number
+further for custom local interrupts.
+
+To fully utilize the available local interrupt sources, switch
+to using irq_domain_create_tree() that creates the radix tree
+map, add global variables (riscv_intc_nr_irqs, riscv_intc_custom_base
+and riscv_intc_custom_nr_irqs) to determine the valid range of local
+interrupt number (hwirq).
+
+Signed-off-by: Yu Chien Peter Lin <peterlin@andestech.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Randolph <randolph@andestech.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Reviewed-by: Atish Patra <atishp@rivosinc.com>
+Link: https://lore.kernel.org/r/20240222083946.3977135-3-peterlin@andestech.com
+Stable-dep-of: 0110c4b11047 ("irqchip/riscv-intc: Prevent memory leak when riscv_intc_init_common() fails")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-riscv-intc.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index e8d01b14ccdde..684875c397280 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -19,15 +19,16 @@
+ #include <linux/smp.h>
+ static struct irq_domain *intc_domain;
++static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
++static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
++static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
+ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+ {
+       unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+-      if (unlikely(cause >= BITS_PER_LONG))
+-              panic("unexpected interrupt cause");
+-
+-      generic_handle_domain_irq(intc_domain, cause);
++      if (generic_handle_domain_irq(intc_domain, cause))
++              pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
+ }
+ /*
+@@ -93,6 +94,14 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
+       if (ret)
+               return ret;
++      /*
++       * Only allow hwirq for which we have corresponding standard or
++       * custom interrupt enable register.
++       */
++      if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
++          (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
++              return -EINVAL;
++
+       for (i = 0; i < nr_irqs; i++) {
+               ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
+               if (ret)
+@@ -117,8 +126,7 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+ {
+       int rc;
+-      intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
+-                                             &riscv_intc_domain_ops, NULL);
++      intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, NULL);
+       if (!intc_domain) {
+               pr_err("unable to add IRQ domain\n");
+               return -ENXIO;
+@@ -132,7 +140,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+       riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
+-      pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
++      pr_info("%d local interrupts mapped\n", riscv_intc_nr_irqs);
++      if (riscv_intc_custom_nr_irqs) {
++              pr_info("%d custom local interrupts mapped\n",
++                      riscv_intc_custom_nr_irqs);
++      }
+       return 0;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/irqchip-riscv-intc-introduce-andes-hart-level-interr.patch b/queue-6.6/irqchip-riscv-intc-introduce-andes-hart-level-interr.patch
new file mode 100644 (file)
index 0000000..03cc1d2
--- /dev/null
@@ -0,0 +1,196 @@
+From e10b5b43681415f4ca17eb091644e3918191621c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Feb 2024 16:39:39 +0800
+Subject: irqchip/riscv-intc: Introduce Andes hart-level interrupt controller
+
+From: Yu Chien Peter Lin <peterlin@andestech.com>
+
+[ Upstream commit f4cc33e78ba8624a79ba8dea98ce5c85aa9ca33c ]
+
+Add support for the Andes hart-level interrupt controller. This
+controller provides interrupt mask/unmask functions to access the
+custom register (SLIE) where the non-standard S-mode local interrupt
+enable bits are located. The base of custom interrupt number is set
+to 256.
+
+To share the riscv_intc_domain_map() with the generic RISC-V INTC and
+ACPI, add a chip parameter to riscv_intc_init_common(), so it can be
+passed to the irq_domain_set_info() as a private data.
+
+Andes hart-level interrupt controller requires the "andestech,cpu-intc"
+compatible string to be present in interrupt-controller of cpu node to
+enable the use of custom local interrupt source.
+e.g.,
+
+  cpu0: cpu@0 {
+      compatible = "andestech,ax45mp", "riscv";
+      ...
+      cpu0-intc: interrupt-controller {
+          #interrupt-cells = <0x01>;
+          compatible = "andestech,cpu-intc", "riscv,cpu-intc";
+          interrupt-controller;
+      };
+  };
+
+Signed-off-by: Yu Chien Peter Lin <peterlin@andestech.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Randolph <randolph@andestech.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/r/20240222083946.3977135-4-peterlin@andestech.com
+Stable-dep-of: 0110c4b11047 ("irqchip/riscv-intc: Prevent memory leak when riscv_intc_init_common() fails")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-riscv-intc.c | 58 ++++++++++++++++++++++++++++----
+ include/linux/soc/andes/irq.h    | 18 ++++++++++
+ 2 files changed, 69 insertions(+), 7 deletions(-)
+ create mode 100644 include/linux/soc/andes/irq.h
+
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index 684875c397280..0cd6b48a5dbf9 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -17,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/smp.h>
++#include <linux/soc/andes/irq.h>
+ static struct irq_domain *intc_domain;
+ static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
+@@ -48,6 +49,31 @@ static void riscv_intc_irq_unmask(struct irq_data *d)
+       csr_set(CSR_IE, BIT(d->hwirq));
+ }
++static void andes_intc_irq_mask(struct irq_data *d)
++{
++      /*
++       * Andes specific S-mode local interrupt causes (hwirq)
++       * are defined as (256 + n) and controlled by n-th bit
++       * of SLIE.
++       */
++      unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
++
++      if (d->hwirq < ANDES_SLI_CAUSE_BASE)
++              csr_clear(CSR_IE, mask);
++      else
++              csr_clear(ANDES_CSR_SLIE, mask);
++}
++
++static void andes_intc_irq_unmask(struct irq_data *d)
++{
++      unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
++
++      if (d->hwirq < ANDES_SLI_CAUSE_BASE)
++              csr_set(CSR_IE, mask);
++      else
++              csr_set(ANDES_CSR_SLIE, mask);
++}
++
+ static void riscv_intc_irq_eoi(struct irq_data *d)
+ {
+       /*
+@@ -71,12 +97,21 @@ static struct irq_chip riscv_intc_chip = {
+       .irq_eoi = riscv_intc_irq_eoi,
+ };
++static struct irq_chip andes_intc_chip = {
++      .name           = "RISC-V INTC",
++      .irq_mask       = andes_intc_irq_mask,
++      .irq_unmask     = andes_intc_irq_unmask,
++      .irq_eoi        = riscv_intc_irq_eoi,
++};
++
+ static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+                                irq_hw_number_t hwirq)
+ {
++      struct irq_chip *chip = d->host_data;
++
+       irq_set_percpu_devid(irq);
+-      irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+-                          handle_percpu_devid_irq, NULL, NULL);
++      irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
++                          NULL, NULL);
+       return 0;
+ }
+@@ -122,11 +157,12 @@ static struct fwnode_handle *riscv_intc_hwnode(void)
+       return intc_domain->fwnode;
+ }
+-static int __init riscv_intc_init_common(struct fwnode_handle *fn)
++static int __init riscv_intc_init_common(struct fwnode_handle *fn,
++                                       struct irq_chip *chip)
+ {
+       int rc;
+-      intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, NULL);
++      intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
+       if (!intc_domain) {
+               pr_err("unable to add IRQ domain\n");
+               return -ENXIO;
+@@ -152,8 +188,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+ static int __init riscv_intc_init(struct device_node *node,
+                                 struct device_node *parent)
+ {
+-      int rc;
++      struct irq_chip *chip = &riscv_intc_chip;
+       unsigned long hartid;
++      int rc;
+       rc = riscv_of_parent_hartid(node, &hartid);
+       if (rc < 0) {
+@@ -178,10 +215,17 @@ static int __init riscv_intc_init(struct device_node *node,
+               return 0;
+       }
+-      return riscv_intc_init_common(of_node_to_fwnode(node));
++      if (of_device_is_compatible(node, "andestech,cpu-intc")) {
++              riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
++              riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
++              chip = &andes_intc_chip;
++      }
++
++      return riscv_intc_init_common(of_node_to_fwnode(node), chip);
+ }
+ IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
++IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
+ #ifdef CONFIG_ACPI
+@@ -208,7 +252,7 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+               return -ENOMEM;
+       }
+-      return riscv_intc_init_common(fn);
++      return riscv_intc_init_common(fn, &riscv_intc_chip);
+ }
+ IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
+diff --git a/include/linux/soc/andes/irq.h b/include/linux/soc/andes/irq.h
+new file mode 100644
+index 0000000000000..edc3182d6e661
+--- /dev/null
++++ b/include/linux/soc/andes/irq.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2023 Andes Technology Corporation
++ */
++#ifndef __ANDES_IRQ_H
++#define __ANDES_IRQ_H
++
++/* Andes PMU irq number */
++#define ANDES_RV_IRQ_PMOVI            18
++#define ANDES_RV_IRQ_LAST             ANDES_RV_IRQ_PMOVI
++#define ANDES_SLI_CAUSE_BASE          256
++
++/* Andes PMU related registers */
++#define ANDES_CSR_SLIE                        0x9c4
++#define ANDES_CSR_SLIP                        0x9c5
++#define ANDES_CSR_SCOUNTEROF          0x9d4
++
++#endif /* __ANDES_IRQ_H */
+-- 
+2.43.0
+
diff --git a/queue-6.6/irqchip-riscv-intc-prevent-memory-leak-when-riscv_in.patch b/queue-6.6/irqchip-riscv-intc-prevent-memory-leak-when-riscv_in.patch
new file mode 100644 (file)
index 0000000..a819975
--- /dev/null
@@ -0,0 +1,55 @@
+From 3e094b6741d6f81ca0d381015936e068e4e65f8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 May 2024 13:41:13 +0530
+Subject: irqchip/riscv-intc: Prevent memory leak when riscv_intc_init_common()
+ fails
+
+From: Sunil V L <sunilvl@ventanamicro.com>
+
+[ Upstream commit 0110c4b110477bb1f19b0d02361846be7ab08300 ]
+
+When riscv_intc_init_common() fails, the firmware node allocated is not
+freed. Add the missing free().
+
+Fixes: 7023b9d83f03 ("irqchip/riscv-intc: Add ACPI support")
+Signed-off-by: Sunil V L <sunilvl@ventanamicro.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240527081113.616189-1-sunilvl@ventanamicro.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-riscv-intc.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index 0cd6b48a5dbf9..627beae9649a2 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -232,8 +232,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
+ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+                                      const unsigned long end)
+ {
+-      struct fwnode_handle *fn;
+       struct acpi_madt_rintc *rintc;
++      struct fwnode_handle *fn;
++      int rc;
+       rintc = (struct acpi_madt_rintc *)header;
+@@ -252,7 +253,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+               return -ENOMEM;
+       }
+-      return riscv_intc_init_common(fn, &riscv_intc_chip);
++      rc = riscv_intc_init_common(fn, &riscv_intc_chip);
++      if (rc)
++              irq_domain_free_fwnode(fn);
++
++      return rc;
+ }
+ IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
+-- 
+2.43.0
+
diff --git a/queue-6.6/ksmbd-use-rwsem-instead-of-rwlock-for-lease-break.patch b/queue-6.6/ksmbd-use-rwsem-instead-of-rwlock-for-lease-break.patch
new file mode 100644 (file)
index 0000000..74bc46b
--- /dev/null
@@ -0,0 +1,287 @@
+From 8ba343ad504a3ba51867b1bf66266434373f93ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 May 2024 10:07:50 +0900
+Subject: ksmbd: use rwsem instead of rwlock for lease break
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit d1c189c6cb8b0fb7b5ee549237d27889c40c2f8b ]
+
+lease break wait for lease break acknowledgment.
+rwsem is more suitable than unlock while traversing the list for parent
+lease break in ->m_op_list.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/oplock.c     | 30 +++++++++++-------------------
+ fs/smb/server/smb2pdu.c    |  4 ++--
+ fs/smb/server/smb_common.c |  4 ++--
+ fs/smb/server/vfs_cache.c  | 28 ++++++++++++++--------------
+ fs/smb/server/vfs_cache.h  |  2 +-
+ 5 files changed, 30 insertions(+), 38 deletions(-)
+
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 7d17a14378e33..a8f52c4ebbdad 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -207,9 +207,9 @@ static void opinfo_add(struct oplock_info *opinfo)
+ {
+       struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+-      write_lock(&ci->m_lock);
++      down_write(&ci->m_lock);
+       list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
+-      write_unlock(&ci->m_lock);
++      up_write(&ci->m_lock);
+ }
+ static void opinfo_del(struct oplock_info *opinfo)
+@@ -221,9 +221,9 @@ static void opinfo_del(struct oplock_info *opinfo)
+               lease_del_list(opinfo);
+               write_unlock(&lease_list_lock);
+       }
+-      write_lock(&ci->m_lock);
++      down_write(&ci->m_lock);
+       list_del_rcu(&opinfo->op_entry);
+-      write_unlock(&ci->m_lock);
++      up_write(&ci->m_lock);
+ }
+ static unsigned long opinfo_count(struct ksmbd_file *fp)
+@@ -526,21 +526,18 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+        * Compare lease key and client_guid to know request from same owner
+        * of same client
+        */
+-      read_lock(&ci->m_lock);
++      down_read(&ci->m_lock);
+       list_for_each_entry(opinfo, &ci->m_op_list, op_entry) {
+               if (!opinfo->is_lease || !opinfo->conn)
+                       continue;
+-              read_unlock(&ci->m_lock);
+               lease = opinfo->o_lease;
+               ret = compare_guid_key(opinfo, client_guid, lctx->lease_key);
+               if (ret) {
+                       m_opinfo = opinfo;
+                       /* skip upgrading lease about breaking lease */
+-                      if (atomic_read(&opinfo->breaking_cnt)) {
+-                              read_lock(&ci->m_lock);
++                      if (atomic_read(&opinfo->breaking_cnt))
+                               continue;
+-                      }
+                       /* upgrading lease */
+                       if ((atomic_read(&ci->op_count) +
+@@ -570,9 +567,8 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+                               lease_none_upgrade(opinfo, lctx->req_state);
+                       }
+               }
+-              read_lock(&ci->m_lock);
+       }
+-      read_unlock(&ci->m_lock);
++      up_read(&ci->m_lock);
+       return m_opinfo;
+ }
+@@ -1119,7 +1115,7 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+       if (!p_ci)
+               return;
+-      read_lock(&p_ci->m_lock);
++      down_read(&p_ci->m_lock);
+       list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
+               if (opinfo->conn == NULL || !opinfo->is_lease)
+                       continue;
+@@ -1137,13 +1133,11 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+                               continue;
+                       }
+-                      read_unlock(&p_ci->m_lock);
+                       oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
+                       opinfo_conn_put(opinfo);
+-                      read_lock(&p_ci->m_lock);
+               }
+       }
+-      read_unlock(&p_ci->m_lock);
++      up_read(&p_ci->m_lock);
+       ksmbd_inode_put(p_ci);
+ }
+@@ -1164,7 +1158,7 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
+       if (!p_ci)
+               return;
+-      read_lock(&p_ci->m_lock);
++      down_read(&p_ci->m_lock);
+       list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
+               if (opinfo->conn == NULL || !opinfo->is_lease)
+                       continue;
+@@ -1178,13 +1172,11 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
+                               atomic_dec(&opinfo->conn->r_count);
+                               continue;
+                       }
+-                      read_unlock(&p_ci->m_lock);
+                       oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
+                       opinfo_conn_put(opinfo);
+-                      read_lock(&p_ci->m_lock);
+               }
+       }
+-      read_unlock(&p_ci->m_lock);
++      up_read(&p_ci->m_lock);
+       ksmbd_inode_put(p_ci);
+ }
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6a15c5d64f415..8df93c9d4ee41 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -3376,9 +3376,9 @@ int smb2_open(struct ksmbd_work *work)
+        * after daccess, saccess, attrib_only, and stream are
+        * initialized.
+        */
+-      write_lock(&fp->f_ci->m_lock);
++      down_write(&fp->f_ci->m_lock);
+       list_add(&fp->node, &fp->f_ci->m_fp_list);
+-      write_unlock(&fp->f_ci->m_lock);
++      up_write(&fp->f_ci->m_lock);
+       /* Check delete pending among previous fp before oplock break */
+       if (ksmbd_inode_pending_delete(fp)) {
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index fcaf373cc0080..474dadf6b7b8b 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -646,7 +646,7 @@ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
+        * Lookup fp in master fp list, and check desired access and
+        * shared mode between previous open and current open.
+        */
+-      read_lock(&curr_fp->f_ci->m_lock);
++      down_read(&curr_fp->f_ci->m_lock);
+       list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
+               if (file_inode(filp) != file_inode(prev_fp->filp))
+                       continue;
+@@ -722,7 +722,7 @@ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
+                       break;
+               }
+       }
+-      read_unlock(&curr_fp->f_ci->m_lock);
++      up_read(&curr_fp->f_ci->m_lock);
+       return rc;
+ }
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index 030f70700036c..6cb599cd287ee 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -165,7 +165,7 @@ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+       ci->m_fattr = 0;
+       INIT_LIST_HEAD(&ci->m_fp_list);
+       INIT_LIST_HEAD(&ci->m_op_list);
+-      rwlock_init(&ci->m_lock);
++      init_rwsem(&ci->m_lock);
+       ci->m_de = fp->filp->f_path.dentry;
+       return 0;
+ }
+@@ -261,14 +261,14 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
+       }
+       if (atomic_dec_and_test(&ci->m_count)) {
+-              write_lock(&ci->m_lock);
++              down_write(&ci->m_lock);
+               if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
+                       ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
+-                      write_unlock(&ci->m_lock);
++                      up_write(&ci->m_lock);
+                       ksmbd_vfs_unlink(filp);
+-                      write_lock(&ci->m_lock);
++                      down_write(&ci->m_lock);
+               }
+-              write_unlock(&ci->m_lock);
++              up_write(&ci->m_lock);
+               ksmbd_inode_free(ci);
+       }
+@@ -289,9 +289,9 @@ static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp
+       if (!has_file_id(fp->volatile_id))
+               return;
+-      write_lock(&fp->f_ci->m_lock);
++      down_write(&fp->f_ci->m_lock);
+       list_del_init(&fp->node);
+-      write_unlock(&fp->f_ci->m_lock);
++      up_write(&fp->f_ci->m_lock);
+       write_lock(&ft->lock);
+       idr_remove(ft->idr, fp->volatile_id);
+@@ -523,17 +523,17 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
+       if (!ci)
+               return NULL;
+-      read_lock(&ci->m_lock);
++      down_read(&ci->m_lock);
+       list_for_each_entry(lfp, &ci->m_fp_list, node) {
+               if (inode == file_inode(lfp->filp)) {
+                       atomic_dec(&ci->m_count);
+                       lfp = ksmbd_fp_get(lfp);
+-                      read_unlock(&ci->m_lock);
++                      up_read(&ci->m_lock);
+                       return lfp;
+               }
+       }
+       atomic_dec(&ci->m_count);
+-      read_unlock(&ci->m_lock);
++      up_read(&ci->m_lock);
+       return NULL;
+ }
+@@ -705,13 +705,13 @@ static bool session_fd_check(struct ksmbd_tree_connect *tcon,
+       conn = fp->conn;
+       ci = fp->f_ci;
+-      write_lock(&ci->m_lock);
++      down_write(&ci->m_lock);
+       list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
+               if (op->conn != conn)
+                       continue;
+               op->conn = NULL;
+       }
+-      write_unlock(&ci->m_lock);
++      up_write(&ci->m_lock);
+       fp->conn = NULL;
+       fp->tcon = NULL;
+@@ -801,13 +801,13 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
+       fp->tcon = work->tcon;
+       ci = fp->f_ci;
+-      write_lock(&ci->m_lock);
++      down_write(&ci->m_lock);
+       list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
+               if (op->conn)
+                       continue;
+               op->conn = fp->conn;
+       }
+-      write_unlock(&ci->m_lock);
++      up_write(&ci->m_lock);
+       __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
+       if (!has_file_id(fp->volatile_id)) {
+diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
+index ed44fb4e18e79..5a225e7055f19 100644
+--- a/fs/smb/server/vfs_cache.h
++++ b/fs/smb/server/vfs_cache.h
+@@ -47,7 +47,7 @@ struct stream {
+ };
+ struct ksmbd_inode {
+-      rwlock_t                        m_lock;
++      struct rw_semaphore             m_lock;
+       atomic_t                        m_count;
+       atomic_t                        op_count;
+       /* opinfo count for streams */
+-- 
+2.43.0
+
diff --git a/queue-6.6/kvm-sev-do-not-intercept-accesses-to-msr_ia32_xss-fo.patch b/queue-6.6/kvm-sev-do-not-intercept-accesses-to-msr_ia32_xss-fo.patch
new file mode 100644 (file)
index 0000000..5d7933f
--- /dev/null
@@ -0,0 +1,111 @@
+From 0d2adef0d6b8775bfaad1d626dd690bfc39fa8c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Oct 2023 08:27:32 -0500
+Subject: KVM: SEV: Do not intercept accesses to MSR_IA32_XSS for SEV-ES guests
+
+From: Michael Roth <michael.roth@amd.com>
+
+[ Upstream commit a26b7cd2254695f8258cc370f33280db0a9a3813 ]
+
+When intercepts are enabled for MSR_IA32_XSS, the host will swap in/out
+the guest-defined values while context-switching to/from guest mode.
+However, in the case of SEV-ES, vcpu->arch.guest_state_protected is set,
+so the guest-defined value is effectively ignored when switching to
+guest mode with the understanding that the VMSA will handle swapping
+in/out this register state.
+
+However, SVM is still configured to intercept these accesses for SEV-ES
+guests, so the values in the initial MSR_IA32_XSS are effectively
+read-only, and a guest will experience undefined behavior if it actually
+tries to write to this MSR. Fortunately, only CET/shadowstack makes use
+of this register on SEV-ES-capable systems currently, which isn't yet
+widely used, but this may become more of an issue in the future.
+
+Additionally, enabling intercepts of MSR_IA32_XSS results in #VC
+exceptions in the guest in certain paths that can lead to unexpected #VC
+nesting levels. One example is SEV-SNP guests when handling #VC
+exceptions for CPUID instructions involving leaf 0xD, subleaf 0x1, since
+they will access MSR_IA32_XSS as part of servicing the CPUID #VC, then
+generate another #VC when accessing MSR_IA32_XSS, which can lead to
+guest crashes if an NMI occurs at that point in time. Running perf on a
+guest while it is issuing such a sequence is one example where these can
+be problematic.
+
+Address this by disabling intercepts of MSR_IA32_XSS for SEV-ES guests
+if the host/guest configuration allows it. If the host/guest
+configuration doesn't allow for MSR_IA32_XSS, leave it intercepted so
+that it can be caught by the existing checks in
+kvm_{set,get}_msr_common() if the guest still attempts to access it.
+
+Fixes: 376c6d285017 ("KVM: SVM: Provide support for SEV-ES vCPU creation/loading")
+Cc: Alexey Kardashevskiy <aik@amd.com>
+Suggested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Michael Roth <michael.roth@amd.com>
+Message-Id: <20231016132819.1002933-4-michael.roth@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: b7e4be0a224f ("KVM: SEV-ES: Delegate LBR virtualization to the processor")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/sev.c | 19 +++++++++++++++++++
+ arch/x86/kvm/svm/svm.c |  1 +
+ arch/x86/kvm/svm/svm.h |  2 +-
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 0e643d7a06d9e..f809dcfacc8a3 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2994,6 +2994,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+               set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
+       }
++
++      /*
++       * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
++       * the host/guest supports its use.
++       *
++       * guest_can_use() checks a number of requirements on the host/guest to
++       * ensure that MSR_IA32_XSS is available, but it might report true even
++       * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
++       * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
++       * to further check that the guest CPUID actually supports
++       * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
++       * guests will still get intercepted and caught in the normal
++       * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
++       */
++      if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
++          guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
++              set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
++      else
++              set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
+ }
+ void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9e084e22a12f7..08f1397138c80 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
+       { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
+       { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
+       { .index = MSR_IA32_LASTINTTOIP,                .always = false },
++      { .index = MSR_IA32_XSS,                        .always = false },
+       { .index = MSR_EFER,                            .always = false },
+       { .index = MSR_IA32_CR_PAT,                     .always = false },
+       { .index = MSR_AMD64_SEV_ES_GHCB,               .always = true  },
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 53bc4b0e388be..fb0ac8497fb20 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -30,7 +30,7 @@
+ #define       IOPM_SIZE PAGE_SIZE * 3
+ #define       MSRPM_SIZE PAGE_SIZE * 2
+-#define MAX_DIRECT_ACCESS_MSRS        46
++#define MAX_DIRECT_ACCESS_MSRS        47
+ #define MSRPM_OFFSETS 32
+ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
+ extern bool npt_enabled;
+-- 
+2.43.0
+
diff --git a/queue-6.6/kvm-sev-es-delegate-lbr-virtualization-to-the-proces.patch b/queue-6.6/kvm-sev-es-delegate-lbr-virtualization-to-the-proces.patch
new file mode 100644 (file)
index 0000000..43fad7d
--- /dev/null
@@ -0,0 +1,148 @@
+From a2c2128bf706bb2619e38ef0d65a87e26533be0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 04:46:44 +0000
+Subject: KVM: SEV-ES: Delegate LBR virtualization to the processor
+
+From: Ravi Bangoria <ravi.bangoria@amd.com>
+
+[ Upstream commit b7e4be0a224fe5c6be30c1c8bdda8d2317ad6ba4 ]
+
+As documented in APM[1], LBR Virtualization must be enabled for SEV-ES
+guests. Although KVM currently enforces LBRV for SEV-ES guests, there
+are multiple issues with it:
+
+o MSR_IA32_DEBUGCTLMSR is still intercepted. Since MSR_IA32_DEBUGCTLMSR
+  interception is used to dynamically toggle LBRV for performance reasons,
+  this can be fatal for SEV-ES guests. For ex SEV-ES guest on Zen3:
+
+  [guest ~]# wrmsr 0x1d9 0x4
+  KVM: entry failed, hardware error 0xffffffff
+  EAX=00000004 EBX=00000000 ECX=000001d9 EDX=00000000
+
+  Fix this by never intercepting MSR_IA32_DEBUGCTLMSR for SEV-ES guests.
+  No additional save/restore logic is required since MSR_IA32_DEBUGCTLMSR
+  is of swap type A.
+
+o KVM will disable LBRV if userspace sets MSR_IA32_DEBUGCTLMSR before the
+  VMSA is encrypted. Fix this by moving LBRV enablement code post VMSA
+  encryption.
+
+[1]: AMD64 Architecture Programmer's Manual Pub. 40332, Rev. 4.07 - June
+     2023, Vol 2, 15.35.2 Enabling SEV-ES.
+     https://bugzilla.kernel.org/attachment.cgi?id=304653
+
+Fixes: 376c6d285017 ("KVM: SVM: Provide support for SEV-ES vCPU creation/loading")
+Co-developed-by: Nikunj A Dadhania <nikunj@amd.com>
+Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
+Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
+Message-ID: <20240531044644.768-4-ravi.bangoria@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/sev.c | 13 ++++++++-----
+ arch/x86/kvm/svm/svm.c |  8 +++++++-
+ arch/x86/kvm/svm/svm.h |  3 ++-
+ 3 files changed, 17 insertions(+), 7 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index f809dcfacc8a3..99e72b8a96ac0 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -664,6 +664,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+         return ret;
+       vcpu->arch.guest_state_protected = true;
++
++      /*
++       * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
++       * only after setting guest_state_protected because KVM_SET_MSRS allows
++       * dynamic toggling of LBRV (for performance reason) on write access to
++       * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
++       */
++      svm_enable_lbrv(vcpu);
+       return 0;
+ }
+@@ -3035,7 +3043,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+       svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
+-      svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+       /*
+        * An SEV-ES guest requires a VMSA area that is a separate from the
+@@ -3087,10 +3094,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+       /* Clear intercepts on selected MSRs */
+       set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
+       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
+-      set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+-      set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+-      set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+-      set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ }
+ void sev_init_vmcb(struct vcpu_svm *svm)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 08f1397138c80..e3c2acc1adc73 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
+       { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
+       { .index = MSR_IA32_PRED_CMD,                   .always = false },
+       { .index = MSR_IA32_FLUSH_CMD,                  .always = false },
++      { .index = MSR_IA32_DEBUGCTLMSR,                .always = false },
+       { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
+       { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
+       { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
+@@ -1008,7 +1009,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
+       vmcb_mark_dirty(to_vmcb, VMCB_LBR);
+ }
+-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
+@@ -1018,6 +1019,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
++      if (sev_es_guest(vcpu->kvm))
++              set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
++
+       /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
+       if (is_guest_mode(vcpu))
+               svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
+@@ -1027,6 +1031,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
++      KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
++
+       svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index fb0ac8497fb20..37ada9808d9b5 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -30,7 +30,7 @@
+ #define       IOPM_SIZE PAGE_SIZE * 3
+ #define       MSRPM_SIZE PAGE_SIZE * 2
+-#define MAX_DIRECT_ACCESS_MSRS        47
++#define MAX_DIRECT_ACCESS_MSRS        48
+ #define MSRPM_OFFSETS 32
+ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
+ extern bool npt_enabled;
+@@ -542,6 +542,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
+ void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
+ void svm_vcpu_free_msrpm(u32 *msrpm);
+ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
++void svm_enable_lbrv(struct kvm_vcpu *vcpu);
+ void svm_update_lbrv(struct kvm_vcpu *vcpu);
+ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+-- 
+2.43.0
+
diff --git a/queue-6.6/kvm-sev-es-disallow-sev-es-guests-when-x86_feature_l.patch b/queue-6.6/kvm-sev-es-disallow-sev-es-guests-when-x86_feature_l.patch
new file mode 100644 (file)
index 0000000..d5d1767
--- /dev/null
@@ -0,0 +1,100 @@
+From ff0ac631a134392bed4fad24c0fcc162204be63a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 04:46:43 +0000
+Subject: KVM: SEV-ES: Disallow SEV-ES guests when X86_FEATURE_LBRV is absent
+
+From: Ravi Bangoria <ravi.bangoria@amd.com>
+
+[ Upstream commit d922056215617eedfbdbc29fe49953423686fe5e ]
+
+As documented in APM[1], LBR Virtualization must be enabled for SEV-ES
+guests. So, prevent SEV-ES guests when LBRV support is missing.
+
+[1]: AMD64 Architecture Programmer's Manual Pub. 40332, Rev. 4.07 - June
+     2023, Vol 2, 15.35.2 Enabling SEV-ES.
+     https://bugzilla.kernel.org/attachment.cgi?id=304653
+
+Fixes: 376c6d285017 ("KVM: SVM: Provide support for SEV-ES vCPU creation/loading")
+Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
+Message-ID: <20240531044644.768-3-ravi.bangoria@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/sev.c |  6 ++++++
+ arch/x86/kvm/svm/svm.c | 16 +++++++---------
+ arch/x86/kvm/svm/svm.h |  1 +
+ 3 files changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index c5845f31c34dc..0e643d7a06d9e 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2264,6 +2264,12 @@ void __init sev_hardware_setup(void)
+       if (!boot_cpu_has(X86_FEATURE_SEV_ES))
+               goto out;
++      if (!lbrv) {
++              WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
++                        "LBRV must be present for SEV-ES support");
++              goto out;
++      }
++
+       /* Has the system been allocated ASIDs for SEV-ES? */
+       if (min_sev_asid == 1)
+               goto out;
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 1efbe8b33f6a1..9e084e22a12f7 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -214,7 +214,7 @@ int vgif = true;
+ module_param(vgif, int, 0444);
+ /* enable/disable LBR virtualization */
+-static int lbrv = true;
++int lbrv = true;
+ module_param(lbrv, int, 0444);
+ static int tsc_scaling = true;
+@@ -5248,6 +5248,12 @@ static __init int svm_hardware_setup(void)
+       nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
++      if (lbrv) {
++              if (!boot_cpu_has(X86_FEATURE_LBRV))
++                      lbrv = false;
++              else
++                      pr_info("LBR virtualization supported\n");
++      }
+       /*
+        * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
+        * may be modified by svm_adjust_mmio_mask()), as well as nrips.
+@@ -5301,14 +5307,6 @@ static __init int svm_hardware_setup(void)
+               svm_x86_ops.set_vnmi_pending = NULL;
+       }
+-
+-      if (lbrv) {
+-              if (!boot_cpu_has(X86_FEATURE_LBRV))
+-                      lbrv = false;
+-              else
+-                      pr_info("LBR virtualization supported\n");
+-      }
+-
+       if (!enable_pmu)
+               pr_info("PMU virtualization is disabled\n");
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index be67ab7fdd104..53bc4b0e388be 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -39,6 +39,7 @@ extern int vgif;
+ extern bool intercept_smi;
+ extern bool x2avic_enabled;
+ extern bool vnmi;
++extern int lbrv;
+ /*
+  * Clean bits in VMCB.
+-- 
+2.43.0
+
diff --git a/queue-6.6/memory-failure-use-a-folio-in-me_huge_page.patch b/queue-6.6/memory-failure-use-a-folio-in-me_huge_page.patch
new file mode 100644 (file)
index 0000000..5ce3fec
--- /dev/null
@@ -0,0 +1,63 @@
+From ca963e2fedd01fd9b6180487a9c2b85fd5db4160 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Nov 2023 16:14:45 +0000
+Subject: memory-failure: use a folio in me_huge_page()
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit b6fd410c32f1a66a52a42d6aae1ab7b011b74547 ]
+
+This function was already explicitly calling compound_head();
+unfortunately the compiler can't know that and elide the redundant calls
+to compound_head() buried in page_mapping(), unlock_page(), etc.  Switch
+to using a folio, which does let us elide these calls.
+
+Link: https://lkml.kernel.org/r/20231117161447.2461643-5-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 8cf360b9d6a8 ("mm/memory-failure: fix handling of dissolved but not taken off from buddy pages")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memory-failure.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 5378edad9df8f..9c27ec0a27a30 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1193,25 +1193,25 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
+  */
+ static int me_huge_page(struct page_state *ps, struct page *p)
+ {
++      struct folio *folio = page_folio(p);
+       int res;
+-      struct page *hpage = compound_head(p);
+       struct address_space *mapping;
+       bool extra_pins = false;
+-      mapping = page_mapping(hpage);
++      mapping = folio_mapping(folio);
+       if (mapping) {
+-              res = truncate_error_page(hpage, page_to_pfn(p), mapping);
++              res = truncate_error_page(&folio->page, page_to_pfn(p), mapping);
+               /* The page is kept in page cache. */
+               extra_pins = true;
+-              unlock_page(hpage);
++              folio_unlock(folio);
+       } else {
+-              unlock_page(hpage);
++              folio_unlock(folio);
+               /*
+                * migration entry prevents later access on error hugepage,
+                * so we can free and dissolve it into buddy to save healthy
+                * subpages.
+                */
+-              put_page(hpage);
++              folio_put(folio);
+               if (__page_handle_poison(p) >= 0) {
+                       page_ref_inc(p);
+                       res = MF_RECOVERED;
+-- 
+2.43.0
+
diff --git a/queue-6.6/mm-memory-failure-fix-handling-of-dissolved-but-not-.patch b/queue-6.6/mm-memory-failure-fix-handling-of-dissolved-but-not-.patch
new file mode 100644 (file)
index 0000000..e921c9c
--- /dev/null
@@ -0,0 +1,147 @@
+From bf13c3d590f238b92e3275732323c81088339738 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 15:12:17 +0800
+Subject: mm/memory-failure: fix handling of dissolved but not taken off from
+ buddy pages
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit 8cf360b9d6a840700e06864236a01a883b34bbad ]
+
+When I did memory failure tests recently, below panic occurs:
+
+page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x8cee00
+flags: 0x6fffe0000000000(node=1|zone=2|lastcpupid=0x7fff)
+raw: 06fffe0000000000 dead000000000100 dead000000000122 0000000000000000
+raw: 0000000000000000 0000000000000009 00000000ffffffff 0000000000000000
+page dumped because: VM_BUG_ON_PAGE(!PageBuddy(page))
+------------[ cut here ]------------
+kernel BUG at include/linux/page-flags.h:1009!
+invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+RIP: 0010:__del_page_from_free_list+0x151/0x180
+RSP: 0018:ffffa49c90437998 EFLAGS: 00000046
+RAX: 0000000000000035 RBX: 0000000000000009 RCX: ffff8dd8dfd1c9c8
+RDX: 0000000000000000 RSI: 0000000000000027 RDI: ffff8dd8dfd1c9c0
+RBP: ffffd901233b8000 R08: ffffffffab5511f8 R09: 0000000000008c69
+R10: 0000000000003c15 R11: ffffffffab5511f8 R12: ffff8dd8fffc0c80
+R13: 0000000000000001 R14: ffff8dd8fffc0c80 R15: 0000000000000009
+FS:  00007ff916304740(0000) GS:ffff8dd8dfd00000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000055eae50124c8 CR3: 00000008479e0000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ __rmqueue_pcplist+0x23b/0x520
+ get_page_from_freelist+0x26b/0xe40
+ __alloc_pages_noprof+0x113/0x1120
+ __folio_alloc_noprof+0x11/0xb0
+ alloc_buddy_hugetlb_folio.isra.0+0x5a/0x130
+ __alloc_fresh_hugetlb_folio+0xe7/0x140
+ alloc_pool_huge_folio+0x68/0x100
+ set_max_huge_pages+0x13d/0x340
+ hugetlb_sysctl_handler_common+0xe8/0x110
+ proc_sys_call_handler+0x194/0x280
+ vfs_write+0x387/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xc2/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7ff916114887
+RSP: 002b:00007ffec8a2fd78 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 000055eae500e350 RCX: 00007ff916114887
+RDX: 0000000000000004 RSI: 000055eae500e390 RDI: 0000000000000003
+RBP: 000055eae50104c0 R08: 0000000000000000 R09: 000055eae50104c0
+R10: 0000000000000077 R11: 0000000000000246 R12: 0000000000000004
+R13: 0000000000000004 R14: 00007ff916216b80 R15: 00007ff916216a00
+ </TASK>
+Modules linked in: mce_inject hwpoison_inject
+---[ end trace 0000000000000000 ]---
+
+And before the panic, there had an warning about bad page state:
+
+BUG: Bad page state in process page-types  pfn:8cee00
+page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x8cee00
+flags: 0x6fffe0000000000(node=1|zone=2|lastcpupid=0x7fff)
+page_type: 0xffffff7f(buddy)
+raw: 06fffe0000000000 ffffd901241c0008 ffffd901240f8008 0000000000000000
+raw: 0000000000000000 0000000000000009 00000000ffffff7f 0000000000000000
+page dumped because: nonzero mapcount
+Modules linked in: mce_inject hwpoison_inject
+CPU: 8 PID: 154211 Comm: page-types Not tainted 6.9.0-rc4-00499-g5544ec3178e2-dirty #22
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x83/0xa0
+ bad_page+0x63/0xf0
+ free_unref_page+0x36e/0x5c0
+ unpoison_memory+0x50b/0x630
+ simple_attr_write_xsigned.constprop.0.isra.0+0xb3/0x110
+ debugfs_attr_write+0x42/0x60
+ full_proxy_write+0x5b/0x80
+ vfs_write+0xcd/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xc2/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f189a514887
+RSP: 002b:00007ffdcd899718 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f189a514887
+RDX: 0000000000000009 RSI: 00007ffdcd899730 RDI: 0000000000000003
+RBP: 00007ffdcd8997a0 R08: 0000000000000000 R09: 00007ffdcd8994b2
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffdcda199a8
+R13: 0000000000404af1 R14: 000000000040ad78 R15: 00007f189a7a5040
+ </TASK>
+
+The root cause should be the below race:
+
+ memory_failure
+  try_memory_failure_hugetlb
+   me_huge_page
+    __page_handle_poison
+     dissolve_free_hugetlb_folio
+     drain_all_pages -- Buddy page can be isolated e.g. for compaction.
+     take_page_off_buddy -- Failed as page is not in the buddy list.
+            -- Page can be putback into buddy after compaction.
+    page_ref_inc -- Leads to buddy page with refcnt = 1.
+
+Then unpoison_memory() can unpoison the page and send the buddy page back
+into buddy list again leading to the above bad page state warning.  And
+bad_page() will call page_mapcount_reset() to remove PageBuddy from buddy
+page leading to later VM_BUG_ON_PAGE(!PageBuddy(page)) when trying to
+allocate this page.
+
+Fix this issue by only treating __page_handle_poison() as successful when
+it returns 1.
+
+Link: https://lkml.kernel.org/r/20240523071217.1696196-1-linmiaohe@huawei.com
+Fixes: ceaf8fbea79a ("mm, hwpoison: skip raw hwpoison page in freeing 1GB hugepage")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memory-failure.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 9c27ec0a27a30..c7e2b609184b6 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1212,7 +1212,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
+                * subpages.
+                */
+               folio_put(folio);
+-              if (__page_handle_poison(p) >= 0) {
++              if (__page_handle_poison(p) > 0) {
+                       page_ref_inc(p);
+                       res = MF_RECOVERED;
+               } else {
+@@ -2082,7 +2082,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
+        */
+       if (res == 0) {
+               folio_unlock(folio);
+-              if (__page_handle_poison(p) >= 0) {
++              if (__page_handle_poison(p) > 0) {
+                       page_ref_inc(p);
+                       res = MF_RECOVERED;
+               } else {
+-- 
+2.43.0
+
diff --git a/queue-6.6/mptcp-count-close-wait-sockets-for-mptcp_mib_currest.patch b/queue-6.6/mptcp-count-close-wait-sockets-for-mptcp_mib_currest.patch
new file mode 100644 (file)
index 0000000..9cb6bec
--- /dev/null
@@ -0,0 +1,61 @@
+From d24c56ac9d2f8400d8ea97477f8a506c309e7def Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 01:02:17 +0800
+Subject: mptcp: count CLOSE-WAIT sockets for MPTCP_MIB_CURRESTAB
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 9633e9377e6af0244f7381e86b9aac5276f5be97 ]
+
+Like previous patch does in TCP, we need to adhere to RFC 1213:
+
+  "tcpCurrEstab OBJECT-TYPE
+   ...
+   The number of TCP connections for which the current state
+   is either ESTABLISHED or CLOSE- WAIT."
+
+So let's consider CLOSE-WAIT sockets.
+
+The logic of counting
+When we increment the counter?
+a) Only if we change the state to ESTABLISHED.
+
+When we decrement the counter?
+a) if the socket leaves ESTABLISHED and will never go into CLOSE-WAIT,
+say, on the client side, changing from ESTABLISHED to FIN-WAIT-1.
+b) if the socket leaves CLOSE-WAIT, say, on the server side, changing
+from CLOSE-WAIT to LAST-ACK.
+
+Fixes: d9cd27b8cd19 ("mptcp: add CurrEstab MIB counter support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 618d80112d1e2..4ace52e4211ad 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2873,9 +2873,14 @@ void mptcp_set_state(struct sock *sk, int state)
+               if (oldstate != TCP_ESTABLISHED)
+                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
+               break;
+-
++      case TCP_CLOSE_WAIT:
++              /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
++               * MPTCP "accepted" sockets will be created later on. So no
++               * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
++               */
++              break;
+       default:
+-              if (oldstate == TCP_ESTABLISHED)
++              if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
+                       MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-ethtool-fix-the-error-condition-in-ethtool_get_p.patch b/queue-6.6/net-ethtool-fix-the-error-condition-in-ethtool_get_p.patch
new file mode 100644 (file)
index 0000000..d90dad8
--- /dev/null
@@ -0,0 +1,44 @@
+From 7597eef1bf5f3cbcf838db6f39e7099437437502 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jun 2024 11:47:43 +0800
+Subject: net: ethtool: fix the error condition in
+ ethtool_get_phy_stats_ethtool()
+
+From: Su Hui <suhui@nfschina.com>
+
+[ Upstream commit 0dcc53abf58d572d34c5313de85f607cd33fc691 ]
+
+Clang static checker (scan-build) warning:
+net/ethtool/ioctl.c:line 2233, column 2
+Called function pointer is null (null dereference).
+
+Return '-EOPNOTSUPP' when 'ops->get_ethtool_phy_stats' is NULL to fix
+this typo error.
+
+Fixes: 201ed315f967 ("net/ethtool/ioctl: split ethtool_get_phy_stats into multiple helpers")
+Signed-off-by: Su Hui <suhui@nfschina.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Hariprasad Kelam <hkelam@marvell.com>
+Link: https://lore.kernel.org/r/20240605034742.921751-1-suhui@nfschina.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ethtool/ioctl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 0b0ce4f81c017..7cb23bcf8ef7a 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -2134,7 +2134,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       int n_stats, ret;
+-      if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
++      if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
+               return -EOPNOTSUPP;
+       n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-mlx5-always-stop-health-timer-during-driver-remo.patch b/queue-6.6/net-mlx5-always-stop-health-timer-during-driver-remo.patch
new file mode 100644 (file)
index 0000000..744b788
--- /dev/null
@@ -0,0 +1,105 @@
+From 19a8d0c667bfb8613b3ba9953f573b5329ef8832 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 00:04:43 +0300
+Subject: net/mlx5: Always stop health timer during driver removal
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit c8b3f38d2dae0397944814d691a419c451f9906f ]
+
+Currently, if teardown_hca fails to execute during driver removal, mlx5
+does not stop the health timer. Afterwards, mlx5 continues with driver
+teardown. This may lead to a UAF bug, which results in a page fault
+Oops[1], since the health timer fires after resources were freed.
+
+Hence, stop the health monitor even if teardown_hca fails.
+
+[1]
+mlx5_core 0000:18:00.0: E-Switch: Unload vfs: mode(LEGACY), nvfs(0), necvfs(0), active vports(0)
+mlx5_core 0000:18:00.0: E-Switch: Disable: mode(LEGACY), nvfs(0), necvfs(0), active vports(0)
+mlx5_core 0000:18:00.0: E-Switch: Disable: mode(LEGACY), nvfs(0), necvfs(0), active vports(0)
+mlx5_core 0000:18:00.0: E-Switch: cleanup
+mlx5_core 0000:18:00.0: wait_func:1155:(pid 1967079): TEARDOWN_HCA(0x103) timeout. Will cause a leak of a command resource
+mlx5_core 0000:18:00.0: mlx5_function_close:1288:(pid 1967079): tear_down_hca failed, skip cleanup
+BUG: unable to handle page fault for address: ffffa26487064230
+PGD 100c00067 P4D 100c00067 PUD 100e5a067 PMD 105ed7067 PTE 0
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 0 PID: 0 Comm: swapper/0 Tainted: G           OE     -------  ---  6.7.0-68.fc38.x86_64 #1
+Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0013.121520200651 12/15/2020
+RIP: 0010:ioread32be+0x34/0x60
+RSP: 0018:ffffa26480003e58 EFLAGS: 00010292
+RAX: ffffa26487064200 RBX: ffff9042d08161a0 RCX: ffff904c108222c0
+RDX: 000000010bbf1b80 RSI: ffffffffc055ddb0 RDI: ffffa26487064230
+RBP: ffff9042d08161a0 R08: 0000000000000022 R09: ffff904c108222e8
+R10: 0000000000000004 R11: 0000000000000441 R12: ffffffffc055ddb0
+R13: ffffa26487064200 R14: ffffa26480003f00 R15: ffff904c108222c0
+FS:  0000000000000000(0000) GS:ffff904c10800000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: ffffa26487064230 CR3: 00000002c4420006 CR4: 00000000007706f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <IRQ>
+ ? __die+0x23/0x70
+ ? page_fault_oops+0x171/0x4e0
+ ? exc_page_fault+0x175/0x180
+ ? asm_exc_page_fault+0x26/0x30
+ ? __pfx_poll_health+0x10/0x10 [mlx5_core]
+ ? __pfx_poll_health+0x10/0x10 [mlx5_core]
+ ? ioread32be+0x34/0x60
+ mlx5_health_check_fatal_sensors+0x20/0x100 [mlx5_core]
+ ? __pfx_poll_health+0x10/0x10 [mlx5_core]
+ poll_health+0x42/0x230 [mlx5_core]
+ ? __next_timer_interrupt+0xbc/0x110
+ ? __pfx_poll_health+0x10/0x10 [mlx5_core]
+ call_timer_fn+0x21/0x130
+ ? __pfx_poll_health+0x10/0x10 [mlx5_core]
+ __run_timers+0x222/0x2c0
+ run_timer_softirq+0x1d/0x40
+ __do_softirq+0xc9/0x2c8
+ __irq_exit_rcu+0xa6/0xc0
+ sysvec_apic_timer_interrupt+0x72/0x90
+ </IRQ>
+ <TASK>
+ asm_sysvec_apic_timer_interrupt+0x1a/0x20
+RIP: 0010:cpuidle_enter_state+0xcc/0x440
+ ? cpuidle_enter_state+0xbd/0x440
+ cpuidle_enter+0x2d/0x40
+ do_idle+0x20d/0x270
+ cpu_startup_entry+0x2a/0x30
+ rest_init+0xd0/0xd0
+ arch_call_rest_init+0xe/0x30
+ start_kernel+0x709/0xa90
+ x86_64_start_reservations+0x18/0x30
+ x86_64_start_kernel+0x96/0xa0
+ secondary_startup_64_no_verify+0x18f/0x19b
+---[ end trace 0000000000000000 ]---
+
+Fixes: 9b98d395b85d ("net/mlx5: Start health poll at earlier stage of driver load")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 9710ddac1f1a8..2237b3d01e0e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1287,6 +1287,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+       if (!err)
+               mlx5_function_disable(dev, boot);
++      else
++              mlx5_stop_health_poll(dev, boot);
++
+       return err;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-mlx5-fix-tainted-pointer-delete-is-case-of-flow-.patch b/queue-6.6/net-mlx5-fix-tainted-pointer-delete-is-case-of-flow-.patch
new file mode 100644 (file)
index 0000000..c6ec2b3
--- /dev/null
@@ -0,0 +1,51 @@
+From efc6b345f3586c85d0f061b43bc4710c5d95bdab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 13:05:52 +0300
+Subject: net/mlx5: Fix tainted pointer delete in case of flow rules creation
+ fail
+
+From: Aleksandr Mishin <amishin@t-argos.ru>
+
+[ Upstream commit 229bedbf62b13af5aba6525ad10b62ad38d9ccb5 ]
+
+In case of flow rule creation failure in mlx5_lag_create_port_sel_table(),
+instead of the previously created rules, the tainted pointer is deleted
+several times.
+Fix this bug by using the correct flow rule pointers.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 352899f384d4 ("net/mlx5: Lag, use buckets in hash mode")
+Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://lore.kernel.org/r/20240604100552.25201-1-amishin@t-argos.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+index 7d9bbb494d95b..005661248c7e9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+                                                                     &dest, 1);
+                       if (IS_ERR(lag_definer->rules[idx])) {
+                               err = PTR_ERR(lag_definer->rules[idx]);
+-                              while (i--)
+-                                      while (j--)
++                              do {
++                                      while (j--) {
++                                              idx = i * ldev->buckets + j;
+                                               mlx5_del_flow_rules(lag_definer->rules[idx]);
++                                      }
++                                      j = ldev->buckets;
++                              } while (i--);
+                               goto destroy_fg;
+                       }
+               }
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-mlx5-stop-waiting-for-pci-if-pci-channel-is-offl.patch b/queue-6.6/net-mlx5-stop-waiting-for-pci-if-pci-channel-is-offl.patch
new file mode 100644 (file)
index 0000000..99e718c
--- /dev/null
@@ -0,0 +1,86 @@
+From e88b9027c87901fe100c47670d360c31adda2fd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 00:04:42 +0300
+Subject: net/mlx5: Stop waiting for PCI if pci channel is offline
+
+From: Moshe Shemesh <moshe@nvidia.com>
+
+[ Upstream commit 33afbfcc105a572159750f2ebee834a8a70fdd96 ]
+
+In case pci channel becomes offline the driver should not wait for PCI
+reads during health dump and recovery flow. The driver has timeout for
+each of these loops trying to read PCI, so it would fail anyway.
+However, in case of recovery waiting till timeout may cause the pci
+error_detected() callback fail to meet pci_dpc_recovered() wait timeout.
+
+Fixes: b3bd076f7501 ("net/mlx5: Report devlink health on FW fatal issues")
+Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
+Reviewed-by: Shay Drori <shayd@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fw.c          | 4 ++++
+ drivers/net/ethernet/mellanox/mlx5/core/health.c      | 8 ++++++++
+ drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c | 4 ++++
+ 3 files changed, 16 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+index 58f4c0d0fafa2..70898f0a9866c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+       do {
+               if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+                       break;
++              if (pci_channel_offline(dev->pdev)) {
++                      mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++                      return -EACCES;
++              }
+               cond_resched();
+       } while (!time_after(jiffies, end));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 2fb2598b775ef..d798834c4e755 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -248,6 +248,10 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
+       do {
+               if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+                       break;
++              if (pci_channel_offline(dev->pdev)) {
++                      mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++                      goto unlock;
++              }
+               msleep(20);
+       } while (!time_after(jiffies, end));
+@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
+                       mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
+                       return -ENODEV;
+               }
++              if (pci_channel_offline(dev->pdev)) {
++                      mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
++                      return -EACCES;
++              }
+               msleep(100);
+       }
+       return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+index 6b774e0c27665..d0b595ba61101 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
+                       ret = -EBUSY;
+                       goto pci_unlock;
+               }
++              if (pci_channel_offline(dev->pdev)) {
++                      ret = -EACCES;
++                      goto pci_unlock;
++              }
+               /* Check if semaphore is already locked */
+               ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-ncsi-fix-the-multi-thread-manner-of-ncsi-driver.patch b/queue-6.6/net-ncsi-fix-the-multi-thread-manner-of-ncsi-driver.patch
new file mode 100644 (file)
index 0000000..9328ddd
--- /dev/null
@@ -0,0 +1,220 @@
+From 4f2d6470d0422c2965ac6f511c08b8a976441ce5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 14:58:55 +0800
+Subject: net/ncsi: Fix the multi thread manner of NCSI driver
+
+From: DelphineCCChiu <delphine_cc_chiu@wiwynn.com>
+
+[ Upstream commit e85e271dec0270982afed84f70dc37703fcc1d52 ]
+
+Currently NCSI driver will send several NCSI commands back to back without
+waiting the response of previous NCSI command or timeout in some state
+when NIC have multi channel. This operation against the single thread
+manner defined by NCSI SPEC(section 6.3.2.3 in DSP0222_1.1.1)
+
+According to NCSI SPEC(section 6.2.13.1 in DSP0222_1.1.1), we should probe
+one channel at a time by sending NCSI commands (Clear initial state, Get
+version ID, Get capabilities...), than repeat this steps until the max
+number of channels which we got from NCSI command (Get capabilities) has
+been probed.
+
+Fixes: e6f44ed6d04d ("net/ncsi: Package and channel management")
+Signed-off-by: DelphineCCChiu <delphine_cc_chiu@wiwynn.com>
+Link: https://lore.kernel.org/r/20240529065856.825241-1-delphine_cc_chiu@wiwynn.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ncsi/internal.h    |  2 ++
+ net/ncsi/ncsi-manage.c | 73 +++++++++++++++++++++---------------------
+ net/ncsi/ncsi-rsp.c    |  4 ++-
+ 3 files changed, 41 insertions(+), 38 deletions(-)
+
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 374412ed780b6..ef0f8f73826f5 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
+       spinlock_t          lock;            /* Protect the NCSI device    */
+       unsigned int        package_probe_id;/* Current ID during probe    */
+       unsigned int        package_num;     /* Number of packages         */
++      unsigned int        channel_probe_id;/* Current cahnnel ID during probe */
+       struct list_head    packages;        /* List of packages           */
+       struct ncsi_channel *hot_channel;    /* Channel was ever active    */
+       struct ncsi_request requests[256];   /* Request table              */
+@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
+       bool                multi_package;   /* Enable multiple packages   */
+       bool                mlx_multi_host;  /* Enable multi host Mellanox */
+       u32                 package_whitelist; /* Packages to configure    */
++      unsigned char       channel_count;     /* Num of channels to probe   */
+ };
+ struct ncsi_cmd_arg {
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index f3d7fe86fea13..90c6cf676221a 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
+               break;
+       case ncsi_dev_state_suspend_gls:
+-              ndp->pending_req_num = np->channel_num;
++              ndp->pending_req_num = 1;
+               nca.type = NCSI_PKT_CMD_GLS;
+               nca.package = np->id;
++              nca.channel = ndp->channel_probe_id;
++              ret = ncsi_xmit_cmd(&nca);
++              if (ret)
++                      goto error;
++              ndp->channel_probe_id++;
+-              nd->state = ncsi_dev_state_suspend_dcnt;
+-              NCSI_FOR_EACH_CHANNEL(np, nc) {
+-                      nca.channel = nc->id;
+-                      ret = ncsi_xmit_cmd(&nca);
+-                      if (ret)
+-                              goto error;
++              if (ndp->channel_probe_id == ndp->channel_count) {
++                      ndp->channel_probe_id = 0;
++                      nd->state = ncsi_dev_state_suspend_dcnt;
+               }
+               break;
+@@ -1340,7 +1342,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ {
+       struct ncsi_dev *nd = &ndp->ndev;
+       struct ncsi_package *np;
+-      struct ncsi_channel *nc;
+       struct ncsi_cmd_arg nca;
+       unsigned char index;
+       int ret;
+@@ -1418,23 +1419,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+               nd->state = ncsi_dev_state_probe_cis;
+               break;
+-      case ncsi_dev_state_probe_cis:
+-              ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
+-
+-              /* Clear initial state */
+-              nca.type = NCSI_PKT_CMD_CIS;
+-              nca.package = ndp->active_package->id;
+-              for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
+-                      nca.channel = index;
+-                      ret = ncsi_xmit_cmd(&nca);
+-                      if (ret)
+-                              goto error;
+-              }
+-
+-              nd->state = ncsi_dev_state_probe_gvi;
+-              if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+-                      nd->state = ncsi_dev_state_probe_keep_phy;
+-              break;
+       case ncsi_dev_state_probe_keep_phy:
+               ndp->pending_req_num = 1;
+@@ -1447,14 +1431,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+               nd->state = ncsi_dev_state_probe_gvi;
+               break;
++      case ncsi_dev_state_probe_cis:
+       case ncsi_dev_state_probe_gvi:
+       case ncsi_dev_state_probe_gc:
+       case ncsi_dev_state_probe_gls:
+               np = ndp->active_package;
+-              ndp->pending_req_num = np->channel_num;
++              ndp->pending_req_num = 1;
+-              /* Retrieve version, capability or link status */
+-              if (nd->state == ncsi_dev_state_probe_gvi)
++              /* Clear initial state Retrieve version, capability or link status */
++              if (nd->state == ncsi_dev_state_probe_cis)
++                      nca.type = NCSI_PKT_CMD_CIS;
++              else if (nd->state == ncsi_dev_state_probe_gvi)
+                       nca.type = NCSI_PKT_CMD_GVI;
+               else if (nd->state == ncsi_dev_state_probe_gc)
+                       nca.type = NCSI_PKT_CMD_GC;
+@@ -1462,19 +1449,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+                       nca.type = NCSI_PKT_CMD_GLS;
+               nca.package = np->id;
+-              NCSI_FOR_EACH_CHANNEL(np, nc) {
+-                      nca.channel = nc->id;
+-                      ret = ncsi_xmit_cmd(&nca);
+-                      if (ret)
+-                              goto error;
+-              }
++              nca.channel = ndp->channel_probe_id;
+-              if (nd->state == ncsi_dev_state_probe_gvi)
++              ret = ncsi_xmit_cmd(&nca);
++              if (ret)
++                      goto error;
++
++              if (nd->state == ncsi_dev_state_probe_cis) {
++                      nd->state = ncsi_dev_state_probe_gvi;
++                      if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
++                              nd->state = ncsi_dev_state_probe_keep_phy;
++              } else if (nd->state == ncsi_dev_state_probe_gvi) {
+                       nd->state = ncsi_dev_state_probe_gc;
+-              else if (nd->state == ncsi_dev_state_probe_gc)
++              } else if (nd->state == ncsi_dev_state_probe_gc) {
+                       nd->state = ncsi_dev_state_probe_gls;
+-              else
++              } else {
++                      nd->state = ncsi_dev_state_probe_cis;
++                      ndp->channel_probe_id++;
++              }
++
++              if (ndp->channel_probe_id == ndp->channel_count) {
++                      ndp->channel_probe_id = 0;
+                       nd->state = ncsi_dev_state_probe_dp;
++              }
+               break;
+       case ncsi_dev_state_probe_dp:
+               ndp->pending_req_num = 1;
+@@ -1775,6 +1772,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+               ndp->requests[i].ndp = ndp;
+               timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
+       }
++      ndp->channel_count = NCSI_RESERVED_CHANNEL;
+       spin_lock_irqsave(&ncsi_dev_lock, flags);
+       list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
+@@ -1808,6 +1806,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
+       if (!(ndp->flags & NCSI_DEV_PROBED)) {
+               ndp->package_probe_id = 0;
++              ndp->channel_probe_id = 0;
+               nd->state = ncsi_dev_state_probe;
+               schedule_work(&ndp->work);
+               return 0;
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 480e80e3c2836..f22d67cb04d37 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+       struct ncsi_rsp_gc_pkt *rsp;
+       struct ncsi_dev_priv *ndp = nr->ndp;
+       struct ncsi_channel *nc;
++      struct ncsi_package *np;
+       size_t size;
+       /* Find the channel */
+       rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
+       ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
+-                                    NULL, &nc);
++                                    &np, &nc);
+       if (!nc)
+               return -ENODEV;
+@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+        */
+       nc->vlan_filter.bitmap = U64_MAX;
+       nc->vlan_filter.n_vids = rsp->vlan_cnt;
++      np->ndp->channel_count = rsp->channel_cnt;
+       return 0;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-ncsi-simplify-kconfig-dts-control-flow.patch b/queue-6.6/net-ncsi-simplify-kconfig-dts-control-flow.patch
new file mode 100644 (file)
index 0000000..7f7ea27
--- /dev/null
@@ -0,0 +1,152 @@
+From 9b5a2e9563620e69eb8d011babb5a8afc944bd12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Nov 2023 10:07:33 -0600
+Subject: net/ncsi: Simplify Kconfig/dts control flow
+
+From: Peter Delevoryas <peter@pjd.dev>
+
+[ Upstream commit c797ce168930ce3d62a9b7fc4d7040963ee6a01e ]
+
+Background:
+
+1. CONFIG_NCSI_OEM_CMD_KEEP_PHY
+
+If this is enabled, we send an extra OEM Intel command in the probe
+sequence immediately after discovering a channel (e.g. after "Clear
+Initial State").
+
+2. CONFIG_NCSI_OEM_CMD_GET_MAC
+
+If this is enabled, we send one of 3 OEM "Get MAC Address" commands from
+Broadcom, Mellanox (Nvidida), and Intel in the *configuration* sequence
+for a channel.
+
+3. mellanox,multi-host (or mlx,multi-host)
+
+Introduced by this patch:
+
+https://lore.kernel.org/all/20200108234341.2590674-1-vijaykhemka@fb.com/
+
+Which was actually originally from cosmo.chou@quantatw.com:
+
+https://github.com/facebook/openbmc-linux/commit/9f132a10ec48db84613519258cd8a317fb9c8f1b
+
+Cosmo claimed that the Nvidia ConnectX-4 and ConnectX-6 NIC's don't
+respond to Get Version ID, et. al in the probe sequence unless you send
+the Set MC Affinity command first.
+
+Problem Statement:
+
+We've been using a combination of #ifdef code blocks and IS_ENABLED()
+conditions to conditionally send these OEM commands.
+
+It makes adding any new code around these commands hard to understand.
+
+Solution:
+
+In this patch, I just want to remove the conditionally compiled blocks
+of code, and always use IS_ENABLED(...) to do dynamic control flow.
+
+I don't think the small amount of code this adds to non-users of the OEM
+Kconfigs is a big deal.
+
+Signed-off-by: Peter Delevoryas <peter@pjd.dev>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e85e271dec02 ("net/ncsi: Fix the multi thread manner of NCSI driver")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ncsi/ncsi-manage.c | 20 +++-----------------
+ 1 file changed, 3 insertions(+), 17 deletions(-)
+
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index d9da942ad53dd..f3d7fe86fea13 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -689,8 +689,6 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
+       return 0;
+ }
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+-
+ static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+ {
+       unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
+@@ -716,10 +714,6 @@ static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+       return ret;
+ }
+-#endif
+-
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+-
+ /* NCSI OEM Command APIs */
+ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
+ {
+@@ -856,8 +850,6 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
+       return nch->handler(nca);
+ }
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-
+ /* Determine if a given channel from the channel_queue should be used for Tx */
+ static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
+                              struct ncsi_channel *nc)
+@@ -1039,20 +1031,18 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
+                       goto error;
+               }
+-              nd->state = ncsi_dev_state_config_oem_gma;
++              nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
++                        ? ncsi_dev_state_config_oem_gma
++                        : ncsi_dev_state_config_clear_vids;
+               break;
+       case ncsi_dev_state_config_oem_gma:
+               nd->state = ncsi_dev_state_config_clear_vids;
+-              ret = -1;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+               nca.type = NCSI_PKT_CMD_OEM;
+               nca.package = np->id;
+               nca.channel = nc->id;
+               ndp->pending_req_num = 1;
+               ret = ncsi_gma_handler(&nca, nc->version.mf_id);
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-
+               if (ret < 0)
+                       schedule_work(&ndp->work);
+@@ -1404,7 +1394,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+               schedule_work(&ndp->work);
+               break;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+       case ncsi_dev_state_probe_mlx_gma:
+               ndp->pending_req_num = 1;
+@@ -1429,7 +1418,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+               nd->state = ncsi_dev_state_probe_cis;
+               break;
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+       case ncsi_dev_state_probe_cis:
+               ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
+@@ -1447,7 +1435,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+               if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+                       nd->state = ncsi_dev_state_probe_keep_phy;
+               break;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+       case ncsi_dev_state_probe_keep_phy:
+               ndp->pending_req_num = 1;
+@@ -1460,7 +1447,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+               nd->state = ncsi_dev_state_probe_gvi;
+               break;
+-#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
+       case ncsi_dev_state_probe_gvi:
+       case ncsi_dev_state_probe_gc:
+       case ncsi_dev_state_probe_gls:
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-phy-micrel-fix-ksz9477-phy-issues-after-suspend-.patch b/queue-6.6/net-phy-micrel-fix-ksz9477-phy-issues-after-suspend-.patch
new file mode 100644 (file)
index 0000000..ad2c131
--- /dev/null
@@ -0,0 +1,125 @@
+From 8ce307f811a481f5666f6d71770b6f2fde167bd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 19:20:23 -0700
+Subject: net: phy: micrel: fix KSZ9477 PHY issues after suspend/resume
+
+From: Tristram Ha <tristram.ha@microchip.com>
+
+[ Upstream commit 6149db4997f582e958da675092f21c666e3b67b7 ]
+
+When the PHY is powered up after powered down most of the registers are
+reset, so the PHY setup code needs to be done again.  In addition the
+interrupt register will need to be setup again so that link status
+indication works again.
+
+Fixes: 26dd2974c5b5 ("net: phy: micrel: Move KSZ9477 errata fixes to PHY driver")
+Signed-off-by: Tristram Ha <tristram.ha@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/micrel.c | 62 ++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 56 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index fc31fcfb0cdb4..048704758b150 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1821,7 +1821,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
+       {0x1c, 0x20, 0xeeee},
+ };
+-static int ksz9477_config_init(struct phy_device *phydev)
++static int ksz9477_phy_errata(struct phy_device *phydev)
+ {
+       int err;
+       int i;
+@@ -1849,16 +1849,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
+                       return err;
+       }
++      err = genphy_restart_aneg(phydev);
++      if (err)
++              return err;
++
++      return err;
++}
++
++static int ksz9477_config_init(struct phy_device *phydev)
++{
++      int err;
++
++      /* Only KSZ9897 family of switches needs this fix. */
++      if ((phydev->phy_id & 0xf) == 1) {
++              err = ksz9477_phy_errata(phydev);
++              if (err)
++                      return err;
++      }
++
+       /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
+        * in this switch shall be regarded as broken.
+        */
+       if (phydev->dev_flags & MICREL_NO_EEE)
+               phydev->eee_broken_modes = -1;
+-      err = genphy_restart_aneg(phydev);
+-      if (err)
+-              return err;
+-
+       return kszphy_config_init(phydev);
+ }
+@@ -1967,6 +1981,42 @@ static int kszphy_resume(struct phy_device *phydev)
+       return 0;
+ }
++static int ksz9477_resume(struct phy_device *phydev)
++{
++      int ret;
++
++      /* No need to initialize registers if not powered down. */
++      ret = phy_read(phydev, MII_BMCR);
++      if (ret < 0)
++              return ret;
++      if (!(ret & BMCR_PDOWN))
++              return 0;
++
++      genphy_resume(phydev);
++
++      /* After switching from power-down to normal mode, an internal global
++       * reset is automatically generated. Wait a minimum of 1 ms before
++       * read/write access to the PHY registers.
++       */
++      usleep_range(1000, 2000);
++
++      /* Only KSZ9897 family of switches needs this fix. */
++      if ((phydev->phy_id & 0xf) == 1) {
++              ret = ksz9477_phy_errata(phydev);
++              if (ret)
++                      return ret;
++      }
++
++      /* Enable PHY Interrupts */
++      if (phy_interrupt_is_valid(phydev)) {
++              phydev->interrupts = PHY_INTERRUPT_ENABLED;
++              if (phydev->drv->config_intr)
++                      phydev->drv->config_intr(phydev);
++      }
++
++      return 0;
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+       const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -4916,7 +4966,7 @@ static struct phy_driver ksphy_driver[] = {
+       .config_intr    = kszphy_config_intr,
+       .handle_interrupt = kszphy_handle_interrupt,
+       .suspend        = genphy_suspend,
+-      .resume         = genphy_resume,
++      .resume         = ksz9477_resume,
+       .get_features   = ksz9477_get_features,
+ } };
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-phy-micrel-ksz8061-fix-errata-solution-not-takin.patch b/queue-6.6/net-phy-micrel-ksz8061-fix-errata-solution-not-takin.patch
new file mode 100644 (file)
index 0000000..44050c6
--- /dev/null
@@ -0,0 +1,96 @@
+From cf3cc2c40c65e6e4ad3d3c74f3ed03125d227166 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 18:38:01 -0700
+Subject: net: phy: Micrel KSZ8061: fix errata solution not taking effect
+ problem
+
+From: Tristram Ha <tristram.ha@microchip.com>
+
+[ Upstream commit 0a8d3f2e3e8d8aea8af017e14227b91d5989b696 ]
+
+KSZ8061 needs to write to a MMD register at driver initialization to fix
+an errata.  This worked in 5.0 kernel but not in newer kernels.  The
+issue is the main phylib code no longer resets PHY at the very beginning.
+Calling phy resuming code later will reset the chip if it is already
+powered down at the beginning.  This wipes out the MMD register write.
+Solution is to implement a phy resume function for KSZ8061 to take care
+of this problem.
+
+Fixes: 232ba3a51cc2 ("net: phy: Micrel KSZ8061: link failure after cable connect")
+Signed-off-by: Tristram Ha <tristram.ha@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/micrel.c | 42 +++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 048704758b150..366ae22534373 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -770,6 +770,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
+ {
+       int ret;
++      /* Chip can be powered down by the bootstrap code. */
++      ret = phy_read(phydev, MII_BMCR);
++      if (ret < 0)
++              return ret;
++      if (ret & BMCR_PDOWN) {
++              ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
++              if (ret < 0)
++                      return ret;
++              usleep_range(1000, 2000);
++      }
++
+       ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+       if (ret)
+               return ret;
+@@ -2017,6 +2028,35 @@ static int ksz9477_resume(struct phy_device *phydev)
+       return 0;
+ }
++static int ksz8061_resume(struct phy_device *phydev)
++{
++      int ret;
++
++      /* This function can be called twice when the Ethernet device is on. */
++      ret = phy_read(phydev, MII_BMCR);
++      if (ret < 0)
++              return ret;
++      if (!(ret & BMCR_PDOWN))
++              return 0;
++
++      genphy_resume(phydev);
++      usleep_range(1000, 2000);
++
++      /* Re-program the value after chip is reset. */
++      ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
++      if (ret)
++              return ret;
++
++      /* Enable PHY Interrupts */
++      if (phy_interrupt_is_valid(phydev)) {
++              phydev->interrupts = PHY_INTERRUPT_ENABLED;
++              if (phydev->drv->config_intr)
++                      phydev->drv->config_intr(phydev);
++      }
++
++      return 0;
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+       const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -4812,7 +4852,7 @@ static struct phy_driver ksphy_driver[] = {
+       .config_intr    = kszphy_config_intr,
+       .handle_interrupt = kszphy_handle_interrupt,
+       .suspend        = kszphy_suspend,
+-      .resume         = kszphy_resume,
++      .resume         = ksz8061_resume,
+ }, {
+       .phy_id         = PHY_ID_KSZ9021,
+       .phy_id_mask    = 0x000ffffe,
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-sched-sch_multiq-fix-possible-oob-write-in-multi.patch b/queue-6.6/net-sched-sch_multiq-fix-possible-oob-write-in-multi.patch
new file mode 100644 (file)
index 0000000..43f7de4
--- /dev/null
@@ -0,0 +1,38 @@
+From 857559f24ab8ddbbc256c4a6945cbf4ca8ad379f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 15:13:03 +0800
+Subject: net: sched: sch_multiq: fix possible OOB write in multiq_tune()
+
+From: Hangyu Hua <hbh25y@gmail.com>
+
+[ Upstream commit affc18fdc694190ca7575b9a86632a73b9fe043d ]
+
+q->bands will be assigned to qopt->bands to execute subsequent code logic
+after kmalloc. So the old q->bands should not be used in kmalloc.
+Otherwise, an out-of-bounds write will occur.
+
+Fixes: c2999f7fb05b ("net: sched: multiq: don't call qdisc_put() while holding tree lock")
+Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
+Acked-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_multiq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 75c9c860182b4..0d6649d937c9f 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+       qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
+-      removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
++      removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
+                         GFP_KERNEL);
+       if (!removed)
+               return -ENOMEM;
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-sched-taprio-always-validate-tca_taprio_attr_pri.patch b/queue-6.6/net-sched-taprio-always-validate-tca_taprio_attr_pri.patch
new file mode 100644 (file)
index 0000000..abd91bb
--- /dev/null
@@ -0,0 +1,63 @@
+From 09a751f74b6baf7df1493612b34f6cabf7870ee0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 18:15:11 +0000
+Subject: net/sched: taprio: always validate TCA_TAPRIO_ATTR_PRIOMAP
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f921a58ae20852d188f70842431ce6519c4fdc36 ]
+
+If one TCA_TAPRIO_ATTR_PRIOMAP attribute has been provided,
+taprio_parse_mqprio_opt() must validate it, or userspace
+can inject arbitrary data to the kernel, the second time
+taprio_change() is called.
+
+First call (with valid attributes) sets dev->num_tc
+to a non zero value.
+
+Second call (with arbitrary mqprio attributes)
+returns early from taprio_parse_mqprio_opt()
+and bad things can happen.
+
+Fixes: a3d43c0d56f1 ("taprio: Add support adding an admin schedule")
+Reported-by: Noam Rathaus <noamr@ssd-disclosure.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://lore.kernel.org/r/20240604181511.769870-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index a315748a5e531..418d4a846d04a 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1186,16 +1186,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
+ {
+       bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
+-      if (!qopt && !dev->num_tc) {
+-              NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+-              return -EINVAL;
+-      }
+-
+-      /* If num_tc is already set, it means that the user already
+-       * configured the mqprio part
+-       */
+-      if (dev->num_tc)
++      if (!qopt) {
++              if (!dev->num_tc) {
++                      NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
++                      return -EINVAL;
++              }
+               return 0;
++      }
+       /* taprio imposes that traffic classes map 1:n to tx queues */
+       if (qopt->num_tc > dev->num_tx_queues) {
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-smc-avoid-overwriting-when-adjusting-sock-bufsiz.patch b/queue-6.6/net-smc-avoid-overwriting-when-adjusting-sock-bufsiz.patch
new file mode 100644 (file)
index 0000000..2109eaf
--- /dev/null
@@ -0,0 +1,68 @@
+From 0f974c14b66ae11bfb9233c96d99c2d3d455c37a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 16:54:17 +0800
+Subject: net/smc: avoid overwriting when adjusting sock bufsizes
+
+From: Wen Gu <guwen@linux.alibaba.com>
+
+[ Upstream commit fb0aa0781a5f457e3864da68af52c3b1f4f7fd8f ]
+
+When copying smc settings to clcsock, avoid setting clcsock's sk_sndbuf
+to sysctl_tcp_wmem[1], since this may overwrite the value set by
+tcp_sndbuf_expand() in TCP connection establishment.
+
+And the other setting sk_{snd|rcv}buf to sysctl value in
+smc_adjust_sock_bufsizes() can also be omitted since the initialization
+of smc sock and clcsock has set sk_{snd|rcv}buf to smc.sysctl_{w|r}mem
+or ipv4_sysctl_tcp_{w|r}mem[1].
+
+Fixes: 30c3c4a4497c ("net/smc: Use correct buffer sizes when switching between TCP and SMC")
+Link: https://lore.kernel.org/r/5eaf3858-e7fd-4db8-83e8-3d7a3e0e9ae2@linux.alibaba.com
+Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Reviewed-by: Gerd Bayer <gbayer@linux.ibm.com>, too.
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 22 ++--------------------
+ 1 file changed, 2 insertions(+), 20 deletions(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index ef5b5d498ef3e..3158b94fd347a 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -460,29 +460,11 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
+                                    unsigned long mask)
+ {
+-      struct net *nnet = sock_net(nsk);
+-
+       nsk->sk_userlocks = osk->sk_userlocks;
+-      if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
++      if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
+               nsk->sk_sndbuf = osk->sk_sndbuf;
+-      } else {
+-              if (mask == SK_FLAGS_SMC_TO_CLC)
+-                      WRITE_ONCE(nsk->sk_sndbuf,
+-                                 READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
+-              else
+-                      WRITE_ONCE(nsk->sk_sndbuf,
+-                                 2 * READ_ONCE(nnet->smc.sysctl_wmem));
+-      }
+-      if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
++      if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
+               nsk->sk_rcvbuf = osk->sk_rcvbuf;
+-      } else {
+-              if (mask == SK_FLAGS_SMC_TO_CLC)
+-                      WRITE_ONCE(nsk->sk_rcvbuf,
+-                                 READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
+-              else
+-                      WRITE_ONCE(nsk->sk_rcvbuf,
+-                                 2 * READ_ONCE(nnet->smc.sysctl_rmem));
+-      }
+ }
+ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-tls-fix-marking-packets-as-decrypted.patch b/queue-6.6/net-tls-fix-marking-packets-as-decrypted.patch
new file mode 100644 (file)
index 0000000..b4724a7
--- /dev/null
@@ -0,0 +1,46 @@
+From f8966de14c774dfea90e71e43c9e747256197ea1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 16:26:07 -0700
+Subject: net: tls: fix marking packets as decrypted
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit a535d59432370343058755100ee75ab03c0e3f91 ]
+
+For TLS offload we mark packets with skb->decrypted to make sure
+they don't escape the host without getting encrypted first.
+The crypto state lives in the socket, so it may get detached
+by a call to skb_orphan(). As a safety check - the egress path
+drops all packets with skb->decrypted and no "crypto-safe" socket.
+
+The skb marking was added to sendpage only (and not sendmsg),
+because tls_device injected data into the TCP stack using sendpage.
+This special case was missed when sendpage got folded into sendmsg.
+
+Fixes: c5c37af6ecad ("tcp: Convert do_tcp_sendpages() to use MSG_SPLICE_PAGES")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20240530232607.82686-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7bf774bdb9386..a9b33135513d8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1158,6 +1158,9 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+                       process_backlog++;
++#ifdef CONFIG_SKB_DECRYPTED
++                      skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
++#endif
+                       tcp_skb_entail(sk, skb);
+                       copy = size_goal;
+-- 
+2.43.0
+
diff --git a/queue-6.6/net-wwan-iosm-fix-tainted-pointer-delete-is-case-of-.patch b/queue-6.6/net-wwan-iosm-fix-tainted-pointer-delete-is-case-of-.patch
new file mode 100644 (file)
index 0000000..ac07e5c
--- /dev/null
@@ -0,0 +1,44 @@
+From a39d7419af3dff19bcdb42ccff2241ac593023eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 11:25:00 +0300
+Subject: net: wwan: iosm: Fix tainted pointer delete is case of region
+ creation fail
+
+From: Aleksandr Mishin <amishin@t-argos.ru>
+
+[ Upstream commit b0c9a26435413b81799047a7be53255640432547 ]
+
+In case of region creation fail in ipc_devlink_create_region(), previously
+created regions delete process starts from tainted pointer which actually
+holds error code value.
+Fix this bug by decreasing region index before delete.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 4dcd183fbd67 ("net: wwan: iosm: devlink registration")
+Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
+Acked-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240604082500.20769-1-amishin@t-argos.ru
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wwan/iosm/iosm_ipc_devlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+index 2fe724d623c06..33c5a46f1b922 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+@@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+                       rc = PTR_ERR(devlink->cd_regions[i]);
+                       dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+                       /* Delete previously created regions */
+-                      for ( ; i >= 0; i--)
++                      for (i--; i >= 0; i--)
+                               devlink_region_destroy(devlink->cd_regions[i]);
+                       goto region_create_fail;
+               }
+-- 
+2.43.0
+
diff --git a/queue-6.6/nilfs2-fix-nilfs_empty_dir-misjudgment-and-long-loop.patch b/queue-6.6/nilfs2-fix-nilfs_empty_dir-misjudgment-and-long-loop.patch
new file mode 100644 (file)
index 0000000..279bc10
--- /dev/null
@@ -0,0 +1,51 @@
+From ee51c6a7d475f7b66f41988c7f6915352a03a264 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 22:42:55 +0900
+Subject: nilfs2: fix nilfs_empty_dir() misjudgment and long loop on I/O errors
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+[ Upstream commit 7373a51e7998b508af7136530f3a997b286ce81c ]
+
+The error handling in nilfs_empty_dir() when a directory folio/page read
+fails is incorrect, as in the old ext2 implementation, and if the
+folio/page cannot be read or nilfs_check_folio() fails, it will falsely
+determine the directory as empty and corrupt the file system.
+
+In addition, since nilfs_empty_dir() does not immediately return on a
+failed folio/page read, but continues to loop, this can cause a long loop
+with I/O if i_size of the directory's inode is also corrupted, causing the
+log writer thread to wait and hang, as reported by syzbot.
+
+Fix these issues by making nilfs_empty_dir() immediately return a false
+value (0) if it fails to get a directory folio/page.
+
+Link: https://lkml.kernel.org/r/20240604134255.7165-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+c8166c541d3971bf6c87@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=c8166c541d3971bf6c87
+Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations")
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nilfs2/dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index c6b88be8a9d73..23a8357f127bc 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -627,7 +627,7 @@ int nilfs_empty_dir(struct inode *inode)
+               kaddr = nilfs_get_page(inode, i, &page);
+               if (IS_ERR(kaddr))
+-                      continue;
++                      return 0;
+               de = (struct nilfs_dir_entry *)kaddr;
+               kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
+-- 
+2.43.0
+
diff --git a/queue-6.6/nilfs2-return-the-mapped-address-from-nilfs_get_page.patch b/queue-6.6/nilfs2-return-the-mapped-address-from-nilfs_get_page.patch
new file mode 100644 (file)
index 0000000..6cf483c
--- /dev/null
@@ -0,0 +1,146 @@
+From 3f1b1789ee70ff1a5669ae71b69cd4a4509467a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Nov 2023 23:30:25 +0900
+Subject: nilfs2: return the mapped address from nilfs_get_page()
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 09a46acb3697e50548bb265afa1d79163659dd85 ]
+
+In prepartion for switching from kmap() to kmap_local(), return the kmap
+address from nilfs_get_page() instead of having the caller look up
+page_address().
+
+[konishi.ryusuke: fixed a missing blank line after declaration]
+Link: https://lkml.kernel.org/r/20231127143036.2425-7-konishi.ryusuke@gmail.com
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 7373a51e7998 ("nilfs2: fix nilfs_empty_dir() misjudgment and long loop on I/O errors")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nilfs2/dir.c | 57 +++++++++++++++++++++++--------------------------
+ 1 file changed, 27 insertions(+), 30 deletions(-)
+
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 929edc0b101a0..c6b88be8a9d73 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -186,19 +186,24 @@ static bool nilfs_check_page(struct page *page)
+       return false;
+ }
+-static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
++static void *nilfs_get_page(struct inode *dir, unsigned long n,
++              struct page **pagep)
+ {
+       struct address_space *mapping = dir->i_mapping;
+       struct page *page = read_mapping_page(mapping, n, NULL);
++      void *kaddr;
+-      if (!IS_ERR(page)) {
+-              kmap(page);
+-              if (unlikely(!PageChecked(page))) {
+-                      if (!nilfs_check_page(page))
+-                              goto fail;
+-              }
++      if (IS_ERR(page))
++              return page;
++
++      kaddr = kmap(page);
++      if (unlikely(!PageChecked(page))) {
++              if (!nilfs_check_page(page))
++                      goto fail;
+       }
+-      return page;
++
++      *pagep = page;
++      return kaddr;
+ fail:
+       nilfs_put_page(page);
+@@ -275,14 +280,14 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
+       for ( ; n < npages; n++, offset = 0) {
+               char *kaddr, *limit;
+               struct nilfs_dir_entry *de;
+-              struct page *page = nilfs_get_page(inode, n);
++              struct page *page;
+-              if (IS_ERR(page)) {
++              kaddr = nilfs_get_page(inode, n, &page);
++              if (IS_ERR(kaddr)) {
+                       nilfs_error(sb, "bad page in #%lu", inode->i_ino);
+                       ctx->pos += PAGE_SIZE - offset;
+                       return -EIO;
+               }
+-              kaddr = page_address(page);
+               de = (struct nilfs_dir_entry *)(kaddr + offset);
+               limit = kaddr + nilfs_last_byte(inode, n) -
+                       NILFS_DIR_REC_LEN(1);
+@@ -345,11 +350,9 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+               start = 0;
+       n = start;
+       do {
+-              char *kaddr;
++              char *kaddr = nilfs_get_page(dir, n, &page);
+-              page = nilfs_get_page(dir, n);
+-              if (!IS_ERR(page)) {
+-                      kaddr = page_address(page);
++              if (!IS_ERR(kaddr)) {
+                       de = (struct nilfs_dir_entry *)kaddr;
+                       kaddr += nilfs_last_byte(dir, n) - reclen;
+                       while ((char *) de <= kaddr) {
+@@ -387,15 +390,11 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
+ {
+-      struct page *page = nilfs_get_page(dir, 0);
+-      struct nilfs_dir_entry *de = NULL;
++      struct nilfs_dir_entry *de = nilfs_get_page(dir, 0, p);
+-      if (!IS_ERR(page)) {
+-              de = nilfs_next_entry(
+-                      (struct nilfs_dir_entry *)page_address(page));
+-              *p = page;
+-      }
+-      return de;
++      if (IS_ERR(de))
++              return NULL;
++      return nilfs_next_entry(de);
+ }
+ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
+@@ -459,12 +458,11 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
+       for (n = 0; n <= npages; n++) {
+               char *dir_end;
+-              page = nilfs_get_page(dir, n);
+-              err = PTR_ERR(page);
+-              if (IS_ERR(page))
++              kaddr = nilfs_get_page(dir, n, &page);
++              err = PTR_ERR(kaddr);
++              if (IS_ERR(kaddr))
+                       goto out;
+               lock_page(page);
+-              kaddr = page_address(page);
+               dir_end = kaddr + nilfs_last_byte(dir, n);
+               de = (struct nilfs_dir_entry *)kaddr;
+               kaddr += PAGE_SIZE - reclen;
+@@ -627,11 +625,10 @@ int nilfs_empty_dir(struct inode *inode)
+               char *kaddr;
+               struct nilfs_dir_entry *de;
+-              page = nilfs_get_page(inode, i);
+-              if (IS_ERR(page))
++              kaddr = nilfs_get_page(inode, i, &page);
++              if (IS_ERR(kaddr))
+                       continue;
+-              kaddr = page_address(page);
+               de = (struct nilfs_dir_entry *)kaddr;
+               kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
+-- 
+2.43.0
+
diff --git a/queue-6.6/octeontx2-af-always-allocate-pf-entries-from-low-pri.patch b/queue-6.6/octeontx2-af-always-allocate-pf-entries-from-low-pri.patch
new file mode 100644 (file)
index 0000000..ee132e5
--- /dev/null
@@ -0,0 +1,87 @@
+From 9b45cd4ceb9e6be0fa8640dd6920ff62e4a8076c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 20:59:44 +0530
+Subject: octeontx2-af: Always allocate PF entries from low prioriy zone
+
+From: Subbaraya Sundeep <sbhatta@marvell.com>
+
+[ Upstream commit 8b0f7410942cdc420c4557eda02bfcdf60ccec17 ]
+
+PF mcam entries has to be at low priority always so that VF
+can install longest prefix match rules at higher priority.
+This was taken care currently but when priority allocation
+wrt reference entry is requested then entries are allocated
+from mid-zone instead of low priority zone. Fix this and
+always allocate entries from low priority zone for PFs.
+
+Fixes: 7df5b4b260dd ("octeontx2-af: Allocate low priority entries for PF")
+Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeontx2/af/rvu_npc.c   | 33 ++++++++++++-------
+ 1 file changed, 22 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 91a4ea529d077..00ef6d201b973 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -2506,7 +2506,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+        * - when available free entries are less.
+        * Lower priority ones out of avaialble free entries are always
+        * chosen when 'high vs low' question arises.
++       *
++       * For a VF base MCAM match rule is set by its PF. And all the
++       * further MCAM rules installed by VF on its own are
++       * concatenated with the base rule set by its PF. Hence PF entries
++       * should be at lower priority compared to VF entries. Otherwise
++       * base rule is hit always and rules installed by VF will be of
++       * no use. Hence if the request is from PF then allocate low
++       * priority entries.
+        */
++      if (!(pcifunc & RVU_PFVF_FUNC_MASK))
++              goto lprio_alloc;
+       /* Get the search range for priority allocation request */
+       if (req->priority) {
+@@ -2515,17 +2525,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+               goto alloc;
+       }
+-      /* For a VF base MCAM match rule is set by its PF. And all the
+-       * further MCAM rules installed by VF on its own are
+-       * concatenated with the base rule set by its PF. Hence PF entries
+-       * should be at lower priority compared to VF entries. Otherwise
+-       * base rule is hit always and rules installed by VF will be of
+-       * no use. Hence if the request is from PF and NOT a priority
+-       * allocation request then allocate low priority entries.
+-       */
+-      if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+-              goto lprio_alloc;
+-
+       /* Find out the search range for non-priority allocation request
+        *
+        * Get MCAM free entry count in middle zone.
+@@ -2555,6 +2554,18 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+               reverse = true;
+               start = 0;
+               end = mcam->bmap_entries;
++              /* Ensure PF requests are always at bottom and if PF requests
++               * for higher/lower priority entry wrt reference entry then
++               * honour that criteria and start search for entries from bottom
++               * and not in mid zone.
++               */
++              if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++                  req->priority == NPC_MCAM_HIGHER_PRIO)
++                      end = req->ref_entry;
++
++              if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++                  req->priority == NPC_MCAM_LOWER_PRIO)
++                      start = req->ref_entry;
+       }
+ alloc:
+-- 
+2.43.0
+
diff --git a/queue-6.6/ptp-fix-error-message-on-failed-pin-verification.patch b/queue-6.6/ptp-fix-error-message-on-failed-pin-verification.patch
new file mode 100644 (file)
index 0000000..643ef1b
--- /dev/null
@@ -0,0 +1,42 @@
+From 096e6d27b84ca0bf3c15bb7df6965d5e9dd9db57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 14:05:27 +0200
+Subject: ptp: Fix error message on failed pin verification
+
+From: Karol Kolacinski <karol.kolacinski@intel.com>
+
+[ Upstream commit 323a359f9b077f382f4483023d096a4d316fd135 ]
+
+On failed verification of PTP clock pin, error message prints channel
+number instead of pin index after "pin", which is incorrect.
+
+Fix error message by adding channel number to the message and printing
+pin number instead of channel number.
+
+Fixes: 6092315dfdec ("ptp: introduce programmable pins.")
+Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
+Acked-by: Richard Cochran <richardcochran@gmail.com>
+Link: https://lore.kernel.org/r/20240604120555.16643-1-karol.kolacinski@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ptp/ptp_chardev.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 5a3a4cc0bec82..91cc6ffa0095e 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -84,7 +84,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+       }
+       if (info->verify(info, pin, func, chan)) {
+-              pr_err("driver cannot use function %u on pin %u\n", func, chan);
++              pr_err("driver cannot use function %u and channel %u on pin %u\n",
++                     func, chan, pin);
+               return -EOPNOTSUPP;
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.6/risc-v-kvm-fix-incorrect-reg_subtype-labels-in-kvm_r.patch b/queue-6.6/risc-v-kvm-fix-incorrect-reg_subtype-labels-in-kvm_r.patch
new file mode 100644 (file)
index 0000000..d816933
--- /dev/null
@@ -0,0 +1,45 @@
+From c4e141d240577937df973647ba65469a5a13f73a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 10:13:34 +0800
+Subject: RISC-V: KVM: Fix incorrect reg_subtype labels in
+ kvm_riscv_vcpu_set_reg_isa_ext function
+
+From: Quan Zhou <zhouquan@iscas.ac.cn>
+
+[ Upstream commit c66f3b40b17d3dfc4b6abb5efde8e71c46971821 ]
+
+In the function kvm_riscv_vcpu_set_reg_isa_ext, the original code
+used incorrect reg_subtype labels KVM_REG_RISCV_SBI_MULTI_EN/DIS.
+These have been corrected to KVM_REG_RISCV_ISA_MULTI_EN/DIS respectively.
+Although they are numerically equivalent, the actual processing
+will not result in errors, but it may lead to ambiguous code semantics.
+
+Fixes: 613029442a4b ("RISC-V: KVM: Extend ONE_REG to enable/disable multiple ISA extensions")
+Signed-off-by: Quan Zhou <zhouquan@iscas.ac.cn>
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Link: https://lore.kernel.org/r/ff1c6771a67d660db94372ac9aaa40f51e5e0090.1716429371.git.zhouquan@iscas.ac.cn
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kvm/vcpu_onereg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
+index b7e0e03c69b1e..d520b25d85616 100644
+--- a/arch/riscv/kvm/vcpu_onereg.c
++++ b/arch/riscv/kvm/vcpu_onereg.c
+@@ -614,9 +614,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_ISA_SINGLE:
+               return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
+-      case KVM_REG_RISCV_SBI_MULTI_EN:
++      case KVM_REG_RISCV_ISA_MULTI_EN:
+               return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
+-      case KVM_REG_RISCV_SBI_MULTI_DIS:
++      case KVM_REG_RISCV_ISA_MULTI_DIS:
+               return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
+       default:
+               return -ENOENT;
+-- 
+2.43.0
+
diff --git a/queue-6.6/risc-v-kvm-no-need-to-use-mask-when-hart-index-bit-i.patch b/queue-6.6/risc-v-kvm-no-need-to-use-mask-when-hart-index-bit-i.patch
new file mode 100644 (file)
index 0000000..a268b2e
--- /dev/null
@@ -0,0 +1,48 @@
+From 6f977ad4508099b59eb9bc41bc9cb56204ed6dad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Apr 2024 14:49:04 +0800
+Subject: RISC-V: KVM: No need to use mask when hart-index-bit is 0
+
+From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+
+[ Upstream commit 2d707b4e37f9b0c37b8b2392f91b04c5b63ea538 ]
+
+When the maximum hart number within groups is 1, hart-index-bit is set to
+0. Consequently, there is no need to restore the hart ID from IMSIC
+addresses and hart-index-bit settings. Currently, QEMU and kvmtool do not
+pass correct hart-index-bit values when the maximum hart number is a
+power of 2, thereby avoiding this issue. Corresponding patches for QEMU
+and kvmtool will also be dispatched.
+
+Fixes: 89d01306e34d ("RISC-V: KVM: Implement device interface for AIA irqchip")
+Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Link: https://lore.kernel.org/r/20240415064905.25184-1-yongxuan.wang@sifive.com
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kvm/aia_device.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
+index 0eb689351b7d0..5cd407c6a8e4f 100644
+--- a/arch/riscv/kvm/aia_device.c
++++ b/arch/riscv/kvm/aia_device.c
+@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
+ static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
+ {
+-      u32 hart, group = 0;
++      u32 hart = 0, group = 0;
+-      hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+-              GENMASK_ULL(aia->nr_hart_bits - 1, 0);
++      if (aia->nr_hart_bits)
++              hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
++                     GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+       if (aia->nr_group_bits)
+               group = (addr >> aia->nr_group_shift) &
+                       GENMASK_ULL(aia->nr_group_bits - 1, 0);
+-- 
+2.43.0
+
diff --git a/queue-6.6/scsi-ufs-mcq-fix-error-output-and-clean-up-ufshcd_mc.patch b/queue-6.6/scsi-ufs-mcq-fix-error-output-and-clean-up-ufshcd_mc.patch
new file mode 100644 (file)
index 0000000..da5e28e
--- /dev/null
@@ -0,0 +1,86 @@
+From 91685991175328a16ee4371b9b914d89a65998fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 May 2024 10:59:04 +0900
+Subject: scsi: ufs: mcq: Fix error output and clean up ufshcd_mcq_abort()
+
+From: Chanwoo Lee <cw9316.lee@samsung.com>
+
+[ Upstream commit d53b681ce9ca7db5ef4ecb8d2cf465ae4a031264 ]
+
+An error unrelated to ufshcd_try_to_abort_task is being logged and can
+cause confusion. Modify ufshcd_mcq_abort() to print the result of the abort
+failure. For readability, return immediately instead of 'goto'.
+
+Fixes: f1304d442077 ("scsi: ufs: mcq: Added ufshcd_mcq_abort()")
+Signed-off-by: Chanwoo Lee <cw9316.lee@samsung.com>
+Link: https://lore.kernel.org/r/20240524015904.1116005-1-cw9316.lee@samsung.com
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufs-mcq.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 7ae3096814282..4e84ee6564d4b 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -630,20 +630,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+       struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+       struct ufs_hw_queue *hwq;
+       unsigned long flags;
+-      int err = FAILED;
++      int err;
+       if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+               dev_err(hba->dev,
+                       "%s: skip abort. cmd at tag %d already completed.\n",
+                       __func__, tag);
+-              goto out;
++              return FAILED;
+       }
+       /* Skip task abort in case previous aborts failed and report failure */
+       if (lrbp->req_abort_skip) {
+               dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+                       __func__, tag);
+-              goto out;
++              return FAILED;
+       }
+       hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+@@ -655,7 +655,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+                */
+               dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+                       __func__, hwq->id, tag);
+-              goto out;
++              return FAILED;
+       }
+       /*
+@@ -663,18 +663,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+        * in the completion queue either. Query the device to see if
+        * the command is being processed in the device.
+        */
+-      if (ufshcd_try_to_abort_task(hba, tag)) {
++      err = ufshcd_try_to_abort_task(hba, tag);
++      if (err) {
+               dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
+               lrbp->req_abort_skip = true;
+-              goto out;
++              return FAILED;
+       }
+-      err = SUCCESS;
+       spin_lock_irqsave(&hwq->cq_lock, flags);
+       if (ufshcd_cmd_inflight(lrbp->cmd))
+               ufshcd_release_scsi_cmd(hba, lrbp);
+       spin_unlock_irqrestore(&hwq->cq_lock, flags);
+-out:
+-      return err;
++      return SUCCESS;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/selftests-mm-compaction_test-fix-bogus-test-success-.patch b/queue-6.6/selftests-mm-compaction_test-fix-bogus-test-success-.patch
new file mode 100644 (file)
index 0000000..e484d8b
--- /dev/null
@@ -0,0 +1,109 @@
+From 278704bd01c53651dd762299d1ee11d376b15ad8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 13:13:56 +0530
+Subject: selftests/mm: compaction_test: fix bogus test success on Aarch64
+
+From: Dev Jain <dev.jain@arm.com>
+
+[ Upstream commit d4202e66a4b1fe6968f17f9f09bbc30d08f028a1 ]
+
+Patch series "Fixes for compaction_test", v2.
+
+The compaction_test memory selftest introduces fragmentation in memory
+and then tries to allocate as many hugepages as possible. This series
+addresses some problems.
+
+On Aarch64, if nr_hugepages == 0, then the test trivially succeeds since
+compaction_index becomes 0, which is less than 3, due to no division by
+zero exception being raised. We fix that by checking for division by
+zero.
+
+Secondly, correctly set the number of hugepages to zero before trying
+to set a large number of them.
+
+Now, consider a situation in which, at the start of the test, a non-zero
+number of hugepages have been already set (while running the entire
+selftests/mm suite, or manually by the admin). The test operates on 80%
+of memory to avoid OOM-killer invocation, and because some memory is
+already blocked by hugepages, it would increase the chance of OOM-killing.
+Also, since mem_free used in check_compaction() is the value before we
+set nr_hugepages to zero, the chance that the compaction_index will
+be small is very high if the preset nr_hugepages was high, leading to a
+bogus test success.
+
+This patch (of 3):
+
+Currently, if at runtime we are not able to allocate a huge page, the test
+will trivially pass on Aarch64 due to no exception being raised on
+division by zero while computing compaction_index.  Fix that by checking
+for nr_hugepages == 0.  Anyways, in general, avoid a division by zero by
+exiting the program beforehand.  While at it, fix a typo, and handle the
+case where the number of hugepages may overflow an integer.
+
+Link: https://lkml.kernel.org/r/20240521074358.675031-1-dev.jain@arm.com
+Link: https://lkml.kernel.org/r/20240521074358.675031-2-dev.jain@arm.com
+Fixes: bd67d5c15cc1 ("Test compaction of mlocked memory")
+Signed-off-by: Dev Jain <dev.jain@arm.com>
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Sri Jayaramappa <sjayaram@akamai.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mm/compaction_test.c | 20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index 6aa6460b854ea..309b3750e57e1 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -82,12 +82,13 @@ int prereq(void)
+       return -1;
+ }
+-int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
++int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ {
++      unsigned long nr_hugepages_ul;
+       int fd, ret = -1;
+       int compaction_index = 0;
+-      char initial_nr_hugepages[10] = {0};
+-      char nr_hugepages[10] = {0};
++      char initial_nr_hugepages[20] = {0};
++      char nr_hugepages[20] = {0};
+       /* We want to test with 80% of available memory. Else, OOM killer comes
+          in to play */
+@@ -136,7 +137,12 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       /* We should have been able to request at least 1/3 rd of the memory in
+          huge pages */
+-      compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
++      nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
++      if (!nr_hugepages_ul) {
++              ksft_print_msg("ERROR: No memory is available as huge pages\n");
++              goto close_fd;
++      }
++      compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
+       lseek(fd, 0, SEEK_SET);
+@@ -147,11 +153,11 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+               goto close_fd;
+       }
+-      ksft_print_msg("Number of huge pages allocated = %d\n",
+-                     atoi(nr_hugepages));
++      ksft_print_msg("Number of huge pages allocated = %lu\n",
++                     nr_hugepages_ul);
+       if (compaction_index > 3) {
+-              ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
++              ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
+                              "as huge pages\n", compaction_index);
+               goto close_fd;
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.6/selftests-mm-conform-test-to-tap-format-output.patch b/queue-6.6/selftests-mm-conform-test-to-tap-format-output.patch
new file mode 100644 (file)
index 0000000..dfcdf81
--- /dev/null
@@ -0,0 +1,229 @@
+From 42f14c56f2cd1ae93434e99c7a4dd97d83dd6adb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Jan 2024 13:36:12 +0500
+Subject: selftests/mm: conform test to TAP format output
+
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+
+[ Upstream commit 9a21701edc41465de56f97914741bfb7bfc2517d ]
+
+Conform the layout, informational and status messages to TAP.  No
+functional change is intended other than the layout of output messages.
+
+Link: https://lkml.kernel.org/r/20240101083614.1076768-1-usama.anjum@collabora.com
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: d4202e66a4b1 ("selftests/mm: compaction_test: fix bogus test success on Aarch64")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mm/compaction_test.c | 91 ++++++++++----------
+ 1 file changed, 44 insertions(+), 47 deletions(-)
+
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index 55dec92e1e58c..f81931c1f8386 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -33,7 +33,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
+       FILE *cmdfile = popen(cmd, "r");
+       if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
+-              perror("Failed to read meminfo\n");
++              ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
+               return -1;
+       }
+@@ -44,7 +44,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
+       cmdfile = popen(cmd, "r");
+       if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
+-              perror("Failed to read meminfo\n");
++              ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
+               return -1;
+       }
+@@ -62,14 +62,14 @@ int prereq(void)
+       fd = open("/proc/sys/vm/compact_unevictable_allowed",
+                 O_RDONLY | O_NONBLOCK);
+       if (fd < 0) {
+-              perror("Failed to open\n"
+-                     "/proc/sys/vm/compact_unevictable_allowed\n");
++              ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n",
++                             strerror(errno));
+               return -1;
+       }
+       if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
+-              perror("Failed to read from\n"
+-                     "/proc/sys/vm/compact_unevictable_allowed\n");
++              ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n",
++                             strerror(errno));
+               close(fd);
+               return -1;
+       }
+@@ -78,12 +78,13 @@ int prereq(void)
+       if (allowed == '1')
+               return 0;
++      ksft_print_msg("Compaction isn't allowed\n");
+       return -1;
+ }
+ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ {
+-      int fd;
++      int fd, ret = -1;
+       int compaction_index = 0;
+       char initial_nr_hugepages[10] = {0};
+       char nr_hugepages[10] = {0};
+@@ -94,12 +95,14 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
+       if (fd < 0) {
+-              perror("Failed to open /proc/sys/vm/nr_hugepages");
++              ksft_test_result_fail("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
++                                    strerror(errno));
+               return -1;
+       }
+       if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
+-              perror("Failed to read from /proc/sys/vm/nr_hugepages");
++              ksft_test_result_fail("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
++                                    strerror(errno));
+               goto close_fd;
+       }
+@@ -107,7 +110,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       /* Start with the initial condition of 0 huge pages*/
+       if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+-              perror("Failed to write 0 to /proc/sys/vm/nr_hugepages\n");
++              ksft_test_result_fail("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
++                                    strerror(errno));
+               goto close_fd;
+       }
+@@ -116,14 +120,16 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       /* Request a large number of huge pages. The Kernel will allocate
+          as much as it can */
+       if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
+-              perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages\n");
++              ksft_test_result_fail("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
++                                    strerror(errno));
+               goto close_fd;
+       }
+       lseek(fd, 0, SEEK_SET);
+       if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
+-              perror("Failed to re-read from /proc/sys/vm/nr_hugepages\n");
++              ksft_test_result_fail("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
++                                    strerror(errno));
+               goto close_fd;
+       }
+@@ -131,67 +137,58 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+          huge pages */
+       compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
+-      if (compaction_index > 3) {
+-              printf("No of huge pages allocated = %d\n",
+-                     (atoi(nr_hugepages)));
+-              fprintf(stderr, "ERROR: Less that 1/%d of memory is available\n"
+-                      "as huge pages\n", compaction_index);
+-              goto close_fd;
+-      }
+-
+-      printf("No of huge pages allocated = %d\n",
+-             (atoi(nr_hugepages)));
+-
+       lseek(fd, 0, SEEK_SET);
+       if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
+           != strlen(initial_nr_hugepages)) {
+-              perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
++              ksft_test_result_fail("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
++                                    strerror(errno));
+               goto close_fd;
+       }
+-      close(fd);
+-      return 0;
++      if (compaction_index > 3) {
++              ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
++                             "as huge pages\n", compaction_index);
++              ksft_test_result_fail("No of huge pages allocated = %d\n", (atoi(nr_hugepages)));
++              goto close_fd;
++      }
++
++      ksft_test_result_pass("Memory compaction succeeded. No of huge pages allocated = %d\n",
++                            (atoi(nr_hugepages)));
++      ret = 0;
+  close_fd:
+       close(fd);
+-      printf("Not OK. Compaction test failed.");
+-      return -1;
++      return ret;
+ }
+ int main(int argc, char **argv)
+ {
+       struct rlimit lim;
+-      struct map_list *list, *entry;
++      struct map_list *list = NULL, *entry;
+       size_t page_size, i;
+       void *map = NULL;
+       unsigned long mem_free = 0;
+       unsigned long hugepage_size = 0;
+       long mem_fragmentable_MB = 0;
+-      if (prereq() != 0) {
+-              printf("Either the sysctl compact_unevictable_allowed is not\n"
+-                     "set to 1 or couldn't read the proc file.\n"
+-                     "Skipping the test\n");
+-              return KSFT_SKIP;
+-      }
++      ksft_print_header();
++
++      if (prereq() != 0)
++              return ksft_exit_pass();
++
++      ksft_set_plan(1);
+       lim.rlim_cur = RLIM_INFINITY;
+       lim.rlim_max = RLIM_INFINITY;
+-      if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
+-              perror("Failed to set rlimit:\n");
+-              return -1;
+-      }
++      if (setrlimit(RLIMIT_MEMLOCK, &lim))
++              ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));
+       page_size = getpagesize();
+-      list = NULL;
+-
+-      if (read_memory_info(&mem_free, &hugepage_size) != 0) {
+-              printf("ERROR: Cannot read meminfo\n");
+-              return -1;
+-      }
++      if (read_memory_info(&mem_free, &hugepage_size) != 0)
++              ksft_exit_fail_msg("Failed to get meminfo\n");
+       mem_fragmentable_MB = mem_free * 0.8 / 1024;
+@@ -227,7 +224,7 @@ int main(int argc, char **argv)
+       }
+       if (check_compaction(mem_free, hugepage_size) == 0)
+-              return 0;
++              return ksft_exit_pass();
+-      return -1;
++      return ksft_exit_fail();
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/selftests-mm-log-a-consistent-test-name-for-check_co.patch b/queue-6.6/selftests-mm-log-a-consistent-test-name-for-check_co.patch
new file mode 100644 (file)
index 0000000..7b17409
--- /dev/null
@@ -0,0 +1,124 @@
+From e000985fd213b09a0f3301b78b68a20a51ad08f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Feb 2024 14:30:04 +0000
+Subject: selftests/mm: log a consistent test name for check_compaction
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit f3b7568c49420d2dcd251032c9ca1e069ec8a6c9 ]
+
+Every test result report in the compaction test prints a distinct log
+messae, and some of the reports print a name that varies at runtime.  This
+causes problems for automation since a lot of automation software uses the
+printed string as the name of the test, if the name varies from run to run
+and from pass to fail then the automation software can't identify that a
+test changed result or that the same tests are being run.
+
+Refactor the logging to use a consistent name when printing the result of
+the test, printing the existing messages as diagnostic information instead
+so they are still available for people trying to interpret the results.
+
+Link: https://lkml.kernel.org/r/20240209-kselftest-mm-cleanup-v1-2-a3c0386496b5@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: d4202e66a4b1 ("selftests/mm: compaction_test: fix bogus test success on Aarch64")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mm/compaction_test.c | 35 +++++++++++---------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index f81931c1f8386..6aa6460b854ea 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -95,14 +95,15 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
+       if (fd < 0) {
+-              ksft_test_result_fail("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
+-                                    strerror(errno));
+-              return -1;
++              ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
++                             strerror(errno));
++              ret = -1;
++              goto out;
+       }
+       if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
+-              ksft_test_result_fail("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
+-                                    strerror(errno));
++              ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
++                             strerror(errno));
+               goto close_fd;
+       }
+@@ -110,8 +111,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       /* Start with the initial condition of 0 huge pages*/
+       if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+-              ksft_test_result_fail("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
+-                                    strerror(errno));
++              ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
++                             strerror(errno));
+               goto close_fd;
+       }
+@@ -120,16 +121,16 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       /* Request a large number of huge pages. The Kernel will allocate
+          as much as it can */
+       if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
+-              ksft_test_result_fail("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
+-                                    strerror(errno));
++              ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
++                             strerror(errno));
+               goto close_fd;
+       }
+       lseek(fd, 0, SEEK_SET);
+       if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
+-              ksft_test_result_fail("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
+-                                    strerror(errno));
++              ksft_print_msg("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
++                             strerror(errno));
+               goto close_fd;
+       }
+@@ -141,24 +142,26 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+       if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
+           != strlen(initial_nr_hugepages)) {
+-              ksft_test_result_fail("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
+-                                    strerror(errno));
++              ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
++                             strerror(errno));
+               goto close_fd;
+       }
++      ksft_print_msg("Number of huge pages allocated = %d\n",
++                     atoi(nr_hugepages));
++
+       if (compaction_index > 3) {
+               ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
+                              "as huge pages\n", compaction_index);
+-              ksft_test_result_fail("No of huge pages allocated = %d\n", (atoi(nr_hugepages)));
+               goto close_fd;
+       }
+-      ksft_test_result_pass("Memory compaction succeeded. No of huge pages allocated = %d\n",
+-                            (atoi(nr_hugepages)));
+       ret = 0;
+  close_fd:
+       close(fd);
++ out:
++      ksft_test_result(ret == 0, "check_compaction\n");
+       return ret;
+ }
+-- 
+2.43.0
+
index e46106ce743d21dde839fb5ed1afd4fcd073f85a..2c353f57a208fbbaf70d68c7ff09300e65b8544e 100644 (file)
@@ -134,3 +134,83 @@ powerpc-bpf-enforce-full-ordering-for-atomic-operations-with-bpf_fetch.patch
 smb-client-fix-deadlock-in-smb2_find_smb_tcon.patch
 selftests-net-more-strict-check-in-net_helper.patch
 smp-provide-setup_max_cpus-definition-on-up-too.patch
+wifi-mac80211-mesh-fix-leak-of-mesh_preq_queue-objec.patch
+wifi-mac80211-fix-deadlock-in-ieee80211_sta_ps_deliv.patch
+wifi-cfg80211-fully-move-wiphy-work-to-unbound-workq.patch
+wifi-cfg80211-lock-wiphy-in-cfg80211_get_station.patch
+wifi-cfg80211-pmsr-use-correct-nla_get_ux-functions.patch
+wifi-iwlwifi-mvm-don-t-initialize-csa_work-twice.patch
+wifi-iwlwifi-mvm-revert-gen2-tx-a-mpdu-size-to-64.patch
+wifi-iwlwifi-mvm-set-properly-mac-header.patch
+wifi-iwlwifi-dbg_ini-move-iwl_dbg_tlv_free-outside-o.patch
+wifi-iwlwifi-mvm-check-n_ssids-before-accessing-the-.patch
+wifi-iwlwifi-mvm-don-t-read-past-the-mfuart-notifcat.patch
+wifi-mac80211-correctly-parse-spatial-reuse-paramete.patch
+scsi-ufs-mcq-fix-error-output-and-clean-up-ufshcd_mc.patch
+risc-v-kvm-no-need-to-use-mask-when-hart-index-bit-i.patch
+risc-v-kvm-fix-incorrect-reg_subtype-labels-in-kvm_r.patch
+ax25-fix-refcount-imbalance-on-inbound-connections.patch
+ax25-replace-kfree-in-ax25_dev_free-with-ax25_dev_pu.patch
+net-ncsi-simplify-kconfig-dts-control-flow.patch
+net-ncsi-fix-the-multi-thread-manner-of-ncsi-driver.patch
+net-phy-micrel-fix-ksz9477-phy-issues-after-suspend-.patch
+bpf-store-ref_ctr_offsets-values-in-bpf_uprobe-array.patch
+bpf-optimize-the-free-of-inner-map.patch
+bpf-fix-a-potential-use-after-free-in-bpf_link_free.patch
+kvm-sev-es-disallow-sev-es-guests-when-x86_feature_l.patch
+kvm-sev-do-not-intercept-accesses-to-msr_ia32_xss-fo.patch
+kvm-sev-es-delegate-lbr-virtualization-to-the-proces.patch
+vmxnet3-disable-rx-data-ring-on-dma-allocation-failu.patch
+ipv6-ioam-block-bh-from-ioam6_output.patch
+ipv6-sr-block-bh-in-seg6_output_core-and-seg6_input_.patch
+net-tls-fix-marking-packets-as-decrypted.patch
+bpf-set-run-context-for-rawtp-test_run-callback.patch
+octeontx2-af-always-allocate-pf-entries-from-low-pri.patch
+net-smc-avoid-overwriting-when-adjusting-sock-bufsiz.patch
+net-phy-micrel-ksz8061-fix-errata-solution-not-takin.patch
+net-sched-sch_multiq-fix-possible-oob-write-in-multi.patch
+vxlan-fix-regression-when-dropping-packets-due-to-in.patch
+tcp-count-close-wait-sockets-for-tcp_mib_currestab.patch
+mptcp-count-close-wait-sockets-for-mptcp_mib_currest.patch
+net-mlx5-stop-waiting-for-pci-if-pci-channel-is-offl.patch
+net-mlx5-always-stop-health-timer-during-driver-remo.patch
+net-mlx5-fix-tainted-pointer-delete-is-case-of-flow-.patch
+net-sched-taprio-always-validate-tca_taprio_attr_pri.patch
+ptp-fix-error-message-on-failed-pin-verification.patch
+ice-fix-iteration-of-tlvs-in-preserved-fields-area.patch
+ice-remove-af_xdp_zc_qps-bitmap.patch
+ice-add-flag-to-distinguish-reset-from-.ndo_bpf-in-x.patch
+net-wwan-iosm-fix-tainted-pointer-delete-is-case-of-.patch
+af_unix-set-sk-sk_state-under-unix_state_lock-for-tr.patch
+af_unix-annodate-data-races-around-sk-sk_state-for-w.patch
+af_unix-annotate-data-race-of-sk-sk_state-in-unix_in.patch
+af_unix-annotate-data-races-around-sk-sk_state-in-un.patch
+af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch
+af_unix-annotate-data-races-around-sk-sk_state-in-se.patch
+af_unix-annotate-data-race-of-sk-sk_state-in-unix_st.patch-2156
+af_unix-annotate-data-races-around-sk-sk_state-in-un.patch-11288
+af_unix-annotate-data-races-around-sk-sk_sndbuf.patch
+af_unix-annotate-data-race-of-net-unx.sysctl_max_dgr.patch
+af_unix-use-unix_recvq_full_lockless-in-unix_stream_.patch
+af_unix-use-skb_queue_empty_lockless-in-unix_release.patch
+af_unix-use-skb_queue_len_lockless-in-sk_diag_show_r.patch
+af_unix-annotate-data-race-of-sk-sk_shutdown-in-sk_d.patch
+ipv6-fix-possible-race-in-__fib6_drop_pcpu_from.patch
+net-ethtool-fix-the-error-condition-in-ethtool_get_p.patch
+ksmbd-use-rwsem-instead-of-rwlock-for-lease-break.patch
+firmware-qcom_scm-disable-clocks-if-qcom_scm_bw_enab.patch
+memory-failure-use-a-folio-in-me_huge_page.patch
+mm-memory-failure-fix-handling-of-dissolved-but-not-.patch
+selftests-mm-conform-test-to-tap-format-output.patch
+selftests-mm-log-a-consistent-test-name-for-check_co.patch
+selftests-mm-compaction_test-fix-bogus-test-success-.patch
+irqchip-riscv-intc-allow-large-non-standard-interrup.patch
+irqchip-riscv-intc-introduce-andes-hart-level-interr.patch
+irqchip-riscv-intc-prevent-memory-leak-when-riscv_in.patch
+ext4-avoid-overflow-when-setting-values-via-sysfs.patch
+ext4-refactor-out-ext4_generic_attr_show.patch
+ext4-fix-slab-out-of-bounds-in-ext4_mb_find_good_gro.patch
+eventfs-update-all-the-eventfs_inodes-from-the-event.patch
+bpf-fix-multi-uprobe-pid-filtering-logic.patch
+nilfs2-return-the-mapped-address-from-nilfs_get_page.patch
+nilfs2-fix-nilfs_empty_dir-misjudgment-and-long-loop.patch
diff --git a/queue-6.6/tcp-count-close-wait-sockets-for-tcp_mib_currestab.patch b/queue-6.6/tcp-count-close-wait-sockets-for-tcp_mib_currestab.patch
new file mode 100644 (file)
index 0000000..a2781ab
--- /dev/null
@@ -0,0 +1,71 @@
+From 1f1047cd8d5eca76bb9ef8d80e84ddea3c17dc1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 01:02:16 +0800
+Subject: tcp: count CLOSE-WAIT sockets for TCP_MIB_CURRESTAB
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit a46d0ea5c94205f40ecf912d1bb7806a8a64704f ]
+
+According to RFC 1213, we should also take CLOSE-WAIT sockets into
+consideration:
+
+  "tcpCurrEstab OBJECT-TYPE
+   ...
+   The number of TCP connections for which the current state
+   is either ESTABLISHED or CLOSE- WAIT."
+
+After this, CurrEstab counter will display the total number of
+ESTABLISHED and CLOSE-WAIT sockets.
+
+The logic of counting
+When we increment the counter?
+a) if we change the state to ESTABLISHED.
+b) if we change the state from SYN-RECEIVED to CLOSE-WAIT.
+
+When we decrement the counter?
+a) if the socket leaves ESTABLISHED and will never go into CLOSE-WAIT,
+say, on the client side, changing from ESTABLISHED to FIN-WAIT-1.
+b) if the socket leaves CLOSE-WAIT, say, on the server side, changing
+from CLOSE-WAIT to LAST-ACK.
+
+Please note: there are two chances that old state of socket can be changed
+to CLOSE-WAIT in tcp_fin(). One is SYN-RECV, the other is ESTABLISHED.
+So we have to take care of the former case.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index a9b33135513d8..2df05ea2e00fe 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2640,6 +2640,10 @@ void tcp_set_state(struct sock *sk, int state)
+               if (oldstate != TCP_ESTABLISHED)
+                       TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+               break;
++      case TCP_CLOSE_WAIT:
++              if (oldstate == TCP_SYN_RECV)
++                      TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
++              break;
+       case TCP_CLOSE:
+               if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
+@@ -2651,7 +2655,7 @@ void tcp_set_state(struct sock *sk, int state)
+                       inet_put_port(sk);
+               fallthrough;
+       default:
+-              if (oldstate == TCP_ESTABLISHED)
++              if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
+                       TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.6/vmxnet3-disable-rx-data-ring-on-dma-allocation-failu.patch b/queue-6.6/vmxnet3-disable-rx-data-ring-on-dma-allocation-failu.patch
new file mode 100644 (file)
index 0000000..6853486
--- /dev/null
@@ -0,0 +1,116 @@
+From 723e56905b13966536e7bc6fee19cc844504788d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 12:37:11 +0200
+Subject: vmxnet3: disable rx data ring on dma allocation failure
+
+From: Matthias Stocker <mstocker@barracuda.com>
+
+[ Upstream commit ffbe335b8d471f79b259e950cb20999700670456 ]
+
+When vmxnet3_rq_create() fails to allocate memory for rq->data_ring.base,
+the subsequent call to vmxnet3_rq_destroy_all_rxdataring does not reset
+rq->data_ring.desc_size for the data ring that failed, which presumably
+causes the hypervisor to reference it on packet reception.
+
+To fix this bug, rq->data_ring.desc_size needs to be set to 0 to tell
+the hypervisor to disable this feature.
+
+[   95.436876] kernel BUG at net/core/skbuff.c:207!
+[   95.439074] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+[   95.440411] CPU: 7 PID: 0 Comm: swapper/7 Not tainted 6.9.3-dirty #1
+[   95.441558] Hardware name: VMware, Inc. VMware Virtual
+Platform/440BX Desktop Reference Platform, BIOS 6.00 12/12/2018
+[   95.443481] RIP: 0010:skb_panic+0x4d/0x4f
+[   95.444404] Code: 4f 70 50 8b 87 c0 00 00 00 50 8b 87 bc 00 00 00 50
+ff b7 d0 00 00 00 4c 8b 8f c8 00 00 00 48 c7 c7 68 e8 be 9f e8 63 58 f9
+ff <0f> 0b 48 8b 14 24 48 c7 c1 d0 73 65 9f e8 a1 ff ff ff 48 8b 14 24
+[   95.447684] RSP: 0018:ffffa13340274dd0 EFLAGS: 00010246
+[   95.448762] RAX: 0000000000000089 RBX: ffff8fbbc72b02d0 RCX: 000000000000083f
+[   95.450148] RDX: 0000000000000000 RSI: 00000000000000f6 RDI: 000000000000083f
+[   95.451520] RBP: 000000000000002d R08: 0000000000000000 R09: ffffa13340274c60
+[   95.452886] R10: ffffffffa04ed468 R11: 0000000000000002 R12: 0000000000000000
+[   95.454293] R13: ffff8fbbdab3c2d0 R14: ffff8fbbdbd829e0 R15: ffff8fbbdbd809e0
+[   95.455682] FS:  0000000000000000(0000) GS:ffff8fbeefd80000(0000) knlGS:0000000000000000
+[   95.457178] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[   95.458340] CR2: 00007fd0d1f650c8 CR3: 0000000115f28000 CR4: 00000000000406f0
+[   95.459791] Call Trace:
+[   95.460515]  <IRQ>
+[   95.461180]  ? __die_body.cold+0x19/0x27
+[   95.462150]  ? die+0x2e/0x50
+[   95.462976]  ? do_trap+0xca/0x110
+[   95.463973]  ? do_error_trap+0x6a/0x90
+[   95.464966]  ? skb_panic+0x4d/0x4f
+[   95.465901]  ? exc_invalid_op+0x50/0x70
+[   95.466849]  ? skb_panic+0x4d/0x4f
+[   95.467718]  ? asm_exc_invalid_op+0x1a/0x20
+[   95.468758]  ? skb_panic+0x4d/0x4f
+[   95.469655]  skb_put.cold+0x10/0x10
+[   95.470573]  vmxnet3_rq_rx_complete+0x862/0x11e0 [vmxnet3]
+[   95.471853]  vmxnet3_poll_rx_only+0x36/0xb0 [vmxnet3]
+[   95.473185]  __napi_poll+0x2b/0x160
+[   95.474145]  net_rx_action+0x2c6/0x3b0
+[   95.475115]  handle_softirqs+0xe7/0x2a0
+[   95.476122]  __irq_exit_rcu+0x97/0xb0
+[   95.477109]  common_interrupt+0x85/0xa0
+[   95.478102]  </IRQ>
+[   95.478846]  <TASK>
+[   95.479603]  asm_common_interrupt+0x26/0x40
+[   95.480657] RIP: 0010:pv_native_safe_halt+0xf/0x20
+[   95.481801] Code: 22 d7 e9 54 87 01 00 0f 1f 40 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa eb 07 0f 00 2d 93 ba 3b 00 fb f4 <e9> 2c 87 01 00 66 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90
+[   95.485563] RSP: 0018:ffffa133400ffe58 EFLAGS: 00000246
+[   95.486882] RAX: 0000000000004000 RBX: ffff8fbbc1d14064 RCX: 0000000000000000
+[   95.488477] RDX: ffff8fbeefd80000 RSI: ffff8fbbc1d14000 RDI: 0000000000000001
+[   95.490067] RBP: ffff8fbbc1d14064 R08: ffffffffa0652260 R09: 00000000000010d3
+[   95.491683] R10: 0000000000000018 R11: ffff8fbeefdb4764 R12: ffffffffa0652260
+[   95.493389] R13: ffffffffa06522e0 R14: 0000000000000001 R15: 0000000000000000
+[   95.495035]  acpi_safe_halt+0x14/0x20
+[   95.496127]  acpi_idle_do_entry+0x2f/0x50
+[   95.497221]  acpi_idle_enter+0x7f/0xd0
+[   95.498272]  cpuidle_enter_state+0x81/0x420
+[   95.499375]  cpuidle_enter+0x2d/0x40
+[   95.500400]  do_idle+0x1e5/0x240
+[   95.501385]  cpu_startup_entry+0x29/0x30
+[   95.502422]  start_secondary+0x11c/0x140
+[   95.503454]  common_startup_64+0x13e/0x141
+[   95.504466]  </TASK>
+[   95.505197] Modules linked in: nft_fib_inet nft_fib_ipv4
+nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6
+nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6
+nf_defrag_ipv4 rfkill ip_set nf_tables vsock_loopback
+vmw_vsock_virtio_transport_common qrtr vmw_vsock_vmci_transport vsock
+sunrpc binfmt_misc pktcdvd vmw_balloon pcspkr vmw_vmci i2c_piix4 joydev
+loop dm_multipath nfnetlink zram crct10dif_pclmul crc32_pclmul vmwgfx
+crc32c_intel polyval_clmulni polyval_generic ghash_clmulni_intel
+sha512_ssse3 sha256_ssse3 vmxnet3 sha1_ssse3 drm_ttm_helper vmw_pvscsi
+ttm ata_generic pata_acpi serio_raw scsi_dh_rdac scsi_dh_emc
+scsi_dh_alua ip6_tables ip_tables fuse
+[   95.516536] ---[ end trace 0000000000000000 ]---
+
+Fixes: 6f4833383e85 ("net: vmxnet3: Fix NULL pointer dereference in vmxnet3_rq_rx_complete()")
+Signed-off-by: Matthias Stocker <mstocker@barracuda.com>
+Reviewed-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Reviewed-by: Ronak Doshi <ronak.doshi@broadcom.com>
+Link: https://lore.kernel.org/r/20240531103711.101961-1-mstocker@barracuda.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vmxnet3/vmxnet3_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 0578864792b60..beebe09eb88ff 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+                                         rq->data_ring.base,
+                                         rq->data_ring.basePA);
+                       rq->data_ring.base = NULL;
+-                      rq->data_ring.desc_size = 0;
+               }
++              rq->data_ring.desc_size = 0;
+       }
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/vxlan-fix-regression-when-dropping-packets-due-to-in.patch b/queue-6.6/vxlan-fix-regression-when-dropping-packets-due-to-in.patch
new file mode 100644 (file)
index 0000000..f53338a
--- /dev/null
@@ -0,0 +1,65 @@
+From f679d0d54570b3cd07f02145a1e8e067ea0a9558 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 10:59:26 +0200
+Subject: vxlan: Fix regression when dropping packets due to invalid src
+ addresses
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 1cd4bc987abb2823836cbb8f887026011ccddc8a ]
+
+Commit f58f45c1e5b9 ("vxlan: drop packets from invalid src-address")
+has recently been added to vxlan mainly in the context of source
+address snooping/learning so that when it is enabled, an entry in the
+FDB is not being created for an invalid address for the corresponding
+tunnel endpoint.
+
+Before commit f58f45c1e5b9 vxlan was similarly behaving as geneve in
+that it passed through whichever macs were set in the L2 header. It
+turns out that this change in behavior breaks setups, for example,
+Cilium with netkit in L3 mode for Pods as well as tunnel mode has been
+passing before the change in f58f45c1e5b9 for both vxlan and geneve.
+After mentioned change it is only passing for geneve as in case of
+vxlan packets are dropped due to vxlan_set_mac() returning false as
+source and destination macs are zero which for E/W traffic via tunnel
+is totally fine.
+
+Fix it by only opting into the is_valid_ether_addr() check in
+vxlan_set_mac() when in fact source address snooping/learning is
+actually enabled in vxlan. This is done by moving the check into
+vxlan_snoop(). With this change, the Cilium connectivity test suite
+passes again for both tunnel flavors.
+
+Fixes: f58f45c1e5b9 ("vxlan: drop packets from invalid src-address")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: David Bauer <mail@david-bauer.net>
+Cc: Ido Schimmel <idosch@nvidia.com>
+Cc: Nikolay Aleksandrov <razor@blackwall.org>
+Cc: Martin KaFai Lau <martin.lau@kernel.org>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
+Reviewed-by: David Bauer <mail@david-bauer.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index c24ff08abe0da..8268fa331826e 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
+       struct vxlan_fdb *f;
+       u32 ifindex = 0;
++      /* Ignore packets from invalid src-address */
++      if (!is_valid_ether_addr(src_mac))
++              return true;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+       if (src_ip->sa.sa_family == AF_INET6 &&
+           (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-cfg80211-fully-move-wiphy-work-to-unbound-workq.patch b/queue-6.6/wifi-cfg80211-fully-move-wiphy-work-to-unbound-workq.patch
new file mode 100644 (file)
index 0000000..6467804
--- /dev/null
@@ -0,0 +1,62 @@
+From 83691708b4e46023aad354aa5c61ffc8a892079a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 12:41:25 +0200
+Subject: wifi: cfg80211: fully move wiphy work to unbound workqueue
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit e296c95eac655008d5a709b8cf54d0018da1c916 ]
+
+Previously I had moved the wiphy work to the unbound
+system workqueue, but missed that when it restarts and
+during resume it was still using the normal system
+workqueue. Fix that.
+
+Fixes: 91d20ab9d9ca ("wifi: cfg80211: use system_unbound_wq for wiphy work")
+Reviewed-by: Miriam Rachel Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://msgid.link/20240522124126.7ca959f2cbd3.I3e2a71ef445d167b84000ccf934ea245aef8d395@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/core.c  | 2 +-
+ net/wireless/sysfs.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index ff743e1f2e2cb..68aa8f0d70140 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
+       if (wk) {
+               list_del_init(&wk->entry);
+               if (!list_empty(&rdev->wiphy_work_list))
+-                      schedule_work(work);
++                      queue_work(system_unbound_wq, work);
+               spin_unlock_irq(&rdev->wiphy_work_lock);
+               wk->func(&rdev->wiphy, wk);
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 565511a3f461e..62f26618f6747 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -5,7 +5,7 @@
+  *
+  * Copyright 2005-2006        Jiri Benc <jbenc@suse.cz>
+  * Copyright 2006     Johannes Berg <johannes@sipsolutions.net>
+- * Copyright (C) 2020-2021, 2023 Intel Corporation
++ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
+  */
+ #include <linux/device.h>
+@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
+       if (rdev->wiphy.registered && rdev->ops->resume)
+               ret = rdev_resume(rdev);
+       rdev->suspended = false;
+-      schedule_work(&rdev->wiphy_work);
++      queue_work(system_unbound_wq, &rdev->wiphy_work);
+       wiphy_unlock(&rdev->wiphy);
+       if (ret)
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-cfg80211-lock-wiphy-in-cfg80211_get_station.patch b/queue-6.6/wifi-cfg80211-lock-wiphy-in-cfg80211_get_station.patch
new file mode 100644 (file)
index 0000000..c1236e7
--- /dev/null
@@ -0,0 +1,103 @@
+From 2da9ee7ecd4a649b5a2a8f360b980eb17f9beea7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 21:47:26 +0200
+Subject: wifi: cfg80211: Lock wiphy in cfg80211_get_station
+
+From: Remi Pommarel <repk@triplefau.lt>
+
+[ Upstream commit 642f89daa34567d02f312d03e41523a894906dae ]
+
+Wiphy should be locked before calling rdev_get_station() (see lockdep
+assert in ieee80211_get_station()).
+
+This fixes the following kernel NULL dereference:
+
+ Unable to handle kernel NULL pointer dereference at virtual address 0000000000000050
+ Mem abort info:
+   ESR = 0x0000000096000006
+   EC = 0x25: DABT (current EL), IL = 32 bits
+   SET = 0, FnV = 0
+   EA = 0, S1PTW = 0
+   FSC = 0x06: level 2 translation fault
+ Data abort info:
+   ISV = 0, ISS = 0x00000006
+   CM = 0, WnR = 0
+ user pgtable: 4k pages, 48-bit VAs, pgdp=0000000003001000
+ [0000000000000050] pgd=0800000002dca003, p4d=0800000002dca003, pud=08000000028e9003, pmd=0000000000000000
+ Internal error: Oops: 0000000096000006 [#1] SMP
+ Modules linked in: netconsole dwc3_meson_g12a dwc3_of_simple dwc3 ip_gre gre ath10k_pci ath10k_core ath9k ath9k_common ath9k_hw ath
+ CPU: 0 PID: 1091 Comm: kworker/u8:0 Not tainted 6.4.0-02144-g565f9a3a7911-dirty #705
+ Hardware name: RPT (r1) (DT)
+ Workqueue: bat_events batadv_v_elp_throughput_metric_update
+ pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : ath10k_sta_statistics+0x10/0x2dc [ath10k_core]
+ lr : sta_set_sinfo+0xcc/0xbd4
+ sp : ffff000007b43ad0
+ x29: ffff000007b43ad0 x28: ffff0000071fa900 x27: ffff00000294ca98
+ x26: ffff000006830880 x25: ffff000006830880 x24: ffff00000294c000
+ x23: 0000000000000001 x22: ffff000007b43c90 x21: ffff800008898acc
+ x20: ffff00000294c6e8 x19: ffff000007b43c90 x18: 0000000000000000
+ x17: 445946354d552d78 x16: 62661f7200000000 x15: 57464f445946354d
+ x14: 0000000000000000 x13: 00000000000000e3 x12: d5f0acbcebea978e
+ x11: 00000000000000e3 x10: 000000010048fe41 x9 : 0000000000000000
+ x8 : ffff000007b43d90 x7 : 000000007a1e2125 x6 : 0000000000000000
+ x5 : ffff0000024e0900 x4 : ffff800000a0250c x3 : ffff000007b43c90
+ x2 : ffff00000294ca98 x1 : ffff000006831920 x0 : 0000000000000000
+ Call trace:
+  ath10k_sta_statistics+0x10/0x2dc [ath10k_core]
+  sta_set_sinfo+0xcc/0xbd4
+  ieee80211_get_station+0x2c/0x44
+  cfg80211_get_station+0x80/0x154
+  batadv_v_elp_get_throughput+0x138/0x1fc
+  batadv_v_elp_throughput_metric_update+0x1c/0xa4
+  process_one_work+0x1ec/0x414
+  worker_thread+0x70/0x46c
+  kthread+0xdc/0xe0
+  ret_from_fork+0x10/0x20
+ Code: a9bb7bfd 910003fd a90153f3 f9411c40 (f9402814)
+
+This happens because STA has time to disconnect and reconnect before
+batadv_v_elp_throughput_metric_update() delayed work gets scheduled. In
+this situation, ath10k_sta_state() can be in the middle of resetting
+arsta data when the work queue get chance to be scheduled and ends up
+accessing it. Locking wiphy prevents that.
+
+Fixes: 7406353d43c8 ("cfg80211: implement cfg80211_get_station cfg80211 API")
+Signed-off-by: Remi Pommarel <repk@triplefau.lt>
+Reviewed-by: Nicolas Escande <nico.escande@gmail.com>
+Acked-by: Antonio Quartulli <a@unstable.cc>
+Link: https://msgid.link/983b24a6a176e0800c01aedcd74480d9b551cb13.1716046653.git.repk@triplefau.lt
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/util.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 9aa7bdce20b26..57ea6d5b092d4 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -2399,6 +2399,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ {
+       struct cfg80211_registered_device *rdev;
+       struct wireless_dev *wdev;
++      int ret;
+       wdev = dev->ieee80211_ptr;
+       if (!wdev)
+@@ -2410,7 +2411,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+       memset(sinfo, 0, sizeof(*sinfo));
+-      return rdev_get_station(rdev, dev, mac_addr, sinfo);
++      wiphy_lock(&rdev->wiphy);
++      ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
++      wiphy_unlock(&rdev->wiphy);
++
++      return ret;
+ }
+ EXPORT_SYMBOL(cfg80211_get_station);
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-cfg80211-pmsr-use-correct-nla_get_ux-functions.patch b/queue-6.6/wifi-cfg80211-pmsr-use-correct-nla_get_ux-functions.patch
new file mode 100644 (file)
index 0000000..0a1043e
--- /dev/null
@@ -0,0 +1,85 @@
+From 31bed732ca3a12a59ab37c5f6d4c4ad54a234f8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 15:50:59 +0800
+Subject: wifi: cfg80211: pmsr: use correct nla_get_uX functions
+
+From: Lin Ma <linma@zju.edu.cn>
+
+[ Upstream commit ab904521f4de52fef4f179d2dfc1877645ef5f5c ]
+
+The commit 9bb7e0f24e7e ("cfg80211: add peer measurement with FTM
+initiator API") defines four attributes NL80211_PMSR_FTM_REQ_ATTR_
+{NUM_BURSTS_EXP}/{BURST_PERIOD}/{BURST_DURATION}/{FTMS_PER_BURST} in
+following ways.
+
+static const struct nla_policy
+nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
+    ...
+    [NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP] =
+        NLA_POLICY_MAX(NLA_U8, 15),
+    [NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 },
+    [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
+        NLA_POLICY_MAX(NLA_U8, 15),
+    [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
+        NLA_POLICY_MAX(NLA_U8, 31),
+    ...
+};
+
+That is, those attributes are expected to be NLA_U8 and NLA_U16 types.
+However, the consumers of these attributes in `pmsr_parse_ftm` blindly
+all use `nla_get_u32`, which is incorrect and causes functionality issues
+on little-endian platforms. Hence, fix them with the correct `nla_get_u8`
+and `nla_get_u16` functions.
+
+Fixes: 9bb7e0f24e7e ("cfg80211: add peer measurement with FTM initiator API")
+Signed-off-by: Lin Ma <linma@zju.edu.cn>
+Link: https://msgid.link/20240521075059.47999-1-linma@zju.edu.cn
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/pmsr.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
+index 9611aa0bd0513..841a4516793b1 100644
+--- a/net/wireless/pmsr.c
++++ b/net/wireless/pmsr.c
+@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+       out->ftm.burst_period = 0;
+       if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
+               out->ftm.burst_period =
+-                      nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
++                      nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+       out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
+       if (out->ftm.asap && !capa->ftm.asap) {
+@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+       out->ftm.num_bursts_exp = 0;
+       if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
+               out->ftm.num_bursts_exp =
+-                      nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
++                      nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+       if (capa->ftm.max_bursts_exponent >= 0 &&
+           out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
+@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+       out->ftm.burst_duration = 15;
+       if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
+               out->ftm.burst_duration =
+-                      nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
++                      nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+       out->ftm.ftms_per_burst = 0;
+       if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
+@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+       out->ftm.ftmr_retries = 3;
+       if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
+               out->ftm.ftmr_retries =
+-                      nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
++                      nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+       out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
+       if (out->ftm.request_lci && !capa->ftm.request_lci) {
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-iwlwifi-dbg_ini-move-iwl_dbg_tlv_free-outside-o.patch b/queue-6.6/wifi-iwlwifi-dbg_ini-move-iwl_dbg_tlv_free-outside-o.patch
new file mode 100644 (file)
index 0000000..1933a1b
--- /dev/null
@@ -0,0 +1,41 @@
+From 69698e8515f0d90e21494433aa46cd2f5e582ed6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 May 2024 17:06:39 +0300
+Subject: wifi: iwlwifi: dbg_ini: move iwl_dbg_tlv_free outside of debugfs
+ ifdef
+
+From: Shahar S Matityahu <shahar.s.matityahu@intel.com>
+
+[ Upstream commit 87821b67dea87addbc4ab093ba752753b002176a ]
+
+The driver should call iwl_dbg_tlv_free even if debugfs is not defined
+since ini mode does not depend on debugfs ifdef.
+
+Fixes: 68f6f492c4fa ("iwlwifi: trans: support loading ini TLVs from external file")
+Signed-off-by: Shahar S Matityahu <shahar.s.matityahu@intel.com>
+Reviewed-by: Luciano Coelho <luciano.coelho@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://msgid.link/20240510170500.c8e3723f55b0.I5e805732b0be31ee6b83c642ec652a34e974ff10@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 8faf4e7872bb9..a56593b6135f6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1824,8 +1824,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ err_fw:
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(drv->dbgfs_drv);
+-      iwl_dbg_tlv_free(drv->trans);
+ #endif
++      iwl_dbg_tlv_free(drv->trans);
+       kfree(drv);
+ err:
+       return ERR_PTR(ret);
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-iwlwifi-mvm-check-n_ssids-before-accessing-the-.patch b/queue-6.6/wifi-iwlwifi-mvm-check-n_ssids-before-accessing-the-.patch
new file mode 100644 (file)
index 0000000..dc6619b
--- /dev/null
@@ -0,0 +1,49 @@
+From d0f1dc0266e7dcf3889986408711a0457518cc77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 May 2024 13:27:12 +0300
+Subject: wifi: iwlwifi: mvm: check n_ssids before accessing the ssids
+
+From: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+
+[ Upstream commit 60d62757df30b74bf397a2847a6db7385c6ee281 ]
+
+In some versions of cfg80211, the ssids pointer might be a valid one even
+though n_ssids is 0. Accessing the pointer in this case will cause an
+out-of-bounds access. Fix this by checking n_ssids first.
+
+Fixes: c1a7515393e4 ("iwlwifi: mvm: add adaptive dwell support")
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Reviewed-by: Ilan Peer <ilan.peer@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Link: https://msgid.link/20240513132416.6e4d1762bf0d.I5a0e6cc8f02050a766db704d15594c61fe583d45@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 03ec900a33433..0841f1d6dc475 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1304,7 +1304,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+               if (IWL_MVM_ADWELL_MAX_BUDGET)
+                       cmd->v7.adwell_max_budget =
+                               cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+-              else if (params->ssids && params->ssids[0].ssid_len)
++              else if (params->n_ssids && params->ssids[0].ssid_len)
+                       cmd->v7.adwell_max_budget =
+                               cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+               else
+@@ -1406,7 +1406,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
+       if (IWL_MVM_ADWELL_MAX_BUDGET)
+               general_params->adwell_max_budget =
+                       cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+-      else if (params->ssids && params->ssids[0].ssid_len)
++      else if (params->n_ssids && params->ssids[0].ssid_len)
+               general_params->adwell_max_budget =
+                       cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+       else
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-iwlwifi-mvm-don-t-initialize-csa_work-twice.patch b/queue-6.6/wifi-iwlwifi-mvm-don-t-initialize-csa_work-twice.patch
new file mode 100644 (file)
index 0000000..080fa5b
--- /dev/null
@@ -0,0 +1,39 @@
+From ea977dd5acec25d66e09d8fcd09ddc6cc86f73b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 12 May 2024 15:25:00 +0300
+Subject: wifi: iwlwifi: mvm: don't initialize csa_work twice
+
+From: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+
+[ Upstream commit 92158790ce4391ce4c35d8dfbce759195e4724cb ]
+
+The initialization of this worker moved to iwl_mvm_mac_init_mvmvif
+but we removed only from the pre-MLD version of the add_interface
+callback. Remove it also from the MLD version.
+
+Fixes: 0bcc2155983e ("wifi: iwlwifi: mvm: init vif works only once")
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Link: https://msgid.link/20240512152312.4f15b41604f0.Iec912158e5a706175531d3736d77d25adf02fba4@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index aef8824469e1e..4d9a872818a52 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -73,8 +73,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+               goto out_free_bf;
+       iwl_mvm_tcm_add_vif(mvm, vif);
+-      INIT_DELAYED_WORK(&mvmvif->csa_work,
+-                        iwl_mvm_channel_switch_disconnect_wk);
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               mvm->monitor_on = true;
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-iwlwifi-mvm-don-t-read-past-the-mfuart-notifcat.patch b/queue-6.6/wifi-iwlwifi-mvm-don-t-read-past-the-mfuart-notifcat.patch
new file mode 100644 (file)
index 0000000..cdfa096
--- /dev/null
@@ -0,0 +1,55 @@
+From 74c9edf84eee9b2e70aa9f0a16feaf790b6e8fa6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 May 2024 13:27:14 +0300
+Subject: wifi: iwlwifi: mvm: don't read past the mfuart notifcation
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+[ Upstream commit 4bb95f4535489ed830cf9b34b0a891e384d1aee4 ]
+
+In case the firmware sends a notification that claims it has more data
+than it has, we will read past what was allocated for the notification.
+Remove the print of the buffer, we won't see it by default. If needed,
+we can see the content with tracing.
+
+This was reported by KFENCE.
+
+Fixes: bdccdb854f2f ("iwlwifi: mvm: support MFUART dump in case of MFUART assert")
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://msgid.link/20240513132416.ba82a01a559e.Ia91dd20f5e1ca1ad380b95e68aebf2794f553d9b@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 1d5ee4330f29f..51f396287dc69 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -92,20 +92,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ {
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+-      __le32 *dump_data = mfu_dump_notif->data;
+-      int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+-      int i;
+       if (mfu_dump_notif->index_num == 0)
+               IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+                        le32_to_cpu(mfu_dump_notif->assert_id));
+-
+-      for (i = 0; i < n_words; i++)
+-              IWL_DEBUG_INFO(mvm,
+-                             "MFUART assert dump, dword %u: 0x%08x\n",
+-                             le16_to_cpu(mfu_dump_notif->index_num) *
+-                             n_words + i,
+-                             le32_to_cpu(dump_data[i]));
+ }
+ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-iwlwifi-mvm-revert-gen2-tx-a-mpdu-size-to-64.patch b/queue-6.6/wifi-iwlwifi-mvm-revert-gen2-tx-a-mpdu-size-to-64.patch
new file mode 100644 (file)
index 0000000..63b78be
--- /dev/null
@@ -0,0 +1,49 @@
+From 0a0be8ecd046348419591638ccf9ea9b1c835c3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 May 2024 17:06:33 +0300
+Subject: wifi: iwlwifi: mvm: revert gen2 TX A-MPDU size to 64
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 4a7aace2899711592327463c1a29ffee44fcc66e ]
+
+We don't actually support >64 even for HE devices, so revert
+back to 64. This fixes an issue where the session is refused
+because the queue is configured differently from the actual
+session later.
+
+Fixes: 514c30696fbc ("iwlwifi: add support for IEEE802.11ax")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Reviewed-by: Liad Kaufman <liad.kaufman@intel.com>
+Reviewed-by: Luciano Coelho <luciano.coelho@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://msgid.link/20240510170500.52f7b4cf83aa.If47e43adddf7fe250ed7f5571fbb35d8221c7c47@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+index 1ca375a5cf6b5..639cecc7a6e60 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+@@ -122,13 +122,8 @@ enum {
+ #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+-/*
+- * FIXME - various places in firmware API still use u8,
+- * e.g. LQ command and SCD config command.
+- * This should be 256 instead.
+- */
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF    (255)
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX    (255)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF    (64)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX    (64)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+ #define LQ_SIZE               2       /* 2 mode tables:  "Active" and "Search" */
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-iwlwifi-mvm-set-properly-mac-header.patch b/queue-6.6/wifi-iwlwifi-mvm-set-properly-mac-header.patch
new file mode 100644 (file)
index 0000000..0f90b68
--- /dev/null
@@ -0,0 +1,45 @@
+From 25b87d9a8ede43973b52e7c0c5cfcc1cbab21738 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 May 2024 17:06:35 +0300
+Subject: wifi: iwlwifi: mvm: set properly mac header
+
+From: Mordechay Goodstein <mordechay.goodstein@intel.com>
+
+[ Upstream commit 0f2e9f6f21d1ff292363cdfb5bc4d492eeaff76e ]
+
+In the driver we only use skb_put* for adding data to the skb, hence data
+never moves and skb_reset_mac_header would set mac_header to the first
+time data was added and not to the mac80211 header. Fix this by using the
+actual length of bytes added when setting the mac header.
+
+Fixes: 3f7a9d577d47 ("wifi: iwlwifi: mvm: simplify by using SKB MAC header pointer")
+Signed-off-by: Mordechay Goodstein <mordechay.goodstein@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://msgid.link/20240510170500.12f2de2909c3.I72a819b96f2fe55bde192a8fd31a4b96c301aa73@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index e9360b555ac93..8cff24d5f5f40 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -2730,8 +2730,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+        *
+        * We mark it as mac header, for upper layers to know where
+        * all radio tap header ends.
++       *
++       * Since data doesn't move data while putting data on skb and that is
++       * the only way we use, data + len is the next place that hdr would be put
+        */
+-      skb_reset_mac_header(skb);
++      skb_set_mac_header(skb, skb->len);
+       /*
+        * Override the nss from the rx_vec since the rate_n_flags has
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-mac80211-correctly-parse-spatial-reuse-paramete.patch b/queue-6.6/wifi-mac80211-correctly-parse-spatial-reuse-paramete.patch
new file mode 100644 (file)
index 0000000..d7a7786
--- /dev/null
@@ -0,0 +1,65 @@
+From c5dfb8503cb89cb63920fe680e039076c166744d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 May 2024 10:18:54 +0800
+Subject: wifi: mac80211: correctly parse Spatial Reuse Parameter Set element
+
+From: Lingbo Kong <quic_lingbok@quicinc.com>
+
+[ Upstream commit a26d8dc5227f449a54518a8b40733a54c6600a8b ]
+
+Currently, the way of parsing Spatial Reuse Parameter Set element is
+incorrect and some members of struct ieee80211_he_obss_pd are not assigned.
+
+To address this issue, it must be parsed in the order of the elements of
+Spatial Reuse Parameter Set defined in the IEEE Std 802.11ax specification.
+
+The diagram of the Spatial Reuse Parameter Set element (IEEE Std 802.11ax
+-2021-9.4.2.252).
+
+-------------------------------------------------------------------------
+|       |      |         |       |Non-SRG|  SRG  | SRG   | SRG  | SRG   |
+|Element|Length| Element |  SR   |OBSS PD|OBSS PD|OBSS PD| BSS  |Partial|
+|   ID  |      |   ID    |Control|  Max  |  Min  | Max   |Color | BSSID |
+|       |      |Extension|       | Offset| Offset|Offset |Bitmap|Bitmap |
+-------------------------------------------------------------------------
+
+Fixes: 1ced169cc1c2 ("mac80211: allow setting spatial reuse parameters from bss_conf")
+Signed-off-by: Lingbo Kong <quic_lingbok@quicinc.com>
+Link: https://msgid.link/20240516021854.5682-3-quic_lingbok@quicinc.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/he.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 9f5ffdc9db284..ecbb042dd0433 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
+       if (!he_spr_ie_elem)
+               return;
++
++      he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
+       data = he_spr_ie_elem->optional;
+       if (he_spr_ie_elem->he_sr_control &
+           IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+-              data++;
++              he_obss_pd->non_srg_max_offset = *data++;
++
+       if (he_spr_ie_elem->he_sr_control &
+           IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+-              he_obss_pd->max_offset = *data++;
+               he_obss_pd->min_offset = *data++;
++              he_obss_pd->max_offset = *data++;
++              memcpy(he_obss_pd->bss_color_bitmap, data, 8);
++              data += 8;
++              memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
+               he_obss_pd->enable = true;
+       }
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-mac80211-fix-deadlock-in-ieee80211_sta_ps_deliv.patch b/queue-6.6/wifi-mac80211-fix-deadlock-in-ieee80211_sta_ps_deliv.patch
new file mode 100644 (file)
index 0000000..1205d4b
--- /dev/null
@@ -0,0 +1,109 @@
+From 372d642d3493ee4d27306fc6ab5846b68d45b158 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 08:57:53 +0200
+Subject: wifi: mac80211: Fix deadlock in ieee80211_sta_ps_deliver_wakeup()
+
+From: Remi Pommarel <repk@triplefau.lt>
+
+[ Upstream commit 44c06bbde6443de206b30f513100b5670b23fc5e ]
+
+The ieee80211_sta_ps_deliver_wakeup() function takes sta->ps_lock to
+synchronizes with ieee80211_tx_h_unicast_ps_buf() which is called from
+softirq context. However using only spin_lock() to get sta->ps_lock in
+ieee80211_sta_ps_deliver_wakeup() does not prevent softirq to execute
+on this same CPU, to run ieee80211_tx_h_unicast_ps_buf() and try to
+take this same lock ending in deadlock. Below is an example of rcu stall
+that arises in such situation.
+
+ rcu: INFO: rcu_sched self-detected stall on CPU
+ rcu:    2-....: (42413413 ticks this GP) idle=b154/1/0x4000000000000000 softirq=1763/1765 fqs=21206996
+ rcu:    (t=42586894 jiffies g=2057 q=362405 ncpus=4)
+ CPU: 2 PID: 719 Comm: wpa_supplicant Tainted: G        W          6.4.0-02158-g1b062f552873 #742
+ Hardware name: RPT (r1) (DT)
+ pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : queued_spin_lock_slowpath+0x58/0x2d0
+ lr : invoke_tx_handlers_early+0x5b4/0x5c0
+ sp : ffff00001ef64660
+ x29: ffff00001ef64660 x28: ffff000009bc1070 x27: ffff000009bc0ad8
+ x26: ffff000009bc0900 x25: ffff00001ef647a8 x24: 0000000000000000
+ x23: ffff000009bc0900 x22: ffff000009bc0900 x21: ffff00000ac0e000
+ x20: ffff00000a279e00 x19: ffff00001ef646e8 x18: 0000000000000000
+ x17: ffff800016468000 x16: ffff00001ef608c0 x15: 0010533c93f64f80
+ x14: 0010395c9faa3946 x13: 0000000000000000 x12: 00000000fa83b2da
+ x11: 000000012edeceea x10: ffff0000010fbe00 x9 : 0000000000895440
+ x8 : 000000000010533c x7 : ffff00000ad8b740 x6 : ffff00000c350880
+ x5 : 0000000000000007 x4 : 0000000000000001 x3 : 0000000000000000
+ x2 : 0000000000000000 x1 : 0000000000000001 x0 : ffff00000ac0e0e8
+ Call trace:
+  queued_spin_lock_slowpath+0x58/0x2d0
+  ieee80211_tx+0x80/0x12c
+  ieee80211_tx_pending+0x110/0x278
+  tasklet_action_common.constprop.0+0x10c/0x144
+  tasklet_action+0x20/0x28
+  _stext+0x11c/0x284
+  ____do_softirq+0xc/0x14
+  call_on_irq_stack+0x24/0x34
+  do_softirq_own_stack+0x18/0x20
+  do_softirq+0x74/0x7c
+  __local_bh_enable_ip+0xa0/0xa4
+  _ieee80211_wake_txqs+0x3b0/0x4b8
+  __ieee80211_wake_queue+0x12c/0x168
+  ieee80211_add_pending_skbs+0xec/0x138
+  ieee80211_sta_ps_deliver_wakeup+0x2a4/0x480
+  ieee80211_mps_sta_status_update.part.0+0xd8/0x11c
+  ieee80211_mps_sta_status_update+0x18/0x24
+  sta_apply_parameters+0x3bc/0x4c0
+  ieee80211_change_station+0x1b8/0x2dc
+  nl80211_set_station+0x444/0x49c
+  genl_family_rcv_msg_doit.isra.0+0xa4/0xfc
+  genl_rcv_msg+0x1b0/0x244
+  netlink_rcv_skb+0x38/0x10c
+  genl_rcv+0x34/0x48
+  netlink_unicast+0x254/0x2bc
+  netlink_sendmsg+0x190/0x3b4
+  ____sys_sendmsg+0x1e8/0x218
+  ___sys_sendmsg+0x68/0x8c
+  __sys_sendmsg+0x44/0x84
+  __arm64_sys_sendmsg+0x20/0x28
+  do_el0_svc+0x6c/0xe8
+  el0_svc+0x14/0x48
+  el0t_64_sync_handler+0xb0/0xb4
+  el0t_64_sync+0x14c/0x150
+
+Using spin_lock_bh()/spin_unlock_bh() instead prevents softirq to raise
+on the same CPU that is holding the lock.
+
+Fixes: 1d147bfa6429 ("mac80211: fix AP powersave TX vs. wakeup race")
+Signed-off-by: Remi Pommarel <repk@triplefau.lt>
+Link: https://msgid.link/8e36fe07d0fbc146f89196cd47a53c8a0afe84aa.1716910344.git.repk@triplefau.lt
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/sta_info.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index c61eb867bb4a7..984f8f67492fd 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1709,7 +1709,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+       skb_queue_head_init(&pending);
+       /* sync with ieee80211_tx_h_unicast_ps_buf */
+-      spin_lock(&sta->ps_lock);
++      spin_lock_bh(&sta->ps_lock);
+       /* Send all buffered frames to the station */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               int count = skb_queue_len(&pending), tmp;
+@@ -1738,7 +1738,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+        */
+       clear_sta_flag(sta, WLAN_STA_PSPOLL);
+       clear_sta_flag(sta, WLAN_STA_UAPSD);
+-      spin_unlock(&sta->ps_lock);
++      spin_unlock_bh(&sta->ps_lock);
+       atomic_dec(&ps->num_sta_ps);
+-- 
+2.43.0
+
diff --git a/queue-6.6/wifi-mac80211-mesh-fix-leak-of-mesh_preq_queue-objec.patch b/queue-6.6/wifi-mac80211-mesh-fix-leak-of-mesh_preq_queue-objec.patch
new file mode 100644 (file)
index 0000000..5e1c17c
--- /dev/null
@@ -0,0 +1,100 @@
+From 7d970bc63270145f7dc07eb9fe6f12c36539799a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 16:26:05 +0200
+Subject: wifi: mac80211: mesh: Fix leak of mesh_preq_queue objects
+
+From: Nicolas Escande <nico.escande@gmail.com>
+
+[ Upstream commit b7d7f11a291830fdf69d3301075dd0fb347ced84 ]
+
+The hwmp code uses objects of type mesh_preq_queue, added to a list in
+ieee80211_if_mesh, to keep track of mpaths we need to resolve. If the mpath
+gets deleted, e.g. when the mesh interface is removed, the entries in that
+list will never get cleaned up. Fix this by flushing all corresponding items
+of the preq_queue in mesh_path_flush_pending().
+
+This should take care of KASAN reports like this:
+
+unreferenced object 0xffff00000668d800 (size 128):
+  comm "kworker/u8:4", pid 67, jiffies 4295419552 (age 1836.444s)
+  hex dump (first 32 bytes):
+    00 1f 05 09 00 00 ff ff 00 d5 68 06 00 00 ff ff  ..........h.....
+    8e 97 ea eb 3e b8 01 00 00 00 00 00 00 00 00 00  ....>...........
+  backtrace:
+    [<000000007302a0b6>] __kmem_cache_alloc_node+0x1e0/0x35c
+    [<00000000049bd418>] kmalloc_trace+0x34/0x80
+    [<0000000000d792bb>] mesh_queue_preq+0x44/0x2a8
+    [<00000000c99c3696>] mesh_nexthop_resolve+0x198/0x19c
+    [<00000000926bf598>] ieee80211_xmit+0x1d0/0x1f4
+    [<00000000fc8c2284>] __ieee80211_subif_start_xmit+0x30c/0x764
+    [<000000005926ee38>] ieee80211_subif_start_xmit+0x9c/0x7a4
+    [<000000004c86e916>] dev_hard_start_xmit+0x174/0x440
+    [<0000000023495647>] __dev_queue_xmit+0xe24/0x111c
+    [<00000000cfe9ca78>] batadv_send_skb_packet+0x180/0x1e4
+    [<000000007bacc5d5>] batadv_v_elp_periodic_work+0x2f4/0x508
+    [<00000000adc3cd94>] process_one_work+0x4b8/0xa1c
+    [<00000000b36425d1>] worker_thread+0x9c/0x634
+    [<0000000005852dd5>] kthread+0x1bc/0x1c4
+    [<000000005fccd770>] ret_from_fork+0x10/0x20
+unreferenced object 0xffff000009051f00 (size 128):
+  comm "kworker/u8:4", pid 67, jiffies 4295419553 (age 1836.440s)
+  hex dump (first 32 bytes):
+    90 d6 92 0d 00 00 ff ff 00 d8 68 06 00 00 ff ff  ..........h.....
+    36 27 92 e4 02 e0 01 00 00 58 79 06 00 00 ff ff  6'.......Xy.....
+  backtrace:
+    [<000000007302a0b6>] __kmem_cache_alloc_node+0x1e0/0x35c
+    [<00000000049bd418>] kmalloc_trace+0x34/0x80
+    [<0000000000d792bb>] mesh_queue_preq+0x44/0x2a8
+    [<00000000c99c3696>] mesh_nexthop_resolve+0x198/0x19c
+    [<00000000926bf598>] ieee80211_xmit+0x1d0/0x1f4
+    [<00000000fc8c2284>] __ieee80211_subif_start_xmit+0x30c/0x764
+    [<000000005926ee38>] ieee80211_subif_start_xmit+0x9c/0x7a4
+    [<000000004c86e916>] dev_hard_start_xmit+0x174/0x440
+    [<0000000023495647>] __dev_queue_xmit+0xe24/0x111c
+    [<00000000cfe9ca78>] batadv_send_skb_packet+0x180/0x1e4
+    [<000000007bacc5d5>] batadv_v_elp_periodic_work+0x2f4/0x508
+    [<00000000adc3cd94>] process_one_work+0x4b8/0xa1c
+    [<00000000b36425d1>] worker_thread+0x9c/0x634
+    [<0000000005852dd5>] kthread+0x1bc/0x1c4
+    [<000000005fccd770>] ret_from_fork+0x10/0x20
+
+Fixes: 050ac52cbe1f ("mac80211: code for on-demand Hybrid Wireless Mesh Protocol")
+Signed-off-by: Nicolas Escande <nico.escande@gmail.com>
+Link: https://msgid.link/20240528142605.1060566-1-nico.escande@gmail.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/mesh_pathtbl.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 59f7264194ce3..530581ba812b4 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -1011,10 +1011,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
+  */
+ void mesh_path_flush_pending(struct mesh_path *mpath)
+ {
++      struct ieee80211_sub_if_data *sdata = mpath->sdata;
++      struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++      struct mesh_preq_queue *preq, *tmp;
+       struct sk_buff *skb;
+       while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
+               mesh_path_discard_frame(mpath->sdata, skb);
++
++      spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
++      list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
++              if (ether_addr_equal(mpath->dst, preq->dst)) {
++                      list_del(&preq->list);
++                      kfree(preq);
++                      --ifmsh->preq_queue_len;
++              }
++      }
++      spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+ }
+ /**
+-- 
+2.43.0
+