git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.1
author Sasha Levin <sashal@kernel.org>
Sun, 18 Feb 2024 16:39:34 +0000 (11:39 -0500)
committer Sasha Levin <sashal@kernel.org>
Sun, 18 Feb 2024 16:39:34 +0000 (11:39 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
22 files changed:
queue-6.1/asoc-rt5645-fix-deadlock-in-rt5645_jack_detect_work.patch [new file with mode: 0644]
queue-6.1/i40e-do-not-allow-untrusted-vf-to-remove-administrat.patch [new file with mode: 0644]
queue-6.1/i40e-fix-waiting-for-queues-of-all-vsis-to-be-disabl.patch [new file with mode: 0644]
queue-6.1/kvm-selftests-clear-dirty-ring-states-between-two-mo.patch [new file with mode: 0644]
queue-6.1/kvm-selftests-fix-a-semaphore-imbalance-in-the-dirty.patch [new file with mode: 0644]
queue-6.1/lan966x-fix-crash-when-adding-interface-under-a-lag.patch [new file with mode: 0644]
queue-6.1/mips-add-memory-clobber-to-csum_ipv6_magic-inline-as.patch [new file with mode: 0644]
queue-6.1/net-openvswitch-limit-the-number-of-recursions-from-.patch [new file with mode: 0644]
queue-6.1/net-sysfs-fix-sys-class-net-iface-path-for-statistic.patch [new file with mode: 0644]
queue-6.1/net-tls-factor-out-tls_-crypt_async_wait.patch [new file with mode: 0644]
queue-6.1/net-tls-fix-returned-read-length-with-async-decrypt.patch [new file with mode: 0644]
queue-6.1/net-tls-fix-use-after-free-with-partial-reads-and-as.patch [new file with mode: 0644]
queue-6.1/nouveau-svm-fix-kvcalloc-argument-order.patch [new file with mode: 0644]
queue-6.1/of-property-improve-finding-the-supplier-of-a-remote.patch [new file with mode: 0644]
queue-6.1/of-unittest-fix-compile-in-the-non-dynamic-case.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/spi-ppc4xx-drop-write-only-variable.patch [new file with mode: 0644]
queue-6.1/tls-extract-context-alloc-initialization-out-of-tls_.patch [new file with mode: 0644]
queue-6.1/tls-fix-race-between-async-notify-and-socket-close.patch [new file with mode: 0644]
queue-6.1/tls-sw-use-splice_eof-to-flush.patch [new file with mode: 0644]
queue-6.1/wifi-iwlwifi-fix-some-error-codes.patch [new file with mode: 0644]
queue-6.1/wifi-iwlwifi-uninitialized-variable-in-iwl_acpi_get_.patch [new file with mode: 0644]

diff --git a/queue-6.1/asoc-rt5645-fix-deadlock-in-rt5645_jack_detect_work.patch b/queue-6.1/asoc-rt5645-fix-deadlock-in-rt5645_jack_detect_work.patch
new file mode 100644 (file)
index 0000000..640cfca
--- /dev/null
@@ -0,0 +1,39 @@
+From de3892d1fe18e1df9fa504768ab70b4707bcfc90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Feb 2024 12:58:34 +0300
+Subject: ASoC: rt5645: Fix deadlock in rt5645_jack_detect_work()
+
+From: Alexey Khoroshilov <khoroshilov@ispras.ru>
+
+[ Upstream commit 6ef5d5b92f7117b324efaac72b3db27ae8bb3082 ]
+
+There is a path in rt5645_jack_detect_work(), where rt5645->jd_mutex
+is left locked forever. That may lead to deadlock
+when rt5645_jack_detect_work() is called for the second time.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: cdba4301adda ("ASoC: rt5650: add mutex to avoid the jack detection failure")
+Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru>
+Link: https://lore.kernel.org/r/1707645514-21196-1-git-send-email-khoroshilov@ispras.ru
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/rt5645.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index fd3dca08460b..844d14d4c9a5 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3288,6 +3288,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+                                   report, SND_JACK_HEADPHONE);
+               snd_soc_jack_report(rt5645->mic_jack,
+                                   report, SND_JACK_MICROPHONE);
++              mutex_unlock(&rt5645->jd_mutex);
+               return;
+       case 4:
+               val = snd_soc_component_read(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
+-- 
+2.43.0
+
diff --git a/queue-6.1/i40e-do-not-allow-untrusted-vf-to-remove-administrat.patch b/queue-6.1/i40e-do-not-allow-untrusted-vf-to-remove-administrat.patch
new file mode 100644 (file)
index 0000000..eec5865
--- /dev/null
@@ -0,0 +1,124 @@
+From 609dbaf95ccd64a6e876f2d0ac49f567ac6b465d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Feb 2024 10:03:33 -0800
+Subject: i40e: Do not allow untrusted VF to remove administratively set MAC
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit 73d9629e1c8c1982f13688c4d1019c3994647ccc ]
+
+Currently when PF administratively sets VF's MAC address and the VF
+is put down (VF tries to delete all MACs) then the MAC is removed
+from MAC filters and primary VF MAC is zeroed.
+
+Do not allow untrusted VF to remove primary MAC when it was set
+administratively by PF.
+
+Reproducer:
+1) Create VF
+2) Set VF interface up
+3) Administratively set the VF's MAC
+4) Put VF interface down
+
+[root@host ~]# echo 1 > /sys/class/net/enp2s0f0/device/sriov_numvfs
+[root@host ~]# ip link set enp2s0f0v0 up
+[root@host ~]# ip link set enp2s0f0 vf 0 mac fe:6c:b5:da:c7:7d
+[root@host ~]# ip link show enp2s0f0
+23: enp2s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ether 3c:ec:ef:b7:dd:04 brd ff:ff:ff:ff:ff:ff
+    vf 0     link/ether fe:6c:b5:da:c7:7d brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off
+[root@host ~]# ip link set enp2s0f0v0 down
+[root@host ~]# ip link show enp2s0f0
+23: enp2s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ether 3c:ec:ef:b7:dd:04 brd ff:ff:ff:ff:ff:ff
+    vf 0     link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off
+
+Fixes: 700bbf6c1f9e ("i40e: allow VF to remove any MAC filter")
+Fixes: ceb29474bbbc ("i40e: Add support for VF to specify its primary MAC address")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://lore.kernel.org/r/20240208180335.1844996-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/intel/i40e/i40e_virtchnl_pf.c    | 38 ++++++++++++++++---
+ 1 file changed, 33 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 3d3db58090ed..ed4be80fec2a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2846,6 +2846,24 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+                                     (u8 *)&stats, sizeof(stats));
+ }
++/**
++ * i40e_can_vf_change_mac
++ * @vf: pointer to the VF info
++ *
++ * Return true if the VF is allowed to change its MAC filters, false otherwise
++ */
++static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
++{
++      /* If the VF MAC address has been set administratively (via the
++       * ndo_set_vf_mac command), then deny permission to the VF to
++       * add/delete unicast MAC addresses, unless the VF is trusted
++       */
++      if (vf->pf_set_mac && !vf->trusted)
++              return false;
++
++      return true;
++}
++
+ #define I40E_MAX_MACVLAN_PER_HW 3072
+ #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
+       (num_ports))
+@@ -2905,8 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+                * The VF may request to set the MAC address filter already
+                * assigned to it so do not return an error in that case.
+                */
+-              if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+-                  !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
++              if (!i40e_can_vf_change_mac(vf) &&
++                  !is_multicast_ether_addr(addr) &&
+                   !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+                       dev_err(&pf->pdev->dev,
+                               "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+@@ -3049,19 +3067,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+                       ret = I40E_ERR_INVALID_MAC_ADDR;
+                       goto error_param;
+               }
+-              if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+-                      was_unimac_deleted = true;
+       }
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       spin_lock_bh(&vsi->mac_filter_hash_lock);
+       /* delete addresses from the list */
+-      for (i = 0; i < al->num_elements; i++)
++      for (i = 0; i < al->num_elements; i++) {
++              const u8 *addr = al->list[i].addr;
++
++              /* Allow to delete VF primary MAC only if it was not set
++               * administratively by PF or if VF is trusted.
++               */
++              if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
++                  i40e_can_vf_change_mac(vf))
++                      was_unimac_deleted = true;
++              else
++                      continue;
++
+               if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
+                       ret = I40E_ERR_INVALID_MAC_ADDR;
+                       spin_unlock_bh(&vsi->mac_filter_hash_lock);
+                       goto error_param;
+               }
++      }
+       spin_unlock_bh(&vsi->mac_filter_hash_lock);
+-- 
+2.43.0
+
diff --git a/queue-6.1/i40e-fix-waiting-for-queues-of-all-vsis-to-be-disabl.patch b/queue-6.1/i40e-fix-waiting-for-queues-of-all-vsis-to-be-disabl.patch
new file mode 100644 (file)
index 0000000..dfb78a9
--- /dev/null
@@ -0,0 +1,41 @@
+From 950a1421a98ac606232418a158383615ea574c04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Nov 2023 17:01:03 +0100
+Subject: i40e: Fix waiting for queues of all VSIs to be disabled
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit c73729b64bb692186da080602cd13612783f52ac ]
+
+The function i40e_pf_wait_queues_disabled() iterates all PF's VSIs
+up to 'pf->hw.func_caps.num_vsis' but this is incorrect because
+the real number of VSIs can be up to 'pf->num_alloc_vsi' that
+can be higher. Fix this loop.
+
+Fixes: 69129dc39fac ("i40e: Modify Tx disable wait flow in case of DCB reconfiguration")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 63d43ef86f9b..76455405a6d8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -5333,7 +5333,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
+ {
+       int v, ret = 0;
+-      for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
++      for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (pf->vsi[v]) {
+                       ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
+                       if (ret)
+-- 
+2.43.0
+
diff --git a/queue-6.1/kvm-selftests-clear-dirty-ring-states-between-two-mo.patch b/queue-6.1/kvm-selftests-clear-dirty-ring-states-between-two-mo.patch
new file mode 100644 (file)
index 0000000..f1ec3e1
--- /dev/null
@@ -0,0 +1,170 @@
+From b81388a209f8a61ac75b88948e4da028974e2f2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Nov 2022 18:49:13 +0800
+Subject: KVM: selftests: Clear dirty ring states between two modes in
+ dirty_log_test
+
+From: Gavin Shan <gshan@redhat.com>
+
+[ Upstream commit 7167190ddb863bd061c0c6b61f4cec94184b40da ]
+
+There are two states, which need to be cleared before next mode
+is executed. Otherwise, we will hit failure as the following messages
+indicate.
+
+- The variable 'dirty_ring_vcpu_ring_full' shared by main and vcpu
+  thread. It's indicating if the vcpu exit due to full ring buffer.
+  The value can be carried from previous mode (VM_MODE_P40V48_4K) to
+  current one (VM_MODE_P40V48_64K) when VM_MODE_P40V48_16K isn't
+  supported.
+
+- The current ring buffer index needs to be reset before next mode
+  (VM_MODE_P40V48_64K) is executed. Otherwise, the stale value is
+  carried from previous mode (VM_MODE_P40V48_4K).
+
+  # ./dirty_log_test -M dirty-ring
+  Setting log mode to: 'dirty-ring'
+  Test iterations: 32, interval: 10 (ms)
+  Testing guest mode: PA-bits:40,  VA-bits:48,  4K pages
+  guest physical test memory offset: 0xffbfffc000
+    :
+  Dirtied 995328 pages
+  Total bits checked: dirty (1012434), clear (7114123), track_next (966700)
+  Testing guest mode: PA-bits:40,  VA-bits:48, 64K pages
+  guest physical test memory offset: 0xffbffc0000
+  vcpu stops because vcpu is kicked out...
+  vcpu continues now.
+  Notifying vcpu to continue
+  Iteration 1 collected 0 pages
+  vcpu stops because dirty ring is full...
+  vcpu continues now.
+  vcpu stops because dirty ring is full...
+  vcpu continues now.
+  vcpu stops because dirty ring is full...
+  ==== Test Assertion Failure ====
+  dirty_log_test.c:369: cleared == count
+  pid=10541 tid=10541 errno=22 - Invalid argument
+     1 0x0000000000403087: dirty_ring_collect_dirty_pages at dirty_log_test.c:369
+     2 0x0000000000402a0b: log_mode_collect_dirty_pages at dirty_log_test.c:492
+     3  (inlined by) run_test at dirty_log_test.c:795
+     4  (inlined by) run_test at dirty_log_test.c:705
+     5 0x0000000000403a37: for_each_guest_mode at guest_modes.c:100
+     6 0x0000000000401ccf: main at dirty_log_test.c:938
+     7 0x0000ffff9ecd279b: ?? ??:0
+     8 0x0000ffff9ecd286b: ?? ??:0
+     9 0x0000000000401def: _start at ??:?
+  Reset dirty pages (0) mismatch with collected (35566)
+
+Fix the issues by clearing 'dirty_ring_vcpu_ring_full' and the ring
+buffer index before next new mode is to be executed.
+
+Signed-off-by: Gavin Shan <gshan@redhat.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20221110104914.31280-7-gshan@redhat.com
+Stable-dep-of: ba58f873cdee ("KVM: selftests: Fix a semaphore imbalance in the dirty ring logging test")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kvm/dirty_log_test.c | 27 ++++++++++++--------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index b5234d6efbe1..8758c10ec850 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -226,13 +226,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
+ }
+ static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-                                        void *bitmap, uint32_t num_pages)
++                                        void *bitmap, uint32_t num_pages,
++                                        uint32_t *unused)
+ {
+       kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
+ }
+ static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-                                        void *bitmap, uint32_t num_pages)
++                                        void *bitmap, uint32_t num_pages,
++                                        uint32_t *unused)
+ {
+       kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
+       kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
+@@ -329,10 +331,9 @@ static void dirty_ring_continue_vcpu(void)
+ }
+ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-                                         void *bitmap, uint32_t num_pages)
++                                         void *bitmap, uint32_t num_pages,
++                                         uint32_t *ring_buf_idx)
+ {
+-      /* We only have one vcpu */
+-      static uint32_t fetch_index = 0;
+       uint32_t count = 0, cleared;
+       bool continued_vcpu = false;
+@@ -349,7 +350,8 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+       /* Only have one vcpu */
+       count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
+-                                     slot, bitmap, num_pages, &fetch_index);
++                                     slot, bitmap, num_pages,
++                                     ring_buf_idx);
+       cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
+@@ -406,7 +408,8 @@ struct log_mode {
+       void (*create_vm_done)(struct kvm_vm *vm);
+       /* Hook to collect the dirty pages into the bitmap provided */
+       void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
+-                                   void *bitmap, uint32_t num_pages);
++                                   void *bitmap, uint32_t num_pages,
++                                   uint32_t *ring_buf_idx);
+       /* Hook to call when after each vcpu run */
+       void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+       void (*before_vcpu_join) (void);
+@@ -471,13 +474,14 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
+ }
+ static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-                                       void *bitmap, uint32_t num_pages)
++                                       void *bitmap, uint32_t num_pages,
++                                       uint32_t *ring_buf_idx)
+ {
+       struct log_mode *mode = &log_modes[host_log_mode];
+       TEST_ASSERT(mode->collect_dirty_pages != NULL,
+                   "collect_dirty_pages() is required for any log mode!");
+-      mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
++      mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
+ }
+ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+@@ -696,6 +700,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       unsigned long *bmap;
++      uint32_t ring_buf_idx = 0;
+       if (!log_mode_supported()) {
+               print_skip("Log mode '%s' not supported",
+@@ -771,6 +776,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+       host_dirty_count = 0;
+       host_clear_count = 0;
+       host_track_next_count = 0;
++      WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+       pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
+@@ -778,7 +784,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+               /* Give the vcpu thread some time to dirty some pages */
+               usleep(p->interval * 1000);
+               log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
+-                                           bmap, host_num_pages);
++                                           bmap, host_num_pages,
++                                           &ring_buf_idx);
+               /*
+                * See vcpu_sync_stop_requested definition for details on why
+-- 
+2.43.0
+
diff --git a/queue-6.1/kvm-selftests-fix-a-semaphore-imbalance-in-the-dirty.patch b/queue-6.1/kvm-selftests-fix-a-semaphore-imbalance-in-the-dirty.patch
new file mode 100644 (file)
index 0000000..64a6392
--- /dev/null
@@ -0,0 +1,183 @@
+From 3a15ae15dab22e33effd9becb9e7c00b76946a5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Feb 2024 15:18:31 -0800
+Subject: KVM: selftests: Fix a semaphore imbalance in the dirty ring logging
+ test
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ba58f873cdeec30b6da48e28dd5782c5a3e1371b ]
+
+When finishing the final iteration of dirty_log_test testcase, set
+host_quit _before_ the final "continue" so that the vCPU worker doesn't
+run an extra iteration, and delete the hack-a-fix of an extra "continue"
+from the dirty ring testcase.  This fixes a bug where the extra post to
+sem_vcpu_cont may not be consumed, which results in failures in subsequent
+runs of the testcases.  The bug likely was missed during development as
+x86 supports only a single "guest mode", i.e. there aren't any subsequent
+testcases after the dirty ring test, because for_each_guest_mode() only
+runs a single iteration.
+
+For the regular dirty log testcases, letting the vCPU run one extra
+iteration is a non-issue as the vCPU worker waits on sem_vcpu_cont if and
+only if the worker is explicitly told to stop (vcpu_sync_stop_requested).
+But for the dirty ring test, which needs to periodically stop the vCPU to
+reap the dirty ring, letting the vCPU resume the guest _after_ the last
+iteration means the vCPU will get stuck without an extra "continue".
+
+However, blindly firing off an post to sem_vcpu_cont isn't guaranteed to
+be consumed, e.g. if the vCPU worker sees host_quit==true before resuming
+the guest.  This results in a dangling sem_vcpu_cont, which leads to
+subsequent iterations getting out of sync, as the vCPU worker will
+continue on before the main task is ready for it to resume the guest,
+leading to a variety of asserts, e.g.
+
+  ==== Test Assertion Failure ====
+  dirty_log_test.c:384: dirty_ring_vcpu_ring_full
+  pid=14854 tid=14854 errno=22 - Invalid argument
+     1  0x00000000004033eb: dirty_ring_collect_dirty_pages at dirty_log_test.c:384
+     2  0x0000000000402d27: log_mode_collect_dirty_pages at dirty_log_test.c:505
+     3   (inlined by) run_test at dirty_log_test.c:802
+     4  0x0000000000403dc7: for_each_guest_mode at guest_modes.c:100
+     5  0x0000000000401dff: main at dirty_log_test.c:941 (discriminator 3)
+     6  0x0000ffff9be173c7: ?? ??:0
+     7  0x0000ffff9be1749f: ?? ??:0
+     8  0x000000000040206f: _start at ??:?
+  Didn't continue vcpu even without ring full
+
+Alternatively, the test could simply reset the semaphores before each
+testcase, but papering over hacks with more hacks usually ends in tears.
+
+Reported-by: Shaoqin Huang <shahuang@redhat.com>
+Fixes: 84292e565951 ("KVM: selftests: Add dirty ring buffer test")
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
+Link: https://lore.kernel.org/r/20240202231831.354848-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kvm/dirty_log_test.c | 50 +++++++++++---------
+ 1 file changed, 27 insertions(+), 23 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index 8758c10ec850..ec40a33c29fd 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -355,7 +355,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+       cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
+-      /* Cleared pages should be the same as collected */
++      /*
++       * Cleared pages should be the same as collected, as KVM is supposed to
++       * clear only the entries that have been harvested.
++       */
+       TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
+                   "with collected (%u)", cleared, count);
+@@ -394,12 +397,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+       }
+ }
+-static void dirty_ring_before_vcpu_join(void)
+-{
+-      /* Kick another round of vcpu just to make sure it will quit */
+-      sem_post(&sem_vcpu_cont);
+-}
+-
+ struct log_mode {
+       const char *name;
+       /* Return true if this mode is supported, otherwise false */
+@@ -412,7 +409,6 @@ struct log_mode {
+                                    uint32_t *ring_buf_idx);
+       /* Hook to call when after each vcpu run */
+       void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+-      void (*before_vcpu_join) (void);
+ } log_modes[LOG_MODE_NUM] = {
+       {
+               .name = "dirty-log",
+@@ -431,7 +427,6 @@ struct log_mode {
+               .supported = dirty_ring_supported,
+               .create_vm_done = dirty_ring_create_vm_done,
+               .collect_dirty_pages = dirty_ring_collect_dirty_pages,
+-              .before_vcpu_join = dirty_ring_before_vcpu_join,
+               .after_vcpu_run = dirty_ring_after_vcpu_run,
+       },
+ };
+@@ -492,14 +487,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+               mode->after_vcpu_run(vcpu, ret, err);
+ }
+-static void log_mode_before_vcpu_join(void)
+-{
+-      struct log_mode *mode = &log_modes[host_log_mode];
+-
+-      if (mode->before_vcpu_join)
+-              mode->before_vcpu_join();
+-}
+-
+ static void generate_random_array(uint64_t *guest_array, uint64_t size)
+ {
+       uint64_t i;
+@@ -701,6 +688,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+       struct kvm_vm *vm;
+       unsigned long *bmap;
+       uint32_t ring_buf_idx = 0;
++      int sem_val;
+       if (!log_mode_supported()) {
+               print_skip("Log mode '%s' not supported",
+@@ -772,12 +760,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+       /* Start the iterations */
+       iteration = 1;
+       sync_global_to_guest(vm, iteration);
+-      host_quit = false;
++      WRITE_ONCE(host_quit, false);
+       host_dirty_count = 0;
+       host_clear_count = 0;
+       host_track_next_count = 0;
+       WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
++      /*
++       * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
++       * that the main task and vCPU worker were synchronized and completed
++       * verification of all iterations.
++       */
++      sem_getvalue(&sem_vcpu_stop, &sem_val);
++      TEST_ASSERT_EQ(sem_val, 0);
++      sem_getvalue(&sem_vcpu_cont, &sem_val);
++      TEST_ASSERT_EQ(sem_val, 0);
++
+       pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
+       while (iteration < p->iterations) {
+@@ -803,15 +801,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+               assert(host_log_mode == LOG_MODE_DIRTY_RING ||
+                      atomic_read(&vcpu_sync_stop_requested) == false);
+               vm_dirty_log_verify(mode, bmap);
+-              sem_post(&sem_vcpu_cont);
+-              iteration++;
++              /*
++               * Set host_quit before sem_vcpu_cont in the final iteration to
++               * ensure that the vCPU worker doesn't resume the guest.  As
++               * above, the dirty ring test may stop and wait even when not
++               * explicitly request to do so, i.e. would hang waiting for a
++               * "continue" if it's allowed to resume the guest.
++               */
++              if (++iteration == p->iterations)
++                      WRITE_ONCE(host_quit, true);
++
++              sem_post(&sem_vcpu_cont);
+               sync_global_to_guest(vm, iteration);
+       }
+-      /* Tell the vcpu thread to quit */
+-      host_quit = true;
+-      log_mode_before_vcpu_join();
+       pthread_join(vcpu_thread, NULL);
+       pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
+-- 
+2.43.0
+
diff --git a/queue-6.1/lan966x-fix-crash-when-adding-interface-under-a-lag.patch b/queue-6.1/lan966x-fix-crash-when-adding-interface-under-a-lag.patch
new file mode 100644 (file)
index 0000000..dd1aa3f
--- /dev/null
@@ -0,0 +1,67 @@
+From b79cf074ffc924c8b207b79e493ea4eeaf3d7a54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 13:30:54 +0100
+Subject: lan966x: Fix crash when adding interface under a lag
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit 15faa1f67ab405d47789d4702f587ec7df7ef03e ]
+
+There is a crash when adding one of the lan966x interfaces under a lag
+interface. The issue can be reproduced like this:
+ip link add name bond0 type bond miimon 100 mode balance-xor
+ip link set dev eth0 master bond0
+
+The reason is because when adding a interface under the lag it would go
+through all the ports and try to figure out which other ports are under
+that lag interface. And the issue is that lan966x can have ports that are
+NULL pointer as they are not probed. So then iterating over these ports
+it would just crash as they are NULL pointers.
+The fix consists in actually checking for NULL pointers before accessing
+something from the ports. Like we do in other places.
+
+Fixes: cabc9d49333d ("net: lan966x: Add lag support for lan966x")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240206123054.3052966-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan966x/lan966x_lag.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+index 41fa2523d91d..5f2cd9a8cf8f 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+       /* Now, set PGIDs for each active LAG */
+       for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+-              struct net_device *bond = lan966x->ports[lag]->bond;
++              struct lan966x_port *port = lan966x->ports[lag];
+               int num_active_ports = 0;
++              struct net_device *bond;
+               unsigned long bond_mask;
+               u8 aggr_idx[16];
+-              if (!bond || (visited & BIT(lag)))
++              if (!port || !port->bond || (visited & BIT(lag)))
+                       continue;
++              bond = port->bond;
+               bond_mask = lan966x_lag_get_mask(lan966x, bond);
+               for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+                       struct lan966x_port *port = lan966x->ports[p];
++                      if (!port)
++                              continue;
++
+                       lan_wr(ANA_PGID_PGID_SET(bond_mask),
+                              lan966x, ANA_PGID(p));
+                       if (port->lag_tx_active)
+-- 
+2.43.0
+
diff --git a/queue-6.1/mips-add-memory-clobber-to-csum_ipv6_magic-inline-as.patch b/queue-6.1/mips-add-memory-clobber-to-csum_ipv6_magic-inline-as.patch
new file mode 100644 (file)
index 0000000..bcd242d
--- /dev/null
@@ -0,0 +1,54 @@
+From 0d295ae8d58fb60a602903705e5912ad80254b6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Feb 2024 08:08:37 -0800
+Subject: MIPS: Add 'memory' clobber to csum_ipv6_magic() inline assembler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit d55347bfe4e66dce2e1e7501e5492f4af3e315f8 ]
+
+After 'lib: checksum: Use aligned accesses for ip_fast_csum and
+csum_ipv6_magic tests' was applied, the test_csum_ipv6_magic unit test
+started failing for all mips platforms, both little and bit endian.
+Oddly enough, adding debug code into test_csum_ipv6_magic() made the
+problem disappear.
+
+The gcc manual says:
+
+"The "memory" clobber tells the compiler that the assembly code performs
+ memory reads or writes to items other than those listed in the input
+ and output operands (for example, accessing the memory pointed to by one
+ of the input parameters)
+"
+
+This is definitely the case for csum_ipv6_magic(). Indeed, adding the
+'memory' clobber fixes the problem.
+
+Cc: Charlie Jenkins <charlie@rivosinc.com>
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/checksum.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
+index 4044eaf989ac..0921ddda11a4 100644
+--- a/arch/mips/include/asm/checksum.h
++++ b/arch/mips/include/asm/checksum.h
+@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+       "       .set    pop"
+       : "=&r" (sum), "=&r" (tmp)
+       : "r" (saddr), "r" (daddr),
+-        "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
++        "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
++      : "memory");
+       return csum_fold(sum);
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-openvswitch-limit-the-number-of-recursions-from-.patch b/queue-6.1/net-openvswitch-limit-the-number-of-recursions-from-.patch
new file mode 100644 (file)
index 0000000..eef0682
--- /dev/null
@@ -0,0 +1,214 @@
+From cb00ea87654a4f2afe3b9965082be79bb32525c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Feb 2024 08:24:15 -0500
+Subject: net: openvswitch: limit the number of recursions from action sets
+
+From: Aaron Conole <aconole@redhat.com>
+
+[ Upstream commit 6e2f90d31fe09f2b852de25125ca875aabd81367 ]
+
+The ovs module allows for some actions to recursively contain an action
+list for complex scenarios, such as sampling, checking lengths, etc.
+When these actions are copied into the internal flow table, they are
+evaluated to validate that such actions make sense, and these calls
+happen recursively.
+
+The ovs-vswitchd userspace won't emit more than 16 recursion levels
+deep.  However, the module has no such limit and will happily accept
+limits larger than 16 levels nested.  Prevent this by tracking the
+number of recursions happening and manually limiting it to 16 levels
+nested.
+
+The initial implementation of the sample action would track this depth
+and prevent more than 3 levels of recursion, but this was removed to
+support the clone use case, rather than limited at the current userspace
+limit.
+
+Fixes: 798c166173ff ("openvswitch: Optimize sample action for the clone use cases")
+Signed-off-by: Aaron Conole <aconole@redhat.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240207132416.1488485-2-aconole@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/flow_netlink.c | 49 +++++++++++++++++++++++-----------
+ 1 file changed, 33 insertions(+), 16 deletions(-)
+
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index ead5418c126e..e3c85ceb1f0a 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -47,6 +47,7 @@ struct ovs_len_tbl {
+ #define OVS_ATTR_NESTED -1
+ #define OVS_ATTR_VARIABLE -2
++#define OVS_COPY_ACTIONS_MAX_DEPTH 16
+ static bool actions_may_change_flow(const struct nlattr *actions)
+ {
+@@ -2543,13 +2544,15 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+                                 const struct sw_flow_key *key,
+                                 struct sw_flow_actions **sfa,
+                                 __be16 eth_type, __be16 vlan_tci,
+-                                u32 mpls_label_count, bool log);
++                                u32 mpls_label_count, bool log,
++                                u32 depth);
+ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
+                                   const struct sw_flow_key *key,
+                                   struct sw_flow_actions **sfa,
+                                   __be16 eth_type, __be16 vlan_tci,
+-                                  u32 mpls_label_count, bool log, bool last)
++                                  u32 mpls_label_count, bool log, bool last,
++                                  u32 depth)
+ {
+       const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+       const struct nlattr *probability, *actions;
+@@ -2600,7 +2603,8 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
+               return err;
+       err = __ovs_nla_copy_actions(net, actions, key, sfa,
+-                                   eth_type, vlan_tci, mpls_label_count, log);
++                                   eth_type, vlan_tci, mpls_label_count, log,
++                                   depth + 1);
+       if (err)
+               return err;
+@@ -2615,7 +2619,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
+                                    const struct sw_flow_key *key,
+                                    struct sw_flow_actions **sfa,
+                                    __be16 eth_type, __be16 vlan_tci,
+-                                   u32 mpls_label_count, bool log)
++                                   u32 mpls_label_count, bool log,
++                                   u32 depth)
+ {
+       const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1];
+       int start, action_start, err, rem;
+@@ -2658,7 +2663,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
+               return action_start;
+       err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
+-                                   vlan_tci, mpls_label_count, log);
++                                   vlan_tci, mpls_label_count, log,
++                                   depth + 1);
+       if (err)
+               return err;
+@@ -2672,7 +2678,8 @@ static int validate_and_copy_clone(struct net *net,
+                                  const struct sw_flow_key *key,
+                                  struct sw_flow_actions **sfa,
+                                  __be16 eth_type, __be16 vlan_tci,
+-                                 u32 mpls_label_count, bool log, bool last)
++                                 u32 mpls_label_count, bool log, bool last,
++                                 u32 depth)
+ {
+       int start, err;
+       u32 exec;
+@@ -2692,7 +2699,8 @@ static int validate_and_copy_clone(struct net *net,
+               return err;
+       err = __ovs_nla_copy_actions(net, attr, key, sfa,
+-                                   eth_type, vlan_tci, mpls_label_count, log);
++                                   eth_type, vlan_tci, mpls_label_count, log,
++                                   depth + 1);
+       if (err)
+               return err;
+@@ -3061,7 +3069,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+                                          struct sw_flow_actions **sfa,
+                                          __be16 eth_type, __be16 vlan_tci,
+                                          u32 mpls_label_count,
+-                                         bool log, bool last)
++                                         bool log, bool last, u32 depth)
+ {
+       const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
+       struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
+@@ -3109,7 +3117,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+               return nested_acts_start;
+       err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
+-                                   eth_type, vlan_tci, mpls_label_count, log);
++                                   eth_type, vlan_tci, mpls_label_count, log,
++                                   depth + 1);
+       if (err)
+               return err;
+@@ -3122,7 +3131,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+               return nested_acts_start;
+       err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
+-                                   eth_type, vlan_tci, mpls_label_count, log);
++                                   eth_type, vlan_tci, mpls_label_count, log,
++                                   depth + 1);
+       if (err)
+               return err;
+@@ -3150,12 +3160,16 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+                                 const struct sw_flow_key *key,
+                                 struct sw_flow_actions **sfa,
+                                 __be16 eth_type, __be16 vlan_tci,
+-                                u32 mpls_label_count, bool log)
++                                u32 mpls_label_count, bool log,
++                                u32 depth)
+ {
+       u8 mac_proto = ovs_key_mac_proto(key);
+       const struct nlattr *a;
+       int rem, err;
++      if (depth > OVS_COPY_ACTIONS_MAX_DEPTH)
++              return -EOVERFLOW;
++
+       nla_for_each_nested(a, attr, rem) {
+               /* Expected argument lengths, (u32)-1 for variable length. */
+               static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+@@ -3350,7 +3364,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+                       err = validate_and_copy_sample(net, a, key, sfa,
+                                                      eth_type, vlan_tci,
+                                                      mpls_label_count,
+-                                                     log, last);
++                                                     log, last, depth);
+                       if (err)
+                               return err;
+                       skip_copy = true;
+@@ -3421,7 +3435,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+                       err = validate_and_copy_clone(net, a, key, sfa,
+                                                     eth_type, vlan_tci,
+                                                     mpls_label_count,
+-                                                    log, last);
++                                                    log, last, depth);
+                       if (err)
+                               return err;
+                       skip_copy = true;
+@@ -3435,7 +3449,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+                                                             eth_type,
+                                                             vlan_tci,
+                                                             mpls_label_count,
+-                                                            log, last);
++                                                            log, last,
++                                                            depth);
+                       if (err)
+                               return err;
+                       skip_copy = true;
+@@ -3445,7 +3460,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+               case OVS_ACTION_ATTR_DEC_TTL:
+                       err = validate_and_copy_dec_ttl(net, a, key, sfa,
+                                                       eth_type, vlan_tci,
+-                                                      mpls_label_count, log);
++                                                      mpls_label_count, log,
++                                                      depth);
+                       if (err)
+                               return err;
+                       skip_copy = true;
+@@ -3485,7 +3501,8 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+       (*sfa)->orig_len = nla_len(attr);
+       err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
+-                                   key->eth.vlan.tci, mpls_label_count, log);
++                                   key->eth.vlan.tci, mpls_label_count, log,
++                                   0);
+       if (err)
+               ovs_nla_free_flow_actions(*sfa);
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-sysfs-fix-sys-class-net-iface-path-for-statistic.patch b/queue-6.1/net-sysfs-fix-sys-class-net-iface-path-for-statistic.patch
new file mode 100644 (file)
index 0000000..d1c05d1
--- /dev/null
@@ -0,0 +1,244 @@
+From 4bf6f3db282b51969fb4be39db3823714d885094 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Feb 2024 01:55:18 -0800
+Subject: net: sysfs: Fix /sys/class/net/<iface> path for statistics
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 5b3fbd61b9d1f4ed2db95aaf03f9adae0373784d ]
+
+The Documentation/ABI/testing/sysfs-class-net-statistics documentation
+is pointing to the wrong path for the interface.  Documentation is
+pointing to /sys/class/<iface>, instead of /sys/class/net/<iface>.
+
+Fix it by adding the `net/` directory before the interface.
+
+Fixes: 6044f9700645 ("net: sysfs: document /sys/class/net/statistics/*")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ABI/testing/sysfs-class-net-statistics    | 48 +++++++++----------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
+index 55db27815361..53e508c6936a 100644
+--- a/Documentation/ABI/testing/sysfs-class-net-statistics
++++ b/Documentation/ABI/testing/sysfs-class-net-statistics
+@@ -1,4 +1,4 @@
+-What:         /sys/class/<iface>/statistics/collisions
++What:         /sys/class/net/<iface>/statistics/collisions
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -6,7 +6,7 @@ Description:
+               Indicates the number of collisions seen by this network device.
+               This value might not be relevant with all MAC layers.
+-What:         /sys/class/<iface>/statistics/multicast
++What:         /sys/class/net/<iface>/statistics/multicast
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -14,7 +14,7 @@ Description:
+               Indicates the number of multicast packets received by this
+               network device.
+-What:         /sys/class/<iface>/statistics/rx_bytes
++What:         /sys/class/net/<iface>/statistics/rx_bytes
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -23,7 +23,7 @@ Description:
+               See the network driver for the exact meaning of when this
+               value is incremented.
+-What:         /sys/class/<iface>/statistics/rx_compressed
++What:         /sys/class/net/<iface>/statistics/rx_compressed
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -32,7 +32,7 @@ Description:
+               network device. This value might only be relevant for interfaces
+               that support packet compression (e.g: PPP).
+-What:         /sys/class/<iface>/statistics/rx_crc_errors
++What:         /sys/class/net/<iface>/statistics/rx_crc_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -41,7 +41,7 @@ Description:
+               by this network device. Note that the specific meaning might
+               depend on the MAC layer used by the interface.
+-What:         /sys/class/<iface>/statistics/rx_dropped
++What:         /sys/class/net/<iface>/statistics/rx_dropped
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -51,7 +51,7 @@ Description:
+               packet processing. See the network driver for the exact
+               meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_errors
++What:         /sys/class/net/<iface>/statistics/rx_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -59,7 +59,7 @@ Description:
+               Indicates the number of receive errors on this network device.
+               See the network driver for the exact meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_fifo_errors
++What:         /sys/class/net/<iface>/statistics/rx_fifo_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -68,7 +68,7 @@ Description:
+               network device. See the network driver for the exact
+               meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_frame_errors
++What:         /sys/class/net/<iface>/statistics/rx_frame_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -78,7 +78,7 @@ Description:
+               on the MAC layer protocol used. See the network driver for
+               the exact meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_length_errors
++What:         /sys/class/net/<iface>/statistics/rx_length_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -87,7 +87,7 @@ Description:
+               error, oversized or undersized. See the network driver for the
+               exact meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_missed_errors
++What:         /sys/class/net/<iface>/statistics/rx_missed_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -96,7 +96,7 @@ Description:
+               due to lack of capacity in the receive side. See the network
+               driver for the exact meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_nohandler
++What:         /sys/class/net/<iface>/statistics/rx_nohandler
+ Date:         February 2016
+ KernelVersion:        4.6
+ Contact:      netdev@vger.kernel.org
+@@ -104,7 +104,7 @@ Description:
+               Indicates the number of received packets that were dropped on
+               an inactive device by the network core.
+-What:         /sys/class/<iface>/statistics/rx_over_errors
++What:         /sys/class/net/<iface>/statistics/rx_over_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -114,7 +114,7 @@ Description:
+               (e.g: larger than MTU). See the network driver for the exact
+               meaning of this value.
+-What:         /sys/class/<iface>/statistics/rx_packets
++What:         /sys/class/net/<iface>/statistics/rx_packets
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -122,7 +122,7 @@ Description:
+               Indicates the total number of good packets received by this
+               network device.
+-What:         /sys/class/<iface>/statistics/tx_aborted_errors
++What:         /sys/class/net/<iface>/statistics/tx_aborted_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -132,7 +132,7 @@ Description:
+               a medium collision). See the network driver for the exact
+               meaning of this value.
+-What:         /sys/class/<iface>/statistics/tx_bytes
++What:         /sys/class/net/<iface>/statistics/tx_bytes
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -143,7 +143,7 @@ Description:
+               transmitted packets or all packets that have been queued for
+               transmission.
+-What:         /sys/class/<iface>/statistics/tx_carrier_errors
++What:         /sys/class/net/<iface>/statistics/tx_carrier_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -152,7 +152,7 @@ Description:
+               because of carrier errors (e.g: physical link down). See the
+               network driver for the exact meaning of this value.
+-What:         /sys/class/<iface>/statistics/tx_compressed
++What:         /sys/class/net/<iface>/statistics/tx_compressed
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -161,7 +161,7 @@ Description:
+               this might only be relevant for devices that support
+               compression (e.g: PPP).
+-What:         /sys/class/<iface>/statistics/tx_dropped
++What:         /sys/class/net/<iface>/statistics/tx_dropped
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -170,7 +170,7 @@ Description:
+               See the driver for the exact reasons as to why the packets were
+               dropped.
+-What:         /sys/class/<iface>/statistics/tx_errors
++What:         /sys/class/net/<iface>/statistics/tx_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -179,7 +179,7 @@ Description:
+               a network device. See the driver for the exact reasons as to
+               why the packets were dropped.
+-What:         /sys/class/<iface>/statistics/tx_fifo_errors
++What:         /sys/class/net/<iface>/statistics/tx_fifo_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -188,7 +188,7 @@ Description:
+               FIFO error. See the driver for the exact reasons as to why the
+               packets were dropped.
+-What:         /sys/class/<iface>/statistics/tx_heartbeat_errors
++What:         /sys/class/net/<iface>/statistics/tx_heartbeat_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -197,7 +197,7 @@ Description:
+               reported as heartbeat errors. See the driver for the exact
+               reasons as to why the packets were dropped.
+-What:         /sys/class/<iface>/statistics/tx_packets
++What:         /sys/class/net/<iface>/statistics/tx_packets
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+@@ -206,7 +206,7 @@ Description:
+               device. See the driver for whether this reports the number of all
+               attempted or successful transmissions.
+-What:         /sys/class/<iface>/statistics/tx_window_errors
++What:         /sys/class/net/<iface>/statistics/tx_window_errors
+ Date:         April 2005
+ KernelVersion:        2.6.12
+ Contact:      netdev@vger.kernel.org
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-tls-factor-out-tls_-crypt_async_wait.patch b/queue-6.1/net-tls-factor-out-tls_-crypt_async_wait.patch
new file mode 100644 (file)
index 0000000..24d79ef
--- /dev/null
@@ -0,0 +1,189 @@
+From 840e58b03220c51999b25d2a021f41876c740373 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 17:18:18 -0800
+Subject: net: tls: factor out tls_*crypt_async_wait()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1 ]
+
+Factor out waiting for async encrypt and decrypt to finish.
+There are already multiple copies and a subsequent fix will
+need more. No functional changes.
+
+Note that crypto_wait_req() returns wait->err
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 96 +++++++++++++++++++++++-------------------------
+ 1 file changed, 45 insertions(+), 51 deletions(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 47ae429e50e3..b146be099a3f 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -229,6 +229,20 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+       spin_unlock_bh(&ctx->decrypt_compl_lock);
+ }
++static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
++{
++      int pending;
++
++      spin_lock_bh(&ctx->decrypt_compl_lock);
++      reinit_completion(&ctx->async_wait.completion);
++      pending = atomic_read(&ctx->decrypt_pending);
++      spin_unlock_bh(&ctx->decrypt_compl_lock);
++      if (pending)
++              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++
++      return ctx->async_wait.err;
++}
++
+ static int tls_do_decryption(struct sock *sk,
+                            struct scatterlist *sgin,
+                            struct scatterlist *sgout,
+@@ -496,6 +510,28 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+               schedule_delayed_work(&ctx->tx_work.work, 1);
+ }
++static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
++{
++      int pending;
++
++      spin_lock_bh(&ctx->encrypt_compl_lock);
++      ctx->async_notify = true;
++
++      pending = atomic_read(&ctx->encrypt_pending);
++      spin_unlock_bh(&ctx->encrypt_compl_lock);
++      if (pending)
++              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      else
++              reinit_completion(&ctx->async_wait.completion);
++
++      /* There can be no concurrent accesses, since we have no
++       * pending encrypt operations
++       */
++      WRITE_ONCE(ctx->async_notify, false);
++
++      return ctx->async_wait.err;
++}
++
+ static int tls_do_encryption(struct sock *sk,
+                            struct tls_context *tls_ctx,
+                            struct tls_sw_context_tx *ctx,
+@@ -953,7 +989,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+       int num_zc = 0;
+       int orig_size;
+       int ret = 0;
+-      int pending;
+       if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+                              MSG_CMSG_COMPAT))
+@@ -1122,24 +1157,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+       if (!num_async) {
+               goto send_end;
+       } else if (num_zc) {
+-              /* Wait for pending encryptions to get completed */
+-              spin_lock_bh(&ctx->encrypt_compl_lock);
+-              ctx->async_notify = true;
+-
+-              pending = atomic_read(&ctx->encrypt_pending);
+-              spin_unlock_bh(&ctx->encrypt_compl_lock);
+-              if (pending)
+-                      crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+-              else
+-                      reinit_completion(&ctx->async_wait.completion);
+-
+-              /* There can be no concurrent accesses, since we have no
+-               * pending encrypt operations
+-               */
+-              WRITE_ONCE(ctx->async_notify, false);
++              int err;
+-              if (ctx->async_wait.err) {
+-                      ret = ctx->async_wait.err;
++              /* Wait for pending encryptions to get completed */
++              err = tls_encrypt_async_wait(ctx);
++              if (err) {
++                      ret = err;
+                       copied = 0;
+               }
+       }
+@@ -1171,7 +1194,6 @@ void tls_sw_splice_eof(struct socket *sock)
+       ssize_t copied = 0;
+       bool retrying = false;
+       int ret = 0;
+-      int pending;
+       if (!ctx->open_rec)
+               return;
+@@ -1203,22 +1225,7 @@ void tls_sw_splice_eof(struct socket *sock)
+       }
+       /* Wait for pending encryptions to get completed */
+-      spin_lock_bh(&ctx->encrypt_compl_lock);
+-      ctx->async_notify = true;
+-
+-      pending = atomic_read(&ctx->encrypt_pending);
+-      spin_unlock_bh(&ctx->encrypt_compl_lock);
+-      if (pending)
+-              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+-      else
+-              reinit_completion(&ctx->async_wait.completion);
+-
+-      /* There can be no concurrent accesses, since we have no pending
+-       * encrypt operations
+-       */
+-      WRITE_ONCE(ctx->async_notify, false);
+-
+-      if (ctx->async_wait.err)
++      if (tls_encrypt_async_wait(ctx))
+               goto unlock;
+       /* Transmit if any encryptions have completed */
+@@ -2197,16 +2204,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ recv_end:
+       if (async) {
+-              int ret, pending;
++              int ret;
+               /* Wait for all previously submitted records to be decrypted */
+-              spin_lock_bh(&ctx->decrypt_compl_lock);
+-              reinit_completion(&ctx->async_wait.completion);
+-              pending = atomic_read(&ctx->decrypt_pending);
+-              spin_unlock_bh(&ctx->decrypt_compl_lock);
+-              ret = 0;
+-              if (pending)
+-                      ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++              ret = tls_decrypt_async_wait(ctx);
+               __skb_queue_purge(&ctx->async_hold);
+               if (ret) {
+@@ -2425,16 +2426,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+       struct tls_rec *rec, *tmp;
+-      int pending;
+       /* Wait for any pending async encryptions to complete */
+-      spin_lock_bh(&ctx->encrypt_compl_lock);
+-      ctx->async_notify = true;
+-      pending = atomic_read(&ctx->encrypt_pending);
+-      spin_unlock_bh(&ctx->encrypt_compl_lock);
+-
+-      if (pending)
+-              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      tls_encrypt_async_wait(ctx);
+       tls_tx_records(sk, -1);
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-tls-fix-returned-read-length-with-async-decrypt.patch b/queue-6.1/net-tls-fix-returned-read-length-with-async-decrypt.patch
new file mode 100644 (file)
index 0000000..efb14a2
--- /dev/null
@@ -0,0 +1,40 @@
+From 0e9d9558927a792f4cae934b31bbb0c586ae507b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 17:18:24 -0800
+Subject: net: tls: fix returned read length with async decrypt
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit ac437a51ce662364062f704e321227f6728e6adc ]
+
+We double count async, non-zc rx data. The previous fix was
+lucky because if we fully zc, async_copy_bytes is 0 so we add 0.
+Decrypted already has all the bytes we handled, in all cases,
+so we don't have to adjust anything; delete the erroneous line.
+
+Fixes: 4d42cd6bc2ac ("tls: rx: fix return value for async crypto")
+Co-developed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index d651c50746a8..09d258bb2df7 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2202,7 +2202,6 @@ int tls_sw_recvmsg(struct sock *sk,
+               else
+                       err = process_rx_list(ctx, msg, &control, 0,
+                                             async_copy_bytes, is_peek);
+-              decrypted += max(err, 0);
+       }
+       copied += decrypted;
+-- 
+2.43.0
+
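The accounting error fixed above is easy to see in isolation: the running "decrypted" total already includes the bytes recovered from the partially read records, so adding the process_rx_list() return value counts them a second time. A tiny standalone C sketch of that arithmetic follows; the numbers and names are made up for illustration and are not the tls_sw code.

#include <stdio.h>

int main(void)
{
        int decrypted = 1000;       /* bytes already accounted during decrypt */
        int rx_list_bytes = 200;    /* a subset of the above, copied from rx_list */

        int buggy_len = decrypted + rx_list_bytes;  /* double counts 200 bytes */
        int fixed_len = decrypted;                  /* what the fix reports */

        printf("buggy read length: %d, fixed: %d\n", buggy_len, fixed_len);
        return 0;
}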
diff --git a/queue-6.1/net-tls-fix-use-after-free-with-partial-reads-and-as.patch b/queue-6.1/net-tls-fix-use-after-free-with-partial-reads-and-as.patch
new file mode 100644 (file)
index 0000000..3d79ce1
--- /dev/null
@@ -0,0 +1,64 @@
+From f9e855e542c0503e87715fcc5e12e2fefac655c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 17:18:22 -0800
+Subject: net: tls: fix use-after-free with partial reads and async decrypt
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit 32b55c5ff9103b8508c1e04bfa5a08c64e7a925f ]
+
+tls_decrypt_sg doesn't take a reference on the pages from clear_skb,
+so the put_page() in tls_decrypt_done releases them, and we trigger
+a use-after-free in process_rx_list when we try to read from the
+partially-read skb.
+
+Fixes: fd31f3996af2 ("tls: rx: decrypt into a fresh skb")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index ee11932237c0..d651c50746a8 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -62,6 +62,7 @@ struct tls_decrypt_ctx {
+       u8 iv[MAX_IV_SIZE];
+       u8 aad[TLS_MAX_AAD_SIZE];
+       u8 tail;
++      bool free_sgout;
+       struct scatterlist sg[];
+ };
+@@ -186,7 +187,6 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+       struct aead_request *aead_req = crypto_get_completion_data(data);
+       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+       struct scatterlist *sgout = aead_req->dst;
+-      struct scatterlist *sgin = aead_req->src;
+       struct tls_sw_context_rx *ctx;
+       struct tls_decrypt_ctx *dctx;
+       struct tls_context *tls_ctx;
+@@ -212,7 +212,7 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+       }
+       /* Free the destination pages if skb was not decrypted inplace */
+-      if (sgout != sgin) {
++      if (dctx->free_sgout) {
+               /* Skip the first S/G entry as it points to AAD */
+               for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+                       if (!sg)
+@@ -1653,6 +1653,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+       } else if (out_sg) {
+               memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
+       }
++      dctx->free_sgout = !!pages;
+       /* Prepare and submit AEAD request */
+       err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+-- 
+2.43.0
+
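The underlying problem above is an ownership question: the completion handler freed the output pages based on a pointer comparison (sgout != sgin) even when those pages still backed a partially read skb. The fix records the decision explicitly at allocation time instead. Below is a hedged userspace sketch of that ownership-flag pattern; the structure and helper names are invented for illustration, not the tls_sw internals.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct decrypt_ctx {
        char *out;
        bool free_out;           /* set only when 'out' was allocated here */
};

static int setup_output(struct decrypt_ctx *ctx, char *caller_buf, size_t len)
{
        if (caller_buf) {
                ctx->out = caller_buf;        /* borrowed: never free it */
                ctx->free_out = false;
                return 0;
        }
        ctx->out = malloc(len);               /* owned: free on completion */
        if (!ctx->out)
                return -1;
        ctx->free_out = true;
        return 0;
}

static void decrypt_done(struct decrypt_ctx *ctx)
{
        if (ctx->free_out)                    /* the explicit ownership check */
                free(ctx->out);
        ctx->out = NULL;
}

int main(void)
{
        struct decrypt_ctx ctx;
        char partial_read_buf[64];            /* still needed after completion */

        if (setup_output(&ctx, partial_read_buf, sizeof(partial_read_buf)))
                return 1;
        decrypt_done(&ctx);                   /* borrowed buffer is left alone */
        printf("caller's buffer survives the completion handler\n");
        return 0;
}

Recording ownership when the buffer is set up keeps the completion path from having to re-derive it from pointer comparisons, which is exactly where the original code went wrong.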
diff --git a/queue-6.1/nouveau-svm-fix-kvcalloc-argument-order.patch b/queue-6.1/nouveau-svm-fix-kvcalloc-argument-order.patch
new file mode 100644 (file)
index 0000000..2fcee76
--- /dev/null
@@ -0,0 +1,46 @@
+From cb4388deb4ff83627a5739ace734d779649d0788 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Feb 2024 12:22:17 +0100
+Subject: nouveau/svm: fix kvcalloc() argument order
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 2c80a2b715df75881359d07dbaacff8ad411f40e ]
+
+The conversion to kvcalloc() mixed up the object size and count
+arguments, causing a warning:
+
+drivers/gpu/drm/nouveau/nouveau_svm.c: In function 'nouveau_svm_fault_buffer_ctor':
+drivers/gpu/drm/nouveau/nouveau_svm.c:1010:40: error: 'kvcalloc' sizes specified with 'sizeof' in the earlier argument and not in the later argument [-Werror=calloc-transposed-args]
+ 1010 |         buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
+      |                                        ^
+drivers/gpu/drm/nouveau/nouveau_svm.c:1010:40: note: earlier argument should specify number of elements, later size of each element
+
+The behavior is still correct aside from the warning, but fixing it avoids
+the warnings and can help the compiler track the individual objects better.
+
+Fixes: 71e4bbca070e ("nouveau/svm: Use kvcalloc() instead of kvzalloc()")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Danilo Krummrich <dakr@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240212112230.1117284-1-arnd@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_svm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index 31a5b81ee9fc..be6674fb1af7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -997,7 +997,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
+       if (ret)
+               return ret;
+-      buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
++      buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
+       if (!buffer->fault)
+               return -ENOMEM;
+-- 
+2.43.0
+
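The nouveau fix above is purely about argument order: calloc-style allocators take the element count first and the element size second. A small userspace sketch using plain calloc(); the struct here is a stand-in, not the real nouveau fault buffer.

#include <stdio.h>
#include <stdlib.h>

struct fault { int inst; int addr; };   /* illustrative element type */

int main(void)
{
        size_t entries = 16;

        /* Correct order: number of elements first, size of each second. */
        struct fault *faults = calloc(entries, sizeof(*faults));
        if (!faults)
                return 1;

        printf("allocated %zu zeroed elements of %zu bytes each\n",
               entries, sizeof(*faults));
        free(faults);
        return 0;
}

The compiler warning quoted in the commit message fires precisely because sizeof() appeared in the count position of the transposed call.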
diff --git a/queue-6.1/of-property-improve-finding-the-supplier-of-a-remote.patch b/queue-6.1/of-property-improve-finding-the-supplier-of-a-remote.patch
new file mode 100644 (file)
index 0000000..4c7bb7a
--- /dev/null
@@ -0,0 +1,58 @@
+From 2fe0645dff1a08d8274c8c1b5cffd11be79c709e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 17:18:01 -0800
+Subject: of: property: Improve finding the supplier of a remote-endpoint
+ property
+
+From: Saravana Kannan <saravanak@google.com>
+
+[ Upstream commit 782bfd03c3ae2c0e6e01b661b8e18f1de50357be ]
+
+After commit 4a032827daa8 ("of: property: Simplify of_link_to_phandle()"),
+remote-endpoint properties created a fwnode link from the consumer device
+to the supplier endpoint. This is a tiny bit inefficient (not buggy) when
+trying to create device links or detecting cycles. So, improve this the
+same way we improved finding the consumer of a remote-endpoint property.
+
+Fixes: 4a032827daa8 ("of: property: Simplify of_link_to_phandle()")
+Signed-off-by: Saravana Kannan <saravanak@google.com>
+Link: https://lore.kernel.org/r/20240207011803.2637531-3-saravanak@google.com
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/property.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index b636777e6f7c..e1946cc17030 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1261,7 +1261,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
+-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
+ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
+ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
+ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
+@@ -1326,6 +1325,17 @@ static struct device_node *parse_interrupts(struct device_node *np,
+       return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
+ }
++static struct device_node *parse_remote_endpoint(struct device_node *np,
++                                               const char *prop_name,
++                                               int index)
++{
++      /* Return NULL for index > 0 to signify end of remote-endpoints. */
++      if (!index || strcmp(prop_name, "remote-endpoint"))
++              return NULL;
++
++      return of_graph_get_remote_port_parent(np);
++}
++
+ static const struct supplier_bindings of_supplier_bindings[] = {
+       { .parse_prop = parse_clocks, },
+       { .parse_prop = parse_interconnects, },
+-- 
+2.43.0
+
diff --git a/queue-6.1/of-unittest-fix-compile-in-the-non-dynamic-case.patch b/queue-6.1/of-unittest-fix-compile-in-the-non-dynamic-case.patch
new file mode 100644 (file)
index 0000000..3859788
--- /dev/null
@@ -0,0 +1,68 @@
+From 84b598f116008cec9a7b64e90cd14ba41448cd7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jan 2024 20:25:56 +0100
+Subject: of: unittest: Fix compile in the non-dynamic case
+
+From: Christian A. Ehrhardt <lk@c--e.de>
+
+[ Upstream commit 607aad1e4356c210dbef9022955a3089377909b2 ]
+
+If CONFIG_OF_KOBJ is not set, a device_node does not contain a
+kobj and attempts to access the embedded kobj via kref_read break
+the compile.
+
+Replace affected kref_read calls with a macro that reads the
+refcount if it exists and returns 1 if there is no embedded kobj.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202401291740.VP219WIz-lkp@intel.com/
+Fixes: 4dde83569832 ("of: Fix double free in of_parse_phandle_with_args_map")
+Signed-off-by: Christian A. Ehrhardt <lk@c--e.de>
+Link: https://lore.kernel.org/r/20240129192556.403271-1-lk@c--e.de
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/unittest.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index e541a8960f1d..ce1386074e66 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -49,6 +49,12 @@ static struct unittest_results {
+       failed; \
+ })
++#ifdef CONFIG_OF_KOBJ
++#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
++#else
++#define OF_KREF_READ(NODE) 1
++#endif
++
+ /*
+  * Expected message may have a message level other than KERN_INFO.
+  * Print the expected message only if the current loglevel will allow
+@@ -562,7 +568,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+                       pr_err("missing testcase data\n");
+                       return;
+               }
+-              prefs[i] = kref_read(&p[i]->kobj.kref);
++              prefs[i] = OF_KREF_READ(p[i]);
+       }
+       rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
+@@ -685,9 +691,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+       unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+       for (i = 0; i < ARRAY_SIZE(p); ++i) {
+-              unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
++              unittest(prefs[i] == OF_KREF_READ(p[i]),
+                        "provider%d: expected:%d got:%d\n",
+-                       i, prefs[i], kref_read(&p[i]->kobj.kref));
++                       i, prefs[i], OF_KREF_READ(p[i]));
+               of_node_put(p[i]);
+       }
+ }
+-- 
+2.43.0
+
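The unittest fix above uses a common kernel idiom: when a field only exists under a config option, wrap the accessor in a macro that degrades to a constant, so callers compile either way. Here is a userspace sketch of that pattern under an assumed CONFIG_HAS_REFCOUNT switch; the names are illustrative, not the of/unittest code.

#include <stdio.h>

#define CONFIG_HAS_REFCOUNT 1           /* flip to 0 to test the fallback */

struct node {
#if CONFIG_HAS_REFCOUNT
        int refcount;
#endif
        const char *name;
};

#if CONFIG_HAS_REFCOUNT
#define NODE_REF_READ(n) ((n)->refcount)
#else
#define NODE_REF_READ(n) 1              /* no refcount field: report 1 */
#endif

int main(void)
{
        struct node n = {
#if CONFIG_HAS_REFCOUNT
                .refcount = 3,
#endif
                .name = "demo",
        };

        printf("%s refcount: %d\n", n.name, NODE_REF_READ(&n));
        return 0;
}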
diff --git a/queue-6.1/series b/queue-6.1/series
index 6250b58b9c32dcfceb4ee24fe5dec791cac702ed..c86ffb077b8f6ae414238fcad6b7973c0f94d5f3 100644 (file)
@@ -10,3 +10,24 @@ btrfs-don-t-reserve-space-for-checksums-when-writing-to-nocow-files.patch
 btrfs-reject-encoded-write-if-inode-has-nodatasum-flag-set.patch
 btrfs-don-t-drop-extent_map-for-free-space-inode-on-write-error.patch
 driver-core-fix-device_link_flag_is_sync_state_only.patch
+of-unittest-fix-compile-in-the-non-dynamic-case.patch
+kvm-selftests-clear-dirty-ring-states-between-two-mo.patch
+kvm-selftests-fix-a-semaphore-imbalance-in-the-dirty.patch
+wifi-iwlwifi-fix-some-error-codes.patch
+wifi-iwlwifi-uninitialized-variable-in-iwl_acpi_get_.patch
+of-property-improve-finding-the-supplier-of-a-remote.patch
+net-openvswitch-limit-the-number-of-recursions-from-.patch
+lan966x-fix-crash-when-adding-interface-under-a-lag.patch
+tls-sw-use-splice_eof-to-flush.patch
+tls-extract-context-alloc-initialization-out-of-tls_.patch
+net-tls-factor-out-tls_-crypt_async_wait.patch
+tls-fix-race-between-async-notify-and-socket-close.patch
+net-tls-fix-use-after-free-with-partial-reads-and-as.patch
+net-tls-fix-returned-read-length-with-async-decrypt.patch
+spi-ppc4xx-drop-write-only-variable.patch
+asoc-rt5645-fix-deadlock-in-rt5645_jack_detect_work.patch
+net-sysfs-fix-sys-class-net-iface-path-for-statistic.patch
+nouveau-svm-fix-kvcalloc-argument-order.patch
+mips-add-memory-clobber-to-csum_ipv6_magic-inline-as.patch
+i40e-do-not-allow-untrusted-vf-to-remove-administrat.patch
+i40e-fix-waiting-for-queues-of-all-vsis-to-be-disabl.patch
diff --git a/queue-6.1/spi-ppc4xx-drop-write-only-variable.patch b/queue-6.1/spi-ppc4xx-drop-write-only-variable.patch
new file mode 100644 (file)
index 0000000..281dc0c
--- /dev/null
@@ -0,0 +1,55 @@
+From 9e163109fab123b9a6b9292d19848ee61abae0bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Feb 2024 17:40:08 +0100
+Subject: spi: ppc4xx: Drop write-only variable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit b3aa619a8b4706f35cb62f780c14e68796b37f3f ]
+
+Since commit 24778be20f87 ("spi: convert drivers to use
+bits_per_word_mask") the bits_per_word variable is only written to. The
+check that was there before isn't needed any more as the spi core
+ensures that only 8 bit transfers are used, so the variable can go away
+together with all assignments to it.
+
+Fixes: 24778be20f87 ("spi: convert drivers to use bits_per_word_mask")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20240210164006.208149-8-u.kleine-koenig@pengutronix.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-ppc4xx.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
+index d65f047b6c82..1179a1115137 100644
+--- a/drivers/spi/spi-ppc4xx.c
++++ b/drivers/spi/spi-ppc4xx.c
+@@ -166,10 +166,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+       int scr;
+       u8 cdm = 0;
+       u32 speed;
+-      u8 bits_per_word;
+       /* Start with the generic configuration for this device. */
+-      bits_per_word = spi->bits_per_word;
+       speed = spi->max_speed_hz;
+       /*
+@@ -177,9 +175,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+        * the transfer to overwrite the generic configuration with zeros.
+        */
+       if (t) {
+-              if (t->bits_per_word)
+-                      bits_per_word = t->bits_per_word;
+-
+               if (t->speed_hz)
+                       speed = min(t->speed_hz, spi->max_speed_hz);
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.1/tls-extract-context-alloc-initialization-out-of-tls_.patch b/queue-6.1/tls-extract-context-alloc-initialization-out-of-tls_.patch
new file mode 100644 (file)
index 0000000..c967e3c
--- /dev/null
@@ -0,0 +1,133 @@
+From efb8b2e188883018b894ca5df068e7d2ae76c132 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 22:50:46 +0200
+Subject: tls: extract context alloc/initialization out of tls_set_sw_offload
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit 615580cbc99af0da2d1c7226fab43a3d5003eb97 ]
+
+Simplify tls_set_sw_offload a bit.
+
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 86 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 51 insertions(+), 35 deletions(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index fbe6aab5f5b2..47ae429e50e3 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2587,6 +2587,48 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
+               tls_ctx->prot_info.version != TLS_1_3_VERSION;
+ }
++static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
++{
++      struct tls_sw_context_tx *sw_ctx_tx;
++
++      if (!ctx->priv_ctx_tx) {
++              sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
++              if (!sw_ctx_tx)
++                      return NULL;
++      } else {
++              sw_ctx_tx = ctx->priv_ctx_tx;
++      }
++
++      crypto_init_wait(&sw_ctx_tx->async_wait);
++      spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
++      INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
++      INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
++      sw_ctx_tx->tx_work.sk = sk;
++
++      return sw_ctx_tx;
++}
++
++static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
++{
++      struct tls_sw_context_rx *sw_ctx_rx;
++
++      if (!ctx->priv_ctx_rx) {
++              sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
++              if (!sw_ctx_rx)
++                      return NULL;
++      } else {
++              sw_ctx_rx = ctx->priv_ctx_rx;
++      }
++
++      crypto_init_wait(&sw_ctx_rx->async_wait);
++      spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
++      init_waitqueue_head(&sw_ctx_rx->wq);
++      skb_queue_head_init(&sw_ctx_rx->rx_list);
++      skb_queue_head_init(&sw_ctx_rx->async_hold);
++
++      return sw_ctx_rx;
++}
++
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+@@ -2608,48 +2650,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+       }
+       if (tx) {
+-              if (!ctx->priv_ctx_tx) {
+-                      sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+-                      if (!sw_ctx_tx) {
+-                              rc = -ENOMEM;
+-                              goto out;
+-                      }
+-                      ctx->priv_ctx_tx = sw_ctx_tx;
+-              } else {
+-                      sw_ctx_tx =
+-                              (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
+-              }
+-      } else {
+-              if (!ctx->priv_ctx_rx) {
+-                      sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+-                      if (!sw_ctx_rx) {
+-                              rc = -ENOMEM;
+-                              goto out;
+-                      }
+-                      ctx->priv_ctx_rx = sw_ctx_rx;
+-              } else {
+-                      sw_ctx_rx =
+-                              (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
+-              }
+-      }
++              ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
++              if (!ctx->priv_ctx_tx)
++                      return -ENOMEM;
+-      if (tx) {
+-              crypto_init_wait(&sw_ctx_tx->async_wait);
+-              spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
++              sw_ctx_tx = ctx->priv_ctx_tx;
+               crypto_info = &ctx->crypto_send.info;
+               cctx = &ctx->tx;
+               aead = &sw_ctx_tx->aead_send;
+-              INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+-              INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+-              sw_ctx_tx->tx_work.sk = sk;
+       } else {
+-              crypto_init_wait(&sw_ctx_rx->async_wait);
+-              spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+-              init_waitqueue_head(&sw_ctx_rx->wq);
++              ctx->priv_ctx_rx = init_ctx_rx(ctx);
++              if (!ctx->priv_ctx_rx)
++                      return -ENOMEM;
++
++              sw_ctx_rx = ctx->priv_ctx_rx;
+               crypto_info = &ctx->crypto_recv.info;
+               cctx = &ctx->rx;
+-              skb_queue_head_init(&sw_ctx_rx->rx_list);
+-              skb_queue_head_init(&sw_ctx_rx->async_hold);
+               aead = &sw_ctx_rx->aead_recv;
+       }
+-- 
+2.43.0
+
diff --git a/queue-6.1/tls-fix-race-between-async-notify-and-socket-close.patch b/queue-6.1/tls-fix-race-between-async-notify-and-socket-close.patch
new file mode 100644 (file)
index 0000000..7cf2273
--- /dev/null
@@ -0,0 +1,171 @@
+From 9aec22d212ddf7329573e97e8a8135c379dc7512 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 17:18:19 -0800
+Subject: tls: fix race between async notify and socket close
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit aec7961916f3f9e88766e2688992da6980f11b8d ]
+
+The submitting thread (one which called recvmsg/sendmsg)
+may exit as soon as the async crypto handler calls complete()
+so any code past that point risks touching already freed data.
+
+Try to avoid the locking and extra flags altogether.
+Have the main thread hold an extra reference, this way
+we can depend solely on the atomic ref counter for
+synchronization.
+
+Don't futz with reiniting the completion, either; we are now
+tightly controlling when completion fires.
+
+Reported-by: valis <sec@valis.email>
+Fixes: 0cada33241d9 ("net/tls: fix race condition causing kernel panic")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tls.h |  5 -----
+ net/tls/tls_sw.c  | 43 ++++++++++---------------------------------
+ 2 files changed, 10 insertions(+), 38 deletions(-)
+
+diff --git a/include/net/tls.h b/include/net/tls.h
+index c36bf4c50027..899c863aba02 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -108,9 +108,6 @@ struct tls_sw_context_tx {
+       struct tls_rec *open_rec;
+       struct list_head tx_list;
+       atomic_t encrypt_pending;
+-      /* protect crypto_wait with encrypt_pending */
+-      spinlock_t encrypt_compl_lock;
+-      int async_notify;
+       u8 async_capable:1;
+ #define BIT_TX_SCHEDULED      0
+@@ -147,8 +144,6 @@ struct tls_sw_context_rx {
+       struct tls_strparser strp;
+       atomic_t decrypt_pending;
+-      /* protect crypto_wait with decrypt_pending*/
+-      spinlock_t decrypt_compl_lock;
+       struct sk_buff_head async_hold;
+       struct wait_queue_head wq;
+ };
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index b146be099a3f..ee11932237c0 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -223,22 +223,15 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+       kfree(aead_req);
+-      spin_lock_bh(&ctx->decrypt_compl_lock);
+-      if (!atomic_dec_return(&ctx->decrypt_pending))
++      if (atomic_dec_and_test(&ctx->decrypt_pending))
+               complete(&ctx->async_wait.completion);
+-      spin_unlock_bh(&ctx->decrypt_compl_lock);
+ }
+ static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
+ {
+-      int pending;
+-
+-      spin_lock_bh(&ctx->decrypt_compl_lock);
+-      reinit_completion(&ctx->async_wait.completion);
+-      pending = atomic_read(&ctx->decrypt_pending);
+-      spin_unlock_bh(&ctx->decrypt_compl_lock);
+-      if (pending)
++      if (!atomic_dec_and_test(&ctx->decrypt_pending))
+               crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      atomic_inc(&ctx->decrypt_pending);
+       return ctx->async_wait.err;
+ }
+@@ -266,6 +259,7 @@ static int tls_do_decryption(struct sock *sk,
+               aead_request_set_callback(aead_req,
+                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                         tls_decrypt_done, aead_req);
++              DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
+               atomic_inc(&ctx->decrypt_pending);
+       } else {
+               aead_request_set_callback(aead_req,
+@@ -455,7 +449,6 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+       struct tls_rec *rec;
+       bool ready = false;
+       struct sock *sk;
+-      int pending;
+       rec = container_of(aead_req, struct tls_rec, aead_req);
+       msg_en = &rec->msg_encrypted;
+@@ -495,12 +488,8 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+                       ready = true;
+       }
+-      spin_lock_bh(&ctx->encrypt_compl_lock);
+-      pending = atomic_dec_return(&ctx->encrypt_pending);
+-
+-      if (!pending && ctx->async_notify)
++      if (atomic_dec_and_test(&ctx->encrypt_pending))
+               complete(&ctx->async_wait.completion);
+-      spin_unlock_bh(&ctx->encrypt_compl_lock);
+       if (!ready)
+               return;
+@@ -512,22 +501,9 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
+ {
+-      int pending;
+-
+-      spin_lock_bh(&ctx->encrypt_compl_lock);
+-      ctx->async_notify = true;
+-
+-      pending = atomic_read(&ctx->encrypt_pending);
+-      spin_unlock_bh(&ctx->encrypt_compl_lock);
+-      if (pending)
++      if (!atomic_dec_and_test(&ctx->encrypt_pending))
+               crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+-      else
+-              reinit_completion(&ctx->async_wait.completion);
+-
+-      /* There can be no concurrent accesses, since we have no
+-       * pending encrypt operations
+-       */
+-      WRITE_ONCE(ctx->async_notify, false);
++      atomic_inc(&ctx->encrypt_pending);
+       return ctx->async_wait.err;
+ }
+@@ -578,6 +554,7 @@ static int tls_do_encryption(struct sock *sk,
+       /* Add the record in tx_list */
+       list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
++      DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
+       atomic_inc(&ctx->encrypt_pending);
+       rc = crypto_aead_encrypt(aead_req);
+@@ -2594,7 +2571,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
+       }
+       crypto_init_wait(&sw_ctx_tx->async_wait);
+-      spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
++      atomic_set(&sw_ctx_tx->encrypt_pending, 1);
+       INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+       INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+       sw_ctx_tx->tx_work.sk = sk;
+@@ -2615,7 +2592,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
+       }
+       crypto_init_wait(&sw_ctx_rx->async_wait);
+-      spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
++      atomic_set(&sw_ctx_rx->decrypt_pending, 1);
+       init_waitqueue_head(&sw_ctx_rx->wq);
+       skb_queue_head_init(&sw_ctx_rx->rx_list);
+       skb_queue_head_init(&sw_ctx_rx->async_hold);
+-- 
+2.43.0
+
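The race fix above replaces the spinlock/async_notify machinery with a biased atomic counter: the pending count starts at 1, each submission adds 1, completions drop 1, and the waiter drops the bias itself, waiting only if work is still in flight and restoring the bias afterwards. The idea is that complete() can only fire after a waiter has announced itself by dropping the bias. Below is a minimal userspace sketch of that pattern using C11 atomics; it spins where the kernel sleeps and every name is illustrative, not the tls_sw code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending = 1;           /* bias of 1, held by the submitter */
static atomic_bool all_done = false;

/* Runs when one async operation finishes (the crypto callback's role). */
static void completion_handler(void)
{
        if (atomic_fetch_sub(&pending, 1) == 1)   /* like atomic_dec_and_test() */
                atomic_store(&all_done, true);    /* like complete() */
}

/* Called once per submitted async operation. */
static void submit_async_op(void)
{
        atomic_fetch_add(&pending, 1);
}

/* The waiter drops its bias; only if work is still in flight does it wait,
 * and it restores the bias afterwards for the next round.
 */
static void wait_for_async(void)
{
        if (atomic_fetch_sub(&pending, 1) != 1)
                while (!atomic_load(&all_done))
                        ;                         /* the kernel sleeps here */
        atomic_fetch_add(&pending, 1);
}

int main(void)
{
        submit_async_op();
        completion_handler();        /* pretend the crypto op already finished */
        wait_for_async();            /* finds nothing pending, returns at once */
        printf("pending back to its bias: %d\n", atomic_load(&pending));
        return 0;
}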
diff --git a/queue-6.1/tls-sw-use-splice_eof-to-flush.patch b/queue-6.1/tls-sw-use-splice_eof-to-flush.patch
new file mode 100644 (file)
index 0000000..3bf5087
--- /dev/null
@@ -0,0 +1,152 @@
+From 9884bf4a8ccbf928e29e14b91e6148849c13e8c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 19:19:11 +0100
+Subject: tls/sw: Use splice_eof() to flush
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit df720d288dbb1793e82b6ccbfc670ec871e9def4 ]
+
+Allow splice to end a TLS record after prematurely ending a splice/sendfile
+due to getting an EOF condition (->splice_read() returned 0) after splice
+had called TLS with a sendmsg() with MSG_MORE set when the user didn't set
+MSG_MORE.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+cc: Chuck Lever <chuck.lever@oracle.com>
+cc: Boris Pismenny <borisp@nvidia.com>
+cc: John Fastabend <john.fastabend@gmail.com>
+cc: Jens Axboe <axboe@kernel.dk>
+cc: Matthew Wilcox <willy@infradead.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls.h      |  1 +
+ net/tls/tls_main.c |  2 ++
+ net/tls/tls_sw.c   | 74 ++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 77 insertions(+)
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 0672acab2773..4922668fefaa 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -97,6 +97,7 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+ void tls_sw_strparser_done(struct tls_context *tls_ctx);
+ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
++void tls_sw_splice_eof(struct socket *sock);
+ int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+                          int offset, size_t size, int flags);
+ int tls_sw_sendpage(struct sock *sk, struct page *page,
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 338a443fa47b..80b42a3e7883 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -922,6 +922,7 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG]
+       ops[TLS_BASE][TLS_BASE] = *base;
+       ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
++      ops[TLS_SW  ][TLS_BASE].splice_eof      = tls_sw_splice_eof;
+       ops[TLS_SW  ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+       ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
+@@ -990,6 +991,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+       prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
+       prot[TLS_SW][TLS_BASE].sendmsg          = tls_sw_sendmsg;
++      prot[TLS_SW][TLS_BASE].splice_eof       = tls_sw_splice_eof;
+       prot[TLS_SW][TLS_BASE].sendpage         = tls_sw_sendpage;
+       prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 0323040d34bc..fbe6aab5f5b2 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1158,6 +1158,80 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+       return copied > 0 ? copied : ret;
+ }
++/*
++ * Handle unexpected EOF during splice without SPLICE_F_MORE set.
++ */
++void tls_sw_splice_eof(struct socket *sock)
++{
++      struct sock *sk = sock->sk;
++      struct tls_context *tls_ctx = tls_get_ctx(sk);
++      struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
++      struct tls_rec *rec;
++      struct sk_msg *msg_pl;
++      ssize_t copied = 0;
++      bool retrying = false;
++      int ret = 0;
++      int pending;
++
++      if (!ctx->open_rec)
++              return;
++
++      mutex_lock(&tls_ctx->tx_lock);
++      lock_sock(sk);
++
++retry:
++      rec = ctx->open_rec;
++      if (!rec)
++              goto unlock;
++
++      msg_pl = &rec->msg_plaintext;
++
++      /* Check the BPF advisor and perform transmission. */
++      ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
++                                &copied, 0);
++      switch (ret) {
++      case 0:
++      case -EAGAIN:
++              if (retrying)
++                      goto unlock;
++              retrying = true;
++              goto retry;
++      case -EINPROGRESS:
++              break;
++      default:
++              goto unlock;
++      }
++
++      /* Wait for pending encryptions to get completed */
++      spin_lock_bh(&ctx->encrypt_compl_lock);
++      ctx->async_notify = true;
++
++      pending = atomic_read(&ctx->encrypt_pending);
++      spin_unlock_bh(&ctx->encrypt_compl_lock);
++      if (pending)
++              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      else
++              reinit_completion(&ctx->async_wait.completion);
++
++      /* There can be no concurrent accesses, since we have no pending
++       * encrypt operations
++       */
++      WRITE_ONCE(ctx->async_notify, false);
++
++      if (ctx->async_wait.err)
++              goto unlock;
++
++      /* Transmit if any encryptions have completed */
++      if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++              cancel_delayed_work(&ctx->tx_work.work);
++              tls_tx_records(sk, 0);
++      }
++
++unlock:
++      release_sock(sk);
++      mutex_unlock(&tls_ctx->tx_lock);
++}
++
+ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+                             int offset, size_t size, int flags)
+ {
+-- 
+2.43.0
+
diff --git a/queue-6.1/wifi-iwlwifi-fix-some-error-codes.patch b/queue-6.1/wifi-iwlwifi-fix-some-error-codes.patch
new file mode 100644 (file)
index 0000000..f1d334e
--- /dev/null
@@ -0,0 +1,83 @@
+From 6ead6fd60e709c89026e928e3a28f974a256b961 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Feb 2024 13:17:06 +0300
+Subject: wifi: iwlwifi: Fix some error codes
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit c6ebb5b67641994de8bc486b33457fe0b681d6fe ]
+
+This saves the error as PTR_ERR(wifi_pkg).  The problem is that
+"wifi_pkg" is a valid pointer, not an error pointer.  Set the error code
+to -EINVAL instead.
+
+Fixes: 2a8084147bff ("iwlwifi: acpi: support reading and storing WRDS revision 1 and 2")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://msgid.link/9620bb77-2d7c-4d76-b255-ad824ebf8e35@moroto.mountain
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 585e8cd2d332..bdb8464cd432 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -576,7 +576,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+                                        &tbl_rev);
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 2) {
+-                      ret = PTR_ERR(wifi_pkg);
++                      ret = -EINVAL;
+                       goto out_free;
+               }
+@@ -592,7 +592,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+                                        &tbl_rev);
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 1) {
+-                      ret = PTR_ERR(wifi_pkg);
++                      ret = -EINVAL;
+                       goto out_free;
+               }
+@@ -608,7 +608,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+                                        &tbl_rev);
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 0) {
+-                      ret = PTR_ERR(wifi_pkg);
++                      ret = -EINVAL;
+                       goto out_free;
+               }
+@@ -665,7 +665,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+                                        &tbl_rev);
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 2) {
+-                      ret = PTR_ERR(wifi_pkg);
++                      ret = -EINVAL;
+                       goto out_free;
+               }
+@@ -681,7 +681,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+                                        &tbl_rev);
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 1) {
+-                      ret = PTR_ERR(wifi_pkg);
++                      ret = -EINVAL;
+                       goto out_free;
+               }
+@@ -697,7 +697,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+                                        &tbl_rev);
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 0) {
+-                      ret = PTR_ERR(wifi_pkg);
++                      ret = -EINVAL;
+                       goto out_free;
+               }
+-- 
+2.43.0
+
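The mistake fixed above is a classic ERR_PTR/PTR_ERR mix-up: once IS_ERR() has ruled out an error pointer, PTR_ERR() on that pointer just yields its numeric value, not an errno. The sketch below re-creates the idiom in userspace to make that visible; the helpers only mimic <linux/err.h> for illustration and are not the kernel headers.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
        void *missing = ERR_PTR(-ENOENT);     /* a real error pointer */
        int tbl_rev = 5;                      /* pretend: unsupported revision */
        void *pkg = &tbl_rev;                 /* a perfectly valid pointer */

        if (IS_ERR(missing))
                printf("error pointer decodes to %ld\n", PTR_ERR(missing));

        if (!IS_ERR(pkg) && tbl_rev != 2) {
                /* Wrong: pkg is valid, so PTR_ERR() is just its address. */
                printf("PTR_ERR(valid ptr) = %ld (meaningless)\n", PTR_ERR(pkg));
                /* Right: report a real error code, as the fix does. */
                printf("returning %d (-EINVAL) instead\n", -EINVAL);
        }
        return 0;
}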
diff --git a/queue-6.1/wifi-iwlwifi-uninitialized-variable-in-iwl_acpi_get_.patch b/queue-6.1/wifi-iwlwifi-uninitialized-variable-in-iwl_acpi_get_.patch
new file mode 100644 (file)
index 0000000..7dbcffe
--- /dev/null
@@ -0,0 +1,40 @@
+From da7a5e8ff773b33f20be007969101fdb02a24dd9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Feb 2024 13:17:31 +0300
+Subject: wifi: iwlwifi: uninitialized variable in iwl_acpi_get_ppag_table()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 65c6ee90455053cfd3067c17aaa4a42b0c766543 ]
+
+This is an error path and Smatch complains that "tbl_rev" is uninitialized
+on this path.  All the other functions follow this same pattern where they
+set the error code and goto out_free, so that's probably what was intended
+here as well.
+
+Fixes: e8e10a37c51c ("iwlwifi: acpi: move ppag code from mvm to fw/acpi")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://msgid.link/09900c01-6540-4a32-9451-563da0029cb6@moroto.mountain
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index bdb8464cd432..f5fcc547de39 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -1044,6 +1044,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+               goto read_table;
+       }
++      ret = PTR_ERR(wifi_pkg);
++      goto out_free;
++
+ read_table:
+       fwrt->ppag_ver = tbl_rev;
+       flags = &wifi_pkg->package.elements[1];
+-- 
+2.43.0
+
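This second iwlwifi fix addresses the same family of bug from the other side: a status variable that is only assigned on some paths but consumed afterwards. A small hedged C sketch of the shape of the bug and of the fix (bail out explicitly instead of falling through); the function and variable names are invented for illustration.

#include <stdio.h>

/* Returns 0 and sets *revision_out on success, -1 on error. */
static int parse_table(int have_table, int *revision_out)
{
        int revision;              /* only written on the success path */

        if (have_table) {
                revision = 3;
                *revision_out = revision;
                return 0;
        }

        /* The fix: report an error here instead of falling through to code
         * that would read 'revision' while it is still uninitialized.
         */
        return -1;
}

int main(void)
{
        int rev = 0;

        if (parse_table(0, &rev))
                printf("no table: error returned, rev left untouched (%d)\n", rev);
        return 0;
}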