git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 27 Mar 2024 14:50:41 +0000 (15:50 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 27 Mar 2024 14:50:41 +0000 (15:50 +0100)
added patches:
cpufreq-brcmstb-avs-cpufreq-fix-up-add-check-for-cpufreq_cpu_get-s-return-value.patch
drivers-hv-vmbus-calculate-ring-buffer-size-for-more-efficient-use-of-memory.patch
netfilter-nf_tables-disallow-anonymous-set-with-timeout-flag.patch
netfilter-nf_tables-mark-set-as-dead-when-unbinding-anonymous-set-with-timeout.patch
netfilter-nf_tables-reject-constant-set-with-timeout.patch
platform-x86-p2sb-on-goldmont-only-cache-p2sb-and-spi-devfn-bar.patch
tls-fix-race-between-tx-work-scheduling-and-socket-close.patch
xfrm-avoid-clang-fortify-warning-in-copy_to_user_tmpl.patch

queue-6.1/cpufreq-brcmstb-avs-cpufreq-fix-up-add-check-for-cpufreq_cpu_get-s-return-value.patch [new file with mode: 0644]
queue-6.1/drivers-hv-vmbus-calculate-ring-buffer-size-for-more-efficient-use-of-memory.patch [new file with mode: 0644]
queue-6.1/netfilter-nf_tables-disallow-anonymous-set-with-timeout-flag.patch [new file with mode: 0644]
queue-6.1/netfilter-nf_tables-mark-set-as-dead-when-unbinding-anonymous-set-with-timeout.patch [new file with mode: 0644]
queue-6.1/netfilter-nf_tables-reject-constant-set-with-timeout.patch [new file with mode: 0644]
queue-6.1/platform-x86-p2sb-on-goldmont-only-cache-p2sb-and-spi-devfn-bar.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/tls-fix-race-between-tx-work-scheduling-and-socket-close.patch [new file with mode: 0644]
queue-6.1/xfrm-avoid-clang-fortify-warning-in-copy_to_user_tmpl.patch [new file with mode: 0644]

diff --git a/queue-6.1/cpufreq-brcmstb-avs-cpufreq-fix-up-add-check-for-cpufreq_cpu_get-s-return-value.patch b/queue-6.1/cpufreq-brcmstb-avs-cpufreq-fix-up-add-check-for-cpufreq_cpu_get-s-return-value.patch
new file mode 100644 (file)
index 0000000..1e1283b
--- /dev/null
@@ -0,0 +1,54 @@
+From 6ae10467fb966d6efb10a13cafd47f8d2a264880 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 27 Mar 2024 15:21:45 +0100
+Subject: cpufreq: brcmstb-avs-cpufreq: fix up "add check for cpufreq_cpu_get's return value"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+In commit e72160cb6e23 ("cpufreq: brcmstb-avs-cpufreq: add check for
+cpufreq_cpu_get's return value"), build warnings occur because a
+variable is declared after executable statements, resulting in:
+
+drivers/cpufreq/brcmstb-avs-cpufreq.c: In function 'brcm_avs_cpufreq_get':
+drivers/cpufreq/brcmstb-avs-cpufreq.c:486:9: error: ISO C90 forbids mixed
+declarations and code [-Werror=declaration-after-statement]
+  486 |         struct private_data *priv = policy->driver_data;
+      |         ^~~~~~
+cc1: all warnings being treated as errors
+make[2]: *** [scripts/Makefile.build:289:
+drivers/cpufreq/brcmstb-avs-cpufreq.o] Error 1
+make[1]: *** [scripts/Makefile.build:552: drivers/cpufreq] Error 2
+make[1]: *** Waiting for unfinished jobs....
+make: *** [Makefile:1907: drivers] Error 2
+
+Fix this up.
+
+Link: https://lore.kernel.org/r/e114d9e5-26af-42be-9baa-72c3a6ec8fe5@oracle.com
+Link: https://lore.kernel.org/stable/20240327015023.GC7502@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net/T/#m15bff0fe96986ef780e848b4fff362bf8ea03f08
+Reported-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Reported-by: Linux Kernel Functional Testing <lkft@linaro.org>
+Fixes: e72160cb6e23 ("cpufreq: brcmstb-avs-cpufreq: add check for cpufreq_cpu_get's return value")
+Cc: Anastasia Belova <abelova@astralinux.ru>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/brcmstb-avs-cpufreq.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -481,10 +481,11 @@ static bool brcm_avs_is_firmware_loaded(
+ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+ {
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++      struct private_data *priv;
++
+       if (!policy)
+               return 0;
+-      struct private_data *priv = policy->driver_data;
+-
++      priv = policy->driver_data;
+       cpufreq_cpu_put(policy);
+       return brcm_avs_get_frequency(priv->base);
diff --git a/queue-6.1/drivers-hv-vmbus-calculate-ring-buffer-size-for-more-efficient-use-of-memory.patch b/queue-6.1/drivers-hv-vmbus-calculate-ring-buffer-size-for-more-efficient-use-of-memory.patch
new file mode 100644 (file)
index 0000000..9de3d30
--- /dev/null
@@ -0,0 +1,89 @@
+From b8209544296edbd1af186e2ea9c648642c37b18c Mon Sep 17 00:00:00 2001
+From: Michael Kelley <mhklinux@outlook.com>
+Date: Wed, 28 Feb 2024 16:45:33 -0800
+Subject: Drivers: hv: vmbus: Calculate ring buffer size for more efficient use of memory
+
+From: Michael Kelley <mhklinux@outlook.com>
+
+commit b8209544296edbd1af186e2ea9c648642c37b18c upstream.
+
+The VMBUS_RING_SIZE macro adds space for a ring buffer header to the
+requested ring buffer size.  The header size is always 1 page, and so
+its size varies based on the PAGE_SIZE for which the kernel is built.
+If the requested ring buffer size is a large power-of-2 size and the header
+size is small, the resulting size is inefficient in its use of memory.
+For example, a 512 Kbyte ring buffer with a 4 Kbyte page size results in
+a 516 Kbyte allocation, which is rounded up to 1 Mbyte by the memory
+allocator, and wastes 508 Kbytes of memory.
+
+In such situations, the exact size of the ring buffer isn't that important,
+and it's OK to allocate the 4 Kbyte header at the beginning of the 512
+Kbytes, leaving the ring buffer itself with just 508 Kbytes. The memory
+allocation can be 512 Kbytes instead of 1 Mbyte and nothing is wasted.
+
+Update VMBUS_RING_SIZE to implement this approach for "large" ring buffer
+sizes.  "Large" is somewhat arbitrarily defined as 8 times the size of
+the ring buffer header (which is of size PAGE_SIZE).  For example, for
+4 Kbyte PAGE_SIZE, ring buffers of 32 Kbytes and larger use the first
+4 Kbytes as the ring buffer header.  For 64 Kbyte PAGE_SIZE, ring buffers
+of 512 Kbytes and larger use the first 64 Kbytes as the ring buffer
+header.  In both cases, smaller sizes add space for the header so
+the ring size isn't reduced too much by using part of the space for
+the header.  For example, with a 64 Kbyte page size, we don't want
+a 128 Kbyte ring buffer to be reduced to 64 Kbytes by allocating half
+of the space for the header.  In such a case, the memory allocation
+is less efficient, but it's the best that can be done.
+
+While the new algorithm slightly changes the amount of space allocated
+for ring buffers by drivers that use VMBUS_RING_SIZE, the devices aren't
+known to be sensitive to small changes in ring buffer size, so there
+shouldn't be any effect.
+
+Fixes: c1135c7fd0e9 ("Drivers: hv: vmbus: Introduce types of GPADL")
+Fixes: 6941f67ad37d ("hv_netvsc: Calculate correct ring size when PAGE_SIZE is not 4 Kbytes")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218502
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Kelley <mhklinux@outlook.com>
+Reviewed-by: Saurabh Sengar <ssengar@linux.microsoft.com>
+Reviewed-by: Dexuan Cui <decui@microsoft.com>
+Tested-by: Souradeep Chakrabarti <schakrabarti@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20240229004533.313662-1-mhklinux@outlook.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Message-ID: <20240229004533.313662-1-mhklinux@outlook.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hyperv.h |   22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -164,8 +164,28 @@ struct hv_ring_buffer {
+       u8 buffer[];
+ } __packed;
++
++/*
++ * If the requested ring buffer size is at least 8 times the size of the
++ * header, steal space from the ring buffer for the header. Otherwise, add
++ * space for the header so that it doesn't take too much of the ring buffer
++ * space.
++ *
++ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
++ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
++ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
++ * large allocation that will be almost half wasted. As a contrasting example,
++ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
++ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
++ * In this latter case, we must add 64 Kbytes for the header and not worry
++ * about what's wasted.
++ */
++#define VMBUS_HEADER_ADJ(payload_sz) \
++      ((payload_sz) >=  8 * sizeof(struct hv_ring_buffer) ? \
++      0 : sizeof(struct hv_ring_buffer))
++
+ /* Calculate the proper size of a ringbuffer, it must be page-aligned */
+-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
++#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
+                                              (payload_sz))
+ struct hv_ring_buffer_info {
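For reference, a stand-alone sketch of how the new sizing works out, assuming a
4 Kbyte PAGE_SIZE and a one-page ring buffer header as described in the commit
message (an illustrative userspace re-implementation of the two macros, not
part of the patch):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define HDR_SIZE	PAGE_SIZE	/* sizeof(struct hv_ring_buffer) */
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	/* Mirrors VMBUS_HEADER_ADJ()/VMBUS_RING_SIZE() from the patch. */
	#define HEADER_ADJ(payload_sz) \
		((payload_sz) >= 8 * HDR_SIZE ? 0 : HDR_SIZE)
	#define RING_SIZE(payload_sz) \
		PAGE_ALIGN(HEADER_ADJ(payload_sz) + (payload_sz))

	int main(void)
	{
		/* 512K payload: the header is stolen from the ring, so the
		 * total stays 512K (the old macro gave 516K, which the
		 * allocator rounded up to 1 Mbyte). */
		printf("512K payload -> %lu bytes\n", RING_SIZE(512 * 1024UL));

		/* 16K payload: too small to steal from, so a 4K header is
		 * added on top, as before. */
		printf("16K payload  -> %lu bytes\n", RING_SIZE(16 * 1024UL));
		return 0;
	}
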
diff --git a/queue-6.1/netfilter-nf_tables-disallow-anonymous-set-with-timeout-flag.patch b/queue-6.1/netfilter-nf_tables-disallow-anonymous-set-with-timeout-flag.patch
new file mode 100644 (file)
index 0000000..a871b1f
--- /dev/null
@@ -0,0 +1,33 @@
+From 16603605b667b70da974bea8216c93e7db043bf1 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 1 Mar 2024 00:11:10 +0100
+Subject: netfilter: nf_tables: disallow anonymous set with timeout flag
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 16603605b667b70da974bea8216c93e7db043bf1 upstream.
+
+Anonymous sets are never used with a timeout from userspace, so reject this.
+The exception to this rule is NFT_SET_EVAL, to ensure legacy meters still work.
+
+Cc: stable@vger.kernel.org
+Fixes: 761da2935d6e ("netfilter: nf_tables: add set timeout API support")
+Reported-by: lonial con <kongln9170@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4711,6 +4711,9 @@ static int nf_tables_newset(struct sk_bu
+               if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+                            (NFT_SET_EVAL | NFT_SET_OBJECT))
+                       return -EOPNOTSUPP;
++              if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
++                           (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
++                      return -EOPNOTSUPP;
+       }
+       desc.dtype = 0;
diff --git a/queue-6.1/netfilter-nf_tables-mark-set-as-dead-when-unbinding-anonymous-set-with-timeout.patch b/queue-6.1/netfilter-nf_tables-mark-set-as-dead-when-unbinding-anonymous-set-with-timeout.patch
new file mode 100644 (file)
index 0000000..60501ec
--- /dev/null
@@ -0,0 +1,45 @@
+From 552705a3650bbf46a22b1adedc1b04181490fc36 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 4 Mar 2024 14:22:12 +0100
+Subject: netfilter: nf_tables: mark set as dead when unbinding anonymous set with timeout
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 552705a3650bbf46a22b1adedc1b04181490fc36 upstream.
+
+While the rhashtable set gc runs asynchronously, a race allows it to
+collect elements from anonymous sets with timeouts while it is being
+released from the commit path.
+
+Mingi Cho originally reported this issue in a different path in 6.1.x
+with a pipapo set with low timeouts which is not possible upstream since
+7395dfacfff6 ("netfilter: nf_tables: use timestamp to check for set
+element timeout").
+
+Fix this by setting the dead flag on anonymous sets to skip the async gc
+in this case.
+
+According to 08e4c8c5919f ("netfilter: nf_tables: mark newset as dead on
+transaction abort"), Florian plans to accelerate abort path by releasing
+objects via workqueue, therefore, this sets on the dead flag for abort
+path too.
+
+Cc: stable@vger.kernel.org
+Fixes: 5f68718b34a5 ("netfilter: nf_tables: GC transaction API to avoid race with control plane")
+Reported-by: Mingi Cho <mgcho.minic@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5132,6 +5132,7 @@ static void nf_tables_unbind_set(const s
+       if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
+               list_del_rcu(&set->list);
++              set->dead = 1;
+               if (event)
+                       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+                                            GFP_KERNEL);
diff --git a/queue-6.1/netfilter-nf_tables-reject-constant-set-with-timeout.patch b/queue-6.1/netfilter-nf_tables-reject-constant-set-with-timeout.patch
new file mode 100644 (file)
index 0000000..eb2461d
--- /dev/null
@@ -0,0 +1,34 @@
+From 5f4fc4bd5cddb4770ab120ce44f02695c4505562 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 1 Mar 2024 01:04:11 +0100
+Subject: netfilter: nf_tables: reject constant set with timeout
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 5f4fc4bd5cddb4770ab120ce44f02695c4505562 upstream.
+
+This set combination is weird: it allows for elements to be
+added/deleted, but once bound to the rule it cannot be updated anymore.
+Eventually, all elements expire, leading to an empty set which cannot
+be updated anymore. Reject this flags combination.
+
+Cc: stable@vger.kernel.org
+Fixes: 761da2935d6e ("netfilter: nf_tables: add set timeout API support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4714,6 +4714,9 @@ static int nf_tables_newset(struct sk_bu
+               if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
+                            (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
+                       return -EOPNOTSUPP;
++              if ((flags & (NFT_SET_CONSTANT | NFT_SET_TIMEOUT)) ==
++                           (NFT_SET_CONSTANT | NFT_SET_TIMEOUT))
++                      return -EOPNOTSUPP;
+       }
+       desc.dtype = 0;
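Both nf_tables checks above rely on the same mask-and-compare idiom: masking
the flags with every flag of interest and comparing against the disallowed
subset rejects "these two flags set" while a third flag in the mask
(NFT_SET_EVAL in the first patch) can still opt the combination back in. A
minimal stand-alone sketch of the idiom, using made-up flag values rather than
the kernel definitions:

	#include <stdio.h>

	/* Hypothetical flag values, for illustration only. */
	#define F_ANONYMOUS	0x01
	#define F_TIMEOUT	0x02
	#define F_EVAL		0x04

	static int check_new_set(unsigned int flags)
	{
		/* "ANONYMOUS and TIMEOUT set, EVAL clear" -> reject. */
		if ((flags & (F_ANONYMOUS | F_TIMEOUT | F_EVAL)) ==
			     (F_ANONYMOUS | F_TIMEOUT))
			return -1;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_new_set(F_ANONYMOUS | F_TIMEOUT));           /* -1 */
		printf("%d\n", check_new_set(F_ANONYMOUS | F_TIMEOUT | F_EVAL));  /*  0 */
		return 0;
	}
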
diff --git a/queue-6.1/platform-x86-p2sb-on-goldmont-only-cache-p2sb-and-spi-devfn-bar.patch b/queue-6.1/platform-x86-p2sb-on-goldmont-only-cache-p2sb-and-spi-devfn-bar.patch
new file mode 100644 (file)
index 0000000..08db330
--- /dev/null
@@ -0,0 +1,75 @@
+From aec7d25b497ce4a8d044e9496de0aa433f7f8f06 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Mon, 4 Mar 2024 14:43:55 +0100
+Subject: platform/x86: p2sb: On Goldmont only cache P2SB and SPI devfn BAR
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit aec7d25b497ce4a8d044e9496de0aa433f7f8f06 upstream.
+
+On Goldmont p2sb_bar() only ever gets called for 2 devices, the actual P2SB
+devfn 13,0 and the SPI controller which is part of the P2SB, devfn 13,2.
+
+But the current p2sb code tries to cache BAR0 info for all of
+devfn 13,0 to 13,7. This involves calling pci_scan_single_device()
+for device 13 functions 0-7 and the hw does not seem to like
+pci_scan_single_device() getting called for some of the other hidden
+devices. E.g. on an ASUS VivoBook D540NV-GQ065T this leads to continuous
+ACPI errors leading to high CPU usage.
+
+Fix this by only caching BAR0 info and thus only calling
+pci_scan_single_device() for the P2SB and the SPI controller.
+
+Fixes: 5913320eb0b3 ("platform/x86: p2sb: Allow p2sb_bar() calls during PCI device probe")
+Reported-by: Danil Rybakov <danilrybakov249@gmail.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218531
+Tested-by: Danil Rybakov <danilrybakov249@gmail.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20240304134356.305375-2-hdegoede@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/p2sb.c |   23 ++++++++---------------
+ 1 file changed, 8 insertions(+), 15 deletions(-)
+
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -20,9 +20,11 @@
+ #define P2SBC_HIDE            BIT(8)
+ #define P2SB_DEVFN_DEFAULT    PCI_DEVFN(31, 1)
++#define P2SB_DEVFN_GOLDMONT   PCI_DEVFN(13, 0)
++#define SPI_DEVFN_GOLDMONT    PCI_DEVFN(13, 2)
+ static const struct x86_cpu_id p2sb_cpu_ids[] = {
+-      X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,       PCI_DEVFN(13, 0)),
++      X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
+       {}
+ };
+@@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(st
+ static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+ {
+-      unsigned int slot, fn;
++      /* Scan the P2SB device and cache its BAR0 */
++      p2sb_scan_and_cache_devfn(bus, devfn);
+-      if (PCI_FUNC(devfn) == 0) {
+-              /*
+-               * When function number of the P2SB device is zero, scan it and
+-               * other function numbers, and if devices are available, cache
+-               * their BAR0s.
+-               */
+-              slot = PCI_SLOT(devfn);
+-              for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
+-                      p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
+-      } else {
+-              /* Scan the P2SB device and cache its BAR0 */
+-              p2sb_scan_and_cache_devfn(bus, devfn);
+-      }
++      /* On Goldmont p2sb_bar() also gets called for the SPI controller */
++      if (devfn == P2SB_DEVFN_GOLDMONT)
++              p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
+       if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+               return -ENOENT;
diff --git a/queue-6.1/series b/queue-6.1/series
index 1032b063cf501e70334fd5a7e4a405362dd28d43..7ef4842725e9fd9917b1fab2b657612bcd6ca22c 100644 (file)
@@ -141,3 +141,11 @@ drm-amd-display-return-the-correct-hdcp-error-code.patch
 drm-amd-display-fix-noise-issue-on-hdmi-av-mute.patch
 dm-snapshot-fix-lockup-in-dm_exception_table_exit.patch
 x86-pm-work-around-false-positive-kmemleak-report-in.patch
+cpufreq-brcmstb-avs-cpufreq-fix-up-add-check-for-cpufreq_cpu_get-s-return-value.patch
+platform-x86-p2sb-on-goldmont-only-cache-p2sb-and-spi-devfn-bar.patch
+tls-fix-race-between-tx-work-scheduling-and-socket-close.patch
+netfilter-nf_tables-mark-set-as-dead-when-unbinding-anonymous-set-with-timeout.patch
+netfilter-nf_tables-disallow-anonymous-set-with-timeout-flag.patch
+netfilter-nf_tables-reject-constant-set-with-timeout.patch
+drivers-hv-vmbus-calculate-ring-buffer-size-for-more-efficient-use-of-memory.patch
+xfrm-avoid-clang-fortify-warning-in-copy_to_user_tmpl.patch
diff --git a/queue-6.1/tls-fix-race-between-tx-work-scheduling-and-socket-close.patch b/queue-6.1/tls-fix-race-between-tx-work-scheduling-and-socket-close.patch
new file mode 100644 (file)
index 0000000..812fb61
--- /dev/null
@@ -0,0 +1,64 @@
+From e01e3934a1b2d122919f73bc6ddbe1cdafc4bbdb Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Tue, 6 Feb 2024 17:18:20 -0800
+Subject: tls: fix race between tx work scheduling and socket close
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+commit e01e3934a1b2d122919f73bc6ddbe1cdafc4bbdb upstream.
+
+Similarly to previous commit, the submitting thread (recvmsg/sendmsg)
+may exit as soon as the async crypto handler calls complete().
+Reorder scheduling the work before calling complete().
+This seems more logical in the first place, as it's
+the inverse order of what the submitting thread will do.
+
+Reported-by: valis <sec@valis.email>
+Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[Lee: Fixed merge-conflict in Stable branches linux-6.1.y and older]
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c |   16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -449,7 +449,6 @@ static void tls_encrypt_done(crypto_comp
+       struct scatterlist *sge;
+       struct sk_msg *msg_en;
+       struct tls_rec *rec;
+-      bool ready = false;
+       struct sock *sk;
+       rec = container_of(aead_req, struct tls_rec, aead_req);
+@@ -486,19 +485,16 @@ static void tls_encrypt_done(crypto_comp
+               /* If received record is at head of tx_list, schedule tx */
+               first_rec = list_first_entry(&ctx->tx_list,
+                                            struct tls_rec, list);
+-              if (rec == first_rec)
+-                      ready = true;
++              if (rec == first_rec) {
++                      /* Schedule the transmission */
++                      if (!test_and_set_bit(BIT_TX_SCHEDULED,
++                                            &ctx->tx_bitmask))
++                              schedule_delayed_work(&ctx->tx_work.work, 1);
++              }
+       }
+       if (atomic_dec_and_test(&ctx->encrypt_pending))
+               complete(&ctx->async_wait.completion);
+-
+-      if (!ready)
+-              return;
+-
+-      /* Schedule the transmission */
+-      if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+-              schedule_delayed_work(&ctx->tx_work.work, 1);
+ }
+ static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
diff --git a/queue-6.1/xfrm-avoid-clang-fortify-warning-in-copy_to_user_tmpl.patch b/queue-6.1/xfrm-avoid-clang-fortify-warning-in-copy_to_user_tmpl.patch
new file mode 100644 (file)
index 0000000..f4021ff
--- /dev/null
@@ -0,0 +1,56 @@
+From 1a807e46aa93ebad1dfbed4f82dc3bf779423a6e Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Wed, 21 Feb 2024 14:46:21 -0700
+Subject: xfrm: Avoid clang fortify warning in copy_to_user_tmpl()
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 1a807e46aa93ebad1dfbed4f82dc3bf779423a6e upstream.
+
+After a couple recent changes in LLVM, there is a warning (or error with
+CONFIG_WERROR=y or W=e) from the compile time fortify source routines,
+specifically the memset() in copy_to_user_tmpl().
+
+  In file included from net/xfrm/xfrm_user.c:14:
+  ...
+  include/linux/fortify-string.h:438:4: error: call to '__write_overflow_field' declared with 'warning' attribute: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Werror,-Wattribute-warning]
+    438 |                         __write_overflow_field(p_size_field, size);
+        |                         ^
+  1 error generated.
+
+While ->xfrm_nr has been validated against XFRM_MAX_DEPTH when its value
+is first assigned in copy_templates() by calling validate_tmpl() first
+(so there should not be any issue in practice), LLVM/clang cannot really
+deduce that across the boundaries of these functions. Without that
+knowledge, it cannot assume that the loop stops before i is greater than
+XFRM_MAX_DEPTH, which would indeed result in a stack buffer overflow in the
+memset().
+
+To make the bounds of ->xfrm_nr clear to the compiler and add additional
+defense in case copy_to_user_tmpl() is ever used in a path where
+->xfrm_nr has not been properly validated against XFRM_MAX_DEPTH first,
+add an explicit bound check and early return, which clears up the
+warning.
+
+Cc: stable@vger.kernel.org
+Link: https://github.com/ClangBuiltLinux/linux/issues/1985
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/xfrm/xfrm_user.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1979,6 +1979,9 @@ static int copy_to_user_tmpl(struct xfrm
+       if (xp->xfrm_nr == 0)
+               return 0;
++      if (xp->xfrm_nr > XFRM_MAX_DEPTH)
++              return -ENOBUFS;
++
+       for (i = 0; i < xp->xfrm_nr; i++) {
+               struct xfrm_user_tmpl *up = &vec[i];
+               struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
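The general pattern here — re-checking a count against the array bound in the
function that performs the writes, even though a caller validated it earlier —
is what makes the bound visible to compile-time fortify checks. A minimal
stand-alone sketch with made-up names (not the xfrm code):

	#include <stdio.h>
	#include <string.h>

	#define MAX_DEPTH 6	/* stands in for XFRM_MAX_DEPTH */

	struct tmpl { int family; unsigned char id[8]; };
	struct policy { int nr; struct tmpl vec[MAX_DEPTH]; };

	static int copy_tmpls(struct tmpl *out, const struct policy *p)
	{
		int i;

		/* Without this local check, the compiler only sees a loop
		 * bounded by p->nr, which is validated in some other
		 * function, so it cannot prove the writes below stay inside
		 * out[MAX_DEPTH]. */
		if (p->nr > MAX_DEPTH)
			return -1;

		for (i = 0; i < p->nr; i++) {
			struct tmpl *up = &out[i];

			memset(up, 0, sizeof(*up));
			up->family = p->vec[i].family;
		}
		return 0;
	}

	int main(void)
	{
		struct policy p = { .nr = 2 };
		struct tmpl out[MAX_DEPTH];

		p.vec[0].family = 2;	/* e.g. AF_INET  */
		p.vec[1].family = 10;	/* e.g. AF_INET6 */
		printf("%d\n", copy_tmpls(out, &p));
		return 0;
	}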