4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 May 2021 09:54:27 +0000 (11:54 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 May 2021 09:54:27 +0000 (11:54 +0200)
added patches:
netfilter-x_tables-use-correct-memory-barriers.patch

queue-4.14/netfilter-x_tables-use-correct-memory-barriers.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/netfilter-x_tables-use-correct-memory-barriers.patch b/queue-4.14/netfilter-x_tables-use-correct-memory-barriers.patch
new file mode 100644
index 0000000..50b9a56
--- /dev/null
+++ b/queue-4.14/netfilter-x_tables-use-correct-memory-barriers.patch
@@ -0,0 +1,60 @@
+From 175e476b8cdf2a4de7432583b49c871345e4f8a1 Mon Sep 17 00:00:00 2001
+From: Mark Tomlinson <mark.tomlinson@alliedtelesis.co.nz>
+Date: Mon, 8 Mar 2021 14:24:13 +1300
+Subject: netfilter: x_tables: Use correct memory barriers.
+
+From: Mark Tomlinson <mark.tomlinson@alliedtelesis.co.nz>
+
+commit 175e476b8cdf2a4de7432583b49c871345e4f8a1 upstream.
+
+When a new table value was assigned, it was followed by a write memory
+barrier. This ensured that all writes before this point would complete
+before any writes after this point. However, to determine whether the
+rules are unused, the sequence counter is read. To ensure that all
+writes have been done before these reads, a full memory barrier is
+needed, not just a write memory barrier. The same argument applies when
+incrementing the counter, before the rules are read.
+
+Switching from smp_wmb() to smp_mb() fixes the kernel panic reported in
+cc00bcaa5899 (which is still present), while keeping table replacement
+just as fast.
+
+The smp_mb() barriers potentially slow the packet path; however, testing
+has shown no measurable change in performance on a 4-core MIPS64
+platform.
+
+Fixes: 7f5c6d4f665b ("netfilter: get rid of atomic ops in fast path")
+Signed-off-by: Mark Tomlinson <mark.tomlinson@alliedtelesis.co.nz>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+[Ported to stable, affected barrier is added by d3d40f237480abf3268956daf18cdc56edd32834 in mainline]
+Signed-off-by: Pavel Machek (CIP) <pavel@denx.de>
+Signed-off-by: Nobuhiro Iwamatsu (CIP) <nobuhiro1.iwamatsu@toshiba.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netfilter/x_tables.h |    2 +-
+ net/netfilter/x_tables.c           |    3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -373,7 +373,7 @@ static inline unsigned int xt_write_recs
+        * since addend is most likely 1
+        */
+       __this_cpu_add(xt_recseq.sequence, addend);
+-      smp_wmb();
++      smp_mb();
+       return addend;
+ }
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -1249,6 +1249,9 @@ xt_replace_table(struct xt_table *table,
+       smp_wmb();
+       table->private = newinfo;
++      /* make sure all cpus see new ->private value */
++      smp_mb();
++
+       /*
+        * Even though table entries have now been swapped, other CPU's
+        * may still be using the old entries. This is okay, because
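For readers unfamiliar with the pairing the commit message describes, the sketch below models it in user space with C11 atomics. It is an illustration under assumptions, not the kernel code: table_private, recseq, packet_path_begin() and replace_table() are invented stand-ins for table->private, a single xt_recseq counter, xt_write_recseq_begin() and xt_replace_table(), and atomic_thread_fence(memory_order_seq_cst) plays the role of smp_mb(). The key point is that both sides perform a store followed by a load (the classic store-buffering pattern), which a write-only barrier such as smp_wmb() does not order.

/*
 * sketch.c -- user-space model of the ordering discussed above.
 * Illustrative only: names are hypothetical stand-ins for the kernel's
 * table->private, xt_recseq, xt_write_recseq_begin() and xt_replace_table().
 */
#include <stdatomic.h>
#include <stdio.h>

static int old_rules, new_rules;

static _Atomic(void *) table_private;   /* models table->private            */
static _Atomic unsigned int recseq;     /* models one CPU's xt_recseq count */

/*
 * Packet path: STORE to the sequence counter, then LOAD the ruleset.
 * A full fence (kernel: smp_mb()) keeps the store visible before the load;
 * a write-only barrier (smp_wmb()) orders stores against later stores only,
 * so the load could be satisfied before the counter increment is seen.
 */
static void *packet_path_begin(void)
{
	atomic_fetch_add_explicit(&recseq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* role of smp_mb() */
	return atomic_load_explicit(&table_private, memory_order_relaxed);
}

/*
 * Table replacement: STORE the new ruleset pointer, then LOAD the counter
 * to decide whether an old traversal is still in flight.  Same pattern,
 * same need for a full fence; otherwise the replacer may conclude no
 * reader is active and free rules a packet is still walking.
 */
static unsigned int replace_table(void *newinfo)
{
	atomic_store_explicit(&table_private, newinfo, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* role of smp_mb() */
	return atomic_load_explicit(&recseq, memory_order_relaxed);
}

int main(void)
{
	atomic_store(&table_private, (void *)&old_rules);

	void *seen = packet_path_begin();               /* "packet CPU" step   */
	unsigned int seq = replace_table(&new_rules);   /* "replacer CPU" step */

	printf("packet path saw the %s rules; replacer read recseq=%u\n",
	       seen == &new_rules ? "new" : "old", seq);
	return 0;
}

The single-threaded main() only exercises the calls; the ordering matters when the two functions run concurrently on different CPUs, which is the situation the hunks above address.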
diff --git a/queue-4.14/series b/queue-4.14/series
index 374e5bdbeba3e7caca3575d9677fe139120fc314..1ad299cc4b3fefbf640f55155f5b55c8fbad96e8 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -2,3 +2,4 @@ mm-vmstat-drop-zone-lock-in-proc-pagetypeinfo.patch
 tweewide-fix-most-shebang-lines.patch
 scripts-switch-explicitly-to-python-3.patch
 usb-dwc3-gadget-enable-suspend-events.patch
+netfilter-x_tables-use-correct-memory-barriers.patch