git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Jun 2022 16:05:33 +0000 (18:05 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Jun 2022 16:05:33 +0000 (18:05 +0200)
added patches:
tcp-add-small-random-increments-to-the-source-port.patch
tcp-add-some-entropy-in-__inet_hash_connect.patch
tcp-drop-the-hash_32-part-from-the-index-calculation.patch
tcp-dynamically-allocate-the-perturb-table-used-by-source-ports.patch
tcp-increase-source-port-perturb-table-to-2-16.patch
tcp-use-different-parts-of-the-port_offset-for-index-and-offset.patch
xprtrdma-fix-incorrect-header-size-calculations.patch

queue-4.14/series
queue-4.14/tcp-add-small-random-increments-to-the-source-port.patch [new file with mode: 0644]
queue-4.14/tcp-add-some-entropy-in-__inet_hash_connect.patch [new file with mode: 0644]
queue-4.14/tcp-drop-the-hash_32-part-from-the-index-calculation.patch [new file with mode: 0644]
queue-4.14/tcp-dynamically-allocate-the-perturb-table-used-by-source-ports.patch [new file with mode: 0644]
queue-4.14/tcp-increase-source-port-perturb-table-to-2-16.patch [new file with mode: 0644]
queue-4.14/tcp-use-different-parts-of-the-port_offset-for-index-and-offset.patch [new file with mode: 0644]
queue-4.14/xprtrdma-fix-incorrect-header-size-calculations.patch [new file with mode: 0644]

index 79e47e7f2b53ce5b79db1fd91b048787674efaf8..b7441f335bff14f32de07afad660398c4110099f 100644 (file)
@@ -229,3 +229,10 @@ l2tp-don-t-use-inet_shutdown-on-ppp-session-destroy.patch
 l2tp-fix-race-in-pppol2tp_release-with-session-object-destroy.patch
 s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
 usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
+xprtrdma-fix-incorrect-header-size-calculations.patch
+tcp-add-some-entropy-in-__inet_hash_connect.patch
+tcp-use-different-parts-of-the-port_offset-for-index-and-offset.patch
+tcp-add-small-random-increments-to-the-source-port.patch
+tcp-dynamically-allocate-the-perturb-table-used-by-source-ports.patch
+tcp-increase-source-port-perturb-table-to-2-16.patch
+tcp-drop-the-hash_32-part-from-the-index-calculation.patch
diff --git a/queue-4.14/tcp-add-small-random-increments-to-the-source-port.patch b/queue-4.14/tcp-add-small-random-increments-to-the-source-port.patch
new file mode 100644 (file)
index 0000000..1bd0c03
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Willy Tarreau <w@1wt.eu>
+Date: Mon, 2 May 2022 10:46:11 +0200
+Subject: tcp: add small random increments to the source port
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit ca7af0402550f9a0b3316d5f1c30904e42ed257d upstream.
+
+Here we're randomly adding between 0 and 7 random increments to the
+selected source port in order to add some noise in the source port
+selection that will make the next port less predictable.
+
+With the default port range of 32768-60999 this means a worst case
+reuse scenario of 14116/8=1764 connections between two consecutive
+uses of the same port, with an average of 14116/4.5=3137. This code
+was stressed at more than 800000 connections per second to a fixed
+target with all connections closed by the client using RSTs (worst
+condition) and only 2 connections failed among 13 billion, despite
+the hash being reseeded every 10 seconds, indicating a perfectly
+safe situation.
+
+Cc: Moshe Kol <moshe.kol@mail.huji.ac.il>
+Cc: Yossi Gilad <yossi.gilad@mail.huji.ac.il>
+Cc: Amit Klein <aksecurity@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -694,11 +694,12 @@ next_port:
+       return -EADDRNOTAVAIL;
+ ok:
+-      /* If our first attempt found a candidate, skip next candidate
+-       * in 1/16 of cases to add some noise.
++      /* Here we want to add a little bit of randomness to the next source
++       * port that will be chosen. We use a max() with a random here so that
++       * on low contention the randomness is maximal and on high contention
++       * it may be inexistent.
+        */
+-      if (!i && !(prandom_u32() % 16))
+-              i = 2;
++      i = max_t(int, i, (prandom_u32() & 7) * 2);
+       WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
+       /* Head lock still held and bh's disabled */
diff --git a/queue-4.14/tcp-add-some-entropy-in-__inet_hash_connect.patch b/queue-4.14/tcp-add-some-entropy-in-__inet_hash_connect.patch
new file mode 100644 (file)
index 0000000..e87b1eb
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 9 Feb 2021 11:20:28 -0800
+Subject: tcp: add some entropy in __inet_hash_connect()
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit c579bd1b4021c42ae247108f1e6f73dd3f08600c upstream.
+
+Even when implementing RFC 6056 3.3.4 (Algorithm 4: Double-Hash
+Port Selection Algorithm), a patient attacker could still be able
+to collect enough state from an otherwise idle host.
+
+Idea of this patch is to inject some noise, in the
+cases __inet_hash_connect() found a candidate in the first
+attempt.
+
+This noise should not significantly reduce the collision
+avoidance, and should be zero if connection table
+is already well used.
+
+Note that this is not implementing RFC 6056 3.3.5
+because we think Algorithm 5 could hurt typical
+workloads.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: David Dworken <ddworken@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -694,6 +694,11 @@ next_port:
+       return -EADDRNOTAVAIL;
+ ok:
++      /* If our first attempt found a candidate, skip next candidate
++       * in 1/16 of cases to add some noise.
++       */
++      if (!i && !(prandom_u32() % 16))
++              i = 2;
+       WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
+       /* Head lock still held and bh's disabled */
diff --git a/queue-4.14/tcp-drop-the-hash_32-part-from-the-index-calculation.patch b/queue-4.14/tcp-drop-the-hash_32-part-from-the-index-calculation.patch
new file mode 100644 (file)
index 0000000..d92ca8b
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Willy Tarreau <w@1wt.eu>
+Date: Mon, 2 May 2022 10:46:14 +0200
+Subject: tcp: drop the hash_32() part from the index calculation
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit e8161345ddbb66e449abde10d2fdce93f867eba9 upstream.
+
+In commit 190cc82489f4 ("tcp: change source port randomizarion at
+connect() time"), the table_perturb[] array was introduced and an
+index was taken from the port_offset via hash_32(). But it turns
+out that hash_32() performs a multiplication while the input here
+comes from the output of SipHash in secure_seq, that is well
+distributed enough to avoid the need for yet another hash.
+
+Suggested-by: Amit Klein <aksecurity@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -640,7 +640,7 @@ int __inet_hash_connect(struct inet_time
+       net_get_random_once(table_perturb,
+                           INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+-      index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
++      index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
+       offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
+       offset %= remaining;
diff --git a/queue-4.14/tcp-dynamically-allocate-the-perturb-table-used-by-source-ports.patch b/queue-4.14/tcp-dynamically-allocate-the-perturb-table-used-by-source-ports.patch
new file mode 100644 (file)
index 0000000..803f336
--- /dev/null
@@ -0,0 +1,68 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Willy Tarreau <w@1wt.eu>
+Date: Mon, 2 May 2022 10:46:12 +0200
+Subject: tcp: dynamically allocate the perturb table used by source ports
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit e9261476184be1abd486c9434164b2acbe0ed6c2 upstream.
+
+We'll need to further increase the size of this table and it's likely
+that at some point its size will not be suitable anymore for a static
+table. Let's allocate it on boot from inet_hashinfo2_init(), which is
+called from tcp_init().
+
+Cc: Moshe Kol <moshe.kol@mail.huji.ac.il>
+Cc: Yossi Gilad <yossi.gilad@mail.huji.ac.il>
+Cc: Amit Klein <aksecurity@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[bwh: Backported to 4.14:
+ - There is no inet_hashinfo2_init(), so allocate the table in
+   inet_hashinfo_init() when called by TCP
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c |   15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -596,7 +596,8 @@ EXPORT_SYMBOL_GPL(inet_unhash);
+  * privacy, this only consumes 1 KB of kernel memory.
+  */
+ #define INET_TABLE_PERTURB_SHIFT 8
+-static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
++#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
++static u32 *table_perturb;
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+               struct sock *sk, u64 port_offset,
+@@ -636,7 +637,8 @@ int __inet_hash_connect(struct inet_time
+       if (likely(remaining > 1))
+               remaining &= ~1U;
+-      net_get_random_once(table_perturb, sizeof(table_perturb));
++      net_get_random_once(table_perturb,
++                          INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+       index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+       offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
+@@ -741,6 +743,15 @@ void inet_hashinfo_init(struct inet_hash
+               INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
+                                     i + LISTENING_NULLS_BASE);
+       }
++
++      if (h != &tcp_hashinfo)
++              return;
++
++      /* this one is used for source ports of outgoing connections */
++      table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
++                                    sizeof(*table_perturb), GFP_KERNEL);
++      if (!table_perturb)
++              panic("TCP: failed to alloc table_perturb");
+ }
+ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
diff --git a/queue-4.14/tcp-increase-source-port-perturb-table-to-2-16.patch b/queue-4.14/tcp-increase-source-port-perturb-table-to-2-16.patch
new file mode 100644 (file)
index 0000000..3a28d9d
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Willy Tarreau <w@1wt.eu>
+Date: Mon, 2 May 2022 10:46:13 +0200
+Subject: tcp: increase source port perturb table to 2^16
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit 4c2c8f03a5ab7cb04ec64724d7d176d00bcc91e5 upstream.
+
+Moshe Kol, Amit Klein, and Yossi Gilad reported being able to accurately
+identify a client by forcing it to emit only 40 times more connections
+than there are entries in the table_perturb[] table. The previous two
+improvements consisting in resalting the secret every 10s and adding
+randomness to each port selection only slightly improved the situation,
+and the current value of 2^8 was too small as it's not very difficult
+to make a client emit 10k connections in less than 10 seconds.
+
+Thus we're increasing the perturb table from 2^8 to 2^16 so that the
+same precision now requires 2.6M connections, which is more difficult in
+this time frame and harder to hide as a background activity. The impact
+is that the table now uses 256 kB instead of 1 kB, which could mostly
+affect devices making frequent outgoing connections. However such
+components usually target a small set of destinations (load balancers,
+database clients, perf assessment tools), and in practice only a few
+entries will be visited, like before.
+
+A live test at 1 million connections per second showed no performance
+difference from the previous value.
+
+Reported-by: Moshe Kol <moshe.kol@mail.huji.ac.il>
+Reported-by: Yossi Gilad <yossi.gilad@mail.huji.ac.il>
+Reported-by: Amit Klein <aksecurity@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -591,11 +591,12 @@ EXPORT_SYMBOL_GPL(inet_unhash);
+  * Note that we use 32bit integers (vs RFC 'short integers')
+  * because 2^16 is not a multiple of num_ephemeral and this
+  * property might be used by clever attacker.
+- * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
+- * we use 256 instead to really give more isolation and
+- * privacy, this only consumes 1 KB of kernel memory.
++ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
++ * attacks were since demonstrated, thus we use 65536 instead to really
++ * give more isolation and privacy, at the expense of 256kB of kernel
++ * memory.
+  */
+-#define INET_TABLE_PERTURB_SHIFT 8
++#define INET_TABLE_PERTURB_SHIFT 16
+ #define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
+ static u32 *table_perturb;
diff --git a/queue-4.14/tcp-use-different-parts-of-the-port_offset-for-index-and-offset.patch b/queue-4.14/tcp-use-different-parts-of-the-port_offset-for-index-and-offset.patch
new file mode 100644 (file)
index 0000000..f24dfd3
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Willy Tarreau <w@1wt.eu>
+Date: Mon, 2 May 2022 10:46:09 +0200
+Subject: tcp: use different parts of the port_offset for index and offset
+
+From: Willy Tarreau <w@1wt.eu>
+
+commit 9e9b70ae923baf2b5e8a0ea4fd0c8451801ac526 upstream.
+
+Amit Klein suggests that we use different parts of port_offset for the
+table's index and the port offset so that there is no direct relation
+between them.
+
+Cc: Jason A. Donenfeld <Jason@zx2c4.com>
+Cc: Moshe Kol <moshe.kol@mail.huji.ac.il>
+Cc: Yossi Gilad <yossi.gilad@mail.huji.ac.il>
+Cc: Amit Klein <aksecurity@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -639,7 +639,7 @@ int __inet_hash_connect(struct inet_time
+       net_get_random_once(table_perturb, sizeof(table_perturb));
+       index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+-      offset = READ_ONCE(table_perturb[index]) + port_offset;
++      offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
+       offset %= remaining;
+       /* In first pass we try ports of @low parity.
diff --git a/queue-4.14/xprtrdma-fix-incorrect-header-size-calculations.patch b/queue-4.14/xprtrdma-fix-incorrect-header-size-calculations.patch
new file mode 100644 (file)
index 0000000..ee3f4af
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Thu Jun 23 06:02:16 PM CEST 2022
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 15 Jul 2020 17:26:04 +0100
+Subject: xprtrdma: fix incorrect header size calculations
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 912288442cb2f431bf3c8cb097a5de83bc6dbac1 upstream.
+
+Currently the header size calculations are using an assignment
+operator instead of a += operator when accumulating the header
+size leading to incorrect sizes.  Fix this by using the correct
+operator.
+
+Addresses-Coverity: ("Unused value")
+Fixes: 302d3deb2068 ("xprtrdma: Prevent inline overflow")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+[bwh: Backported to 4.14: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/xprtrdma/rpc_rdma.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -75,7 +75,7 @@ static unsigned int rpcrdma_max_call_hea
+       /* Maximum Read list size */
+       maxsegs += 2;   /* segment for head and tail buffers */
+-      size = maxsegs * sizeof(struct rpcrdma_read_chunk);
++      size += maxsegs * sizeof(struct rpcrdma_read_chunk);
+       /* Minimal Read chunk size */
+       size += sizeof(__be32); /* segment count */
+@@ -101,7 +101,7 @@ static unsigned int rpcrdma_max_reply_he
+       /* Maximum Write list size */
+       maxsegs += 2;   /* segment for head and tail buffers */
+-      size = sizeof(__be32);          /* segment count */
++      size += sizeof(__be32);         /* segment count */
+       size += maxsegs * sizeof(struct rpcrdma_segment);
+       size += sizeof(__be32); /* list discriminator */