From: Greg Kroah-Hartman
Date: Thu, 26 May 2022 12:24:25 +0000 (+0200)
Subject: 4.19-stable patches
X-Git-Tag: v5.18.1~19
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=db6dca5d1f047316a661eb0f4c104127f8b0a39d;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	secure_seq-use-the-64-bits-of-the-siphash-for-port-offset-calculation.patch
	tcp-change-source-port-randomizarion-at-connect-time.patch
---

diff --git a/queue-4.19/secure_seq-use-the-64-bits-of-the-siphash-for-port-offset-calculation.patch b/queue-4.19/secure_seq-use-the-64-bits-of-the-siphash-for-port-offset-calculation.patch
new file mode 100644
index 00000000000..21e137fddfa
--- /dev/null
+++ b/queue-4.19/secure_seq-use-the-64-bits-of-the-siphash-for-port-offset-calculation.patch
@@ -0,0 +1,139 @@
+From b2d057560b8107c633b39aabe517ff9d93f285e3 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau
+Date: Mon, 2 May 2022 10:46:08 +0200
+Subject: secure_seq: use the 64 bits of the siphash for port offset calculation
+
+From: Willy Tarreau
+
+commit b2d057560b8107c633b39aabe517ff9d93f285e3 upstream.
+
+SipHash replaced MD5 in secure_ipv{4,6}_port_ephemeral() via commit
+7cd23e5300c1 ("secure_seq: use SipHash in place of MD5"), but the output
+remained truncated to 32-bit only. In order to exploit more bits from the
+hash, let's make the functions return the full 64-bit of siphash_3u32().
+We also make sure the port offset calculation in __inet_hash_connect()
+remains done on 32-bit to avoid the need for div_u64_rem() and an extra
+cost on 32-bit systems.
+
+Cc: Jason A. Donenfeld
+Cc: Moshe Kol
+Cc: Yossi Gilad
+Cc: Amit Klein
+Reviewed-by: Eric Dumazet
+Signed-off-by: Willy Tarreau
+Signed-off-by: Jakub Kicinski
+[SG: Adjusted context]
+Signed-off-by: Stefan Ghinea
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/net/inet_hashtables.h |    2 +-
+ include/net/secure_seq.h      |    4 ++--
+ net/core/secure_seq.c         |    4 ++--
+ net/ipv4/inet_hashtables.c    |   10 ++++++----
+ net/ipv6/inet6_hashtables.c   |    4 ++--
+ 5 files changed, 13 insertions(+), 11 deletions(-)
+
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -407,7 +407,7 @@ static inline void sk_rcv_saddr_set(stru
+ }
+ 
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+-			struct sock *sk, u32 port_offset,
++			struct sock *sk, u64 port_offset,
+ 			int (*check_established)(struct inet_timewait_death_row *,
+ 						 struct sock *, __u16,
+ 						 struct inet_timewait_sock **));
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -4,8 +4,8 @@
+ 
+ #include <linux/types.h>
+ 
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ 			       __be16 dport);
+ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ 		   __be16 sport, __be16 dport);
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -96,7 +96,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr
+ }
+ EXPORT_SYMBOL(secure_tcpv6_seq);
+ 
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ 			       __be16 dport)
+ {
+ 	const struct {
+@@ -146,7 +146,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32
+ }
+ EXPORT_SYMBOL_GPL(secure_tcp_seq);
+ 
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+ {
+ 	net_secret_init();
+ 	return siphash_4u32((__force u32)saddr, (__force u32)daddr,
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -507,7 +507,7 @@ not_unique:
+ 	return -EADDRNOTAVAIL;
+ }
+ 
+-static u32 inet_sk_port_offset(const struct sock *sk)
++static u64 inet_sk_port_offset(const struct sock *sk)
+ {
+ 	const struct inet_sock *inet = inet_sk(sk);
+ 
+@@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(inet_unhash);
+ static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+ 
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+-		struct sock *sk, u32 port_offset,
++		struct sock *sk, u64 port_offset,
+ 		int (*check_established)(struct inet_timewait_death_row *,
+ 		struct sock *, __u16, struct inet_timewait_sock **))
+ {
+@@ -766,7 +766,9 @@ int __inet_hash_connect(struct inet_time
+ 	net_get_random_once(table_perturb, sizeof(table_perturb));
+ 	index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+ 
+-	offset = (READ_ONCE(table_perturb[index]) + port_offset) % remaining;
++	offset = READ_ONCE(table_perturb[index]) + port_offset;
++	offset %= remaining;
++
+ 	/* In first pass we try ports of @low parity.
+ 	 * inet_csk_get_port() does the opposite choice.
+ 	 */
+@@ -842,7 +844,7 @@ ok:
+ int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 		      struct sock *sk)
+ {
+-	u32 port_offset = 0;
++	u64 port_offset = 0;
+ 
+ 	if (!inet_sk(sk)->inet_num)
+ 		port_offset = inet_sk_port_offset(sk);
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -311,7 +311,7 @@ not_unique:
+ 	return -EADDRNOTAVAIL;
+ }
+ 
+-static u32 inet6_sk_port_offset(const struct sock *sk)
++static u64 inet6_sk_port_offset(const struct sock *sk)
+ {
+ 	const struct inet_sock *inet = inet_sk(sk);
+ 
+@@ -323,7 +323,7 @@ static u32 inet6_sk_port_offset(const st
+ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+ 		       struct sock *sk)
+ {
+-	u32 port_offset = 0;
++	u64 port_offset = 0;
+ 
+ 	if (!inet_sk(sk)->inet_num)
+ 		port_offset = inet6_sk_port_offset(sk);
diff --git a/queue-4.19/series b/queue-4.19/series
index a3dddf37ed8..0edebbe36df 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -1,2 +1,4 @@
 x86-pci-xen-disable-pci-msi-masking-for-xen_hvm-guests.patch
 staging-rtl8723bs-prevent-ssid-overflow-in-rtw_wx_set_scan.patch
+tcp-change-source-port-randomizarion-at-connect-time.patch
+secure_seq-use-the-64-bits-of-the-siphash-for-port-offset-calculation.patch
diff --git a/queue-4.19/tcp-change-source-port-randomizarion-at-connect-time.patch b/queue-4.19/tcp-change-source-port-randomizarion-at-connect-time.patch
new file mode 100644
index 00000000000..ab2b132265e
--- /dev/null
+++ b/queue-4.19/tcp-change-source-port-randomizarion-at-connect-time.patch
@@ -0,0 +1,98 @@
+From 190cc82489f46f9d88e73c81a47e14f80a791e1a Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Tue, 9 Feb 2021 11:20:27 -0800
+Subject: tcp: change source port randomizarion at connect() time
+
+From: Eric Dumazet
+
+commit 190cc82489f46f9d88e73c81a47e14f80a791e1a upstream.
+
+RFC 6056 (Recommendations for Transport-Protocol Port Randomization)
+provides good summary of why source selection needs extra care.
+
+David Dworken reminded us that linux implements Algorithm 3
+as described in RFC 6056 3.3.3
+
+Quoting David :
+ In the context of the web, this creates an interesting info leak where
+ websites can count how many TCP connections a user's computer is
+ establishing over time. For example, this allows a website to count
+ exactly how many subresources a third party website loaded.
+ This also allows:
+ - Distinguishing between different users behind a VPN based on
+   distinct source port ranges.
+ - Tracking users over time across multiple networks.
+ - Covert communication channels between different browsers/browser
+   profiles running on the same computer
+ - Tracking what applications are running on a computer based on
+   the pattern of how fast source ports are getting incremented.
+
+Section 3.3.4 describes an enhancement, that reduces
+attackers ability to use the basic information currently
+stored into the shared 'u32 hint'.
+
+This change also decreases collision rate when
+multiple applications need to connect() to
+different destinations.
+
+Signed-off-by: Eric Dumazet
+Reported-by: David Dworken
+Cc: Willem de Bruijn
+Signed-off-by: David S. Miller
+[SG: Adjusted context]
+Signed-off-by: Stefan Ghinea
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/ipv4/inet_hashtables.c |   20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -714,6 +714,17 @@ unlock:
+ }
+ EXPORT_SYMBOL_GPL(inet_unhash);
+ 
++/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
++ * Note that we use 32bit integers (vs RFC 'short integers')
++ * because 2^16 is not a multiple of num_ephemeral and this
++ * property might be used by clever attacker.
++ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
++ * we use 256 instead to really give more isolation and
++ * privacy, this only consumes 1 KB of kernel memory.
++ */
++#define INET_TABLE_PERTURB_SHIFT 8
++static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
++
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 		struct sock *sk, u32 port_offset,
+ 		int (*check_established)(struct inet_timewait_death_row *,
+@@ -727,7 +738,7 @@ int __inet_hash_connect(struct inet_time
+ 	struct inet_bind_bucket *tb;
+ 	u32 remaining, offset;
+ 	int ret, i, low, high;
+-	static u32 hint;
++	u32 index;
+ 
+ 	if (port) {
+ 		head = &hinfo->bhash[inet_bhashfn(net, port,
+@@ -752,7 +763,10 @@ int __inet_hash_connect(struct inet_time
+ 	if (likely(remaining > 1))
+ 		remaining &= ~1U;
+ 
+-	offset = (hint + port_offset) % remaining;
++	net_get_random_once(table_perturb, sizeof(table_perturb));
++	index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
++
++	offset = (READ_ONCE(table_perturb[index]) + port_offset) % remaining;
+ 	/* In first pass we try ports of @low parity.
+ 	 * inet_csk_get_port() does the opposite choice.
+ 	 */
+@@ -805,7 +819,7 @@ next_port:
+ 	return -EADDRNOTAVAIL;
+ 
+ ok:
+-	hint += i + 2;
++	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
+ 
+ 	/* Head lock still held and bh's disabled */
+ 	inet_bind_hash(sk, tb, port);
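---

The sketch below is not part of the queued patches; it is a stand-alone user-space illustration of the RFC 6056 3.3.4 "Algorithm 4" double-hash port selection that the two backports above implement. The function and variable names (demo_hash, pick_port) are invented for the example, and the FNV-style mix merely stands in for the kernel's keyed SipHash; only the 256-entry perturbation table and the "per-destination counter + hash, modulo port range" structure mirror net/ipv4/inet_hashtables.c.

/* Illustrative sketch only, not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TABLE_PERTURB_SHIFT 8			/* 256 buckets, as in the patch */
#define TABLE_PERTURB_SIZE  (1u << TABLE_PERTURB_SHIFT)

/* One perturbation counter per bucket (the kernel's table_perturb[]). */
static uint32_t table_perturb[TABLE_PERTURB_SIZE];

/* Keyed hash of (saddr, daddr, dport).  FNV-1a is used only to keep the
 * sketch dependency-free; a real implementation needs a keyed PRF such
 * as SipHash. */
static uint64_t demo_hash(uint32_t saddr, uint32_t daddr, uint16_t dport,
			  uint64_t secret)
{
	uint8_t buf[10];
	uint64_t h = 14695981039346656037ull ^ secret;

	memcpy(buf, &saddr, 4);
	memcpy(buf + 4, &daddr, 4);
	memcpy(buf + 8, &dport, 2);
	for (size_t i = 0; i < sizeof(buf); i++) {
		h ^= buf[i];
		h *= 1099511628211ull;
	}
	return h;
}

/* Pick an ephemeral source port for one destination. */
static uint16_t pick_port(uint32_t saddr, uint32_t daddr, uint16_t dport,
			  uint64_t secret)
{
	const uint32_t low = 32768, high = 60999;	/* typical ip_local_port_range */
	uint32_t remaining = high - low + 1;
	uint64_t port_offset = demo_hash(saddr, daddr, dport, secret);

	/* F2(): the hash selects one of the 256 counters... */
	uint32_t index = (uint32_t)(port_offset >> 32) & (TABLE_PERTURB_SIZE - 1);
	/* ...and F1(): counter + hash give the starting offset in the range. */
	uint32_t offset = (table_perturb[index] + (uint32_t)port_offset) % remaining;

	/* The kernel then probes offset, offset + 2, ... for a free port;
	 * the sketch simply takes the first candidate. */
	uint16_t port = (uint16_t)(low + offset);

	/* Advance only this bucket's counter (the WRITE_ONCE() in the patch),
	 * so unrelated destinations no longer share a single global 'hint'. */
	table_perturb[index] += 2;
	return port;
}

int main(void)
{
	uint64_t secret = 0x0123456789abcdefull;	/* stand-in for net_secret */

	/* Repeated connects to one destination walk that bucket's counter;
	 * a different destination starts from an unrelated offset. */
	printf("dst A, 1st connect: %u\n", (unsigned)pick_port(0x0a000001, 0xc0a80001, 443, secret));
	printf("dst A, 2nd connect: %u\n", (unsigned)pick_port(0x0a000001, 0xc0a80001, 443, secret));
	printf("dst B, 1st connect: %u\n", (unsigned)pick_port(0x0a000001, 0x08080808, 443, secret));
	return 0;
}

With the shared 'hint' of the old code, destination B's first port would have been derived from the same counter that destination A had just advanced; in the double-hash scheme each destination hashes to its own counter, which is what makes cross-destination port-pattern tracking much harder.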