From: Greg Kroah-Hartman
Date: Tue, 22 Jun 2021 10:35:51 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v5.4.128~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e30d337d82d04d0604a5bbb7b46b201d5b49eb82;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	inet-use-bigger-hash-table-for-ip-id-generation.patch
	x86-fpu-reset-state-for-all-signal-restore-failures.patch
---

diff --git a/queue-4.9/inet-use-bigger-hash-table-for-ip-id-generation.patch b/queue-4.9/inet-use-bigger-hash-table-for-ip-id-generation.patch
new file mode 100644
index 00000000000..47d17ce15f3
--- /dev/null
+++ b/queue-4.9/inet-use-bigger-hash-table-for-ip-id-generation.patch
@@ -0,0 +1,113 @@
+From aa6dd211e4b1dde9d5dc25d699d35f789ae7eeba Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Wed, 24 Mar 2021 14:53:37 -0700
+Subject: inet: use bigger hash table for IP ID generation
+
+From: Eric Dumazet
+
+commit aa6dd211e4b1dde9d5dc25d699d35f789ae7eeba upstream.
+
+In commit 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
+I used a very small hash table that could be abused
+by patient attackers to reveal sensitive information.
+
+Switch to a dynamic sizing, depending on RAM size.
+
+Typical big hosts will now use 128x more storage (2 MB)
+to get a similar increase in security and reduction
+of hash collisions.
+
+As a bonus, use of alloc_large_system_hash() spreads
+allocated memory among all NUMA nodes.
+
+Fixes: 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
+Reported-by: Amit Klein
+Signed-off-by: Eric Dumazet
+Cc: Willy Tarreau
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/ipv4/route.c |   46 +++++++++++++++++++++++++++++++---------------
+ 1 file changed, 31 insertions(+), 15 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -70,6 +70,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -463,8 +464,10 @@ static struct neighbour *ipv4_neigh_look
+ 	return neigh_create(&arp_tbl, pkey, dev);
+ }
+ 
+-#define IP_IDENTS_SZ 2048u
+-
++/* Hash tables of size 2048..262144 depending on RAM size.
++ * Each bucket uses 8 bytes.
++ */
++static u32 ip_idents_mask __read_mostly;
+ static atomic_t *ip_idents __read_mostly;
+ static u32 *ip_tstamps __read_mostly;
+ 
+@@ -474,12 +477,16 @@ static u32 *ip_tstamps __read_mostly;
+  */
+ u32 ip_idents_reserve(u32 hash, int segs)
+ {
+-	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+-	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+-	u32 old = ACCESS_ONCE(*p_tstamp);
+-	u32 now = (u32)jiffies;
++	u32 bucket, old, now = (u32)jiffies;
++	atomic_t *p_id;
++	u32 *p_tstamp;
+ 	u32 delta = 0;
+ 
++	bucket = hash & ip_idents_mask;
++	p_tstamp = ip_tstamps + bucket;
++	p_id = ip_idents + bucket;
++	old = ACCESS_ONCE(*p_tstamp);
++
+ 	if (old != now && cmpxchg(p_tstamp, old, now) == old)
+ 		delta = prandom_u32_max(now - old);
+ 
+@@ -2936,18 +2943,27 @@ struct ip_rt_acct __percpu *ip_rt_acct _
+ 
+ int __init ip_rt_init(void)
+ {
++	void *idents_hash;
+ 	int rc = 0;
+ 	int cpu;
+ 
+-	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
+-	if (!ip_idents)
+-		panic("IP: failed to allocate ip_idents\n");
+-
+-	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+-
+-	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+-	if (!ip_tstamps)
+-		panic("IP: failed to allocate ip_tstamps\n");
++	/* For modern hosts, this will use 2 MB of memory */
++	idents_hash = alloc_large_system_hash("IP idents",
++					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
++					      0,
++					      16, /* one bucket per 64 KB */
++					      0,
++					      NULL,
++					      &ip_idents_mask,
++					      2048,
++					      256*1024);
++
++	ip_idents = idents_hash;
++
++	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
++
++	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
++	memset(ip_tstamps, 0, (ip_idents_mask + 1) * sizeof(*ip_tstamps));
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
diff --git a/queue-4.9/series b/queue-4.9/series
index d8816346817..d328753d851 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -46,3 +46,5 @@ can-bcm-raw-isotp-use-per-module-netdevice-notifier.patch
 usb-dwc3-core-fix-kernel-panic-when-do-reboot.patch
 tracing-do-not-stop-recording-cmdlines-when-tracing-is-off.patch
 tracing-do-not-stop-recording-comms-if-the-trace-file-is-being-read.patch
+x86-fpu-reset-state-for-all-signal-restore-failures.patch
+inet-use-bigger-hash-table-for-ip-id-generation.patch
diff --git a/queue-4.9/x86-fpu-reset-state-for-all-signal-restore-failures.patch b/queue-4.9/x86-fpu-reset-state-for-all-signal-restore-failures.patch
new file mode 100644
index 00000000000..2223356d67e
--- /dev/null
+++ b/queue-4.9/x86-fpu-reset-state-for-all-signal-restore-failures.patch
@@ -0,0 +1,56 @@
+From efa165504943f2128d50f63de0c02faf6dcceb0d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Wed, 9 Jun 2021 21:18:00 +0200
+Subject: x86/fpu: Reset state for all signal restore failures
+
+From: Thomas Gleixner
+
+commit efa165504943f2128d50f63de0c02faf6dcceb0d upstream.
+
+If access_ok() or fpregs_soft_set() fails in __fpu__restore_sig() then the
+function just returns but does not clear the FPU state as it does for all
+other fatal failures.
+
+Clear the FPU state for these failures as well.
+
+Fixes: 72a671ced66d ("x86, fpu: Unify signal handling code paths for x86 and x86_64 kernels")
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Borislav Petkov
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/87mtryyhhz.ffs@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/fpu/signal.c |   18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -276,15 +276,23 @@ static int __fpu__restore_sig(void __use
+ 		return 0;
+ 	}
+ 
+-	if (!access_ok(VERIFY_READ, buf, size))
++	if (!access_ok(VERIFY_READ, buf, size)) {
++		fpu__clear(fpu);
+ 		return -EACCES;
++	}
+ 
+ 	fpu__activate_curr(fpu);
+ 
+-	if (!static_cpu_has(X86_FEATURE_FPU))
+-		return fpregs_soft_set(current, NULL,
+-			0, sizeof(struct user_i387_ia32_struct),
+-			NULL, buf) != 0;
++	if (!static_cpu_has(X86_FEATURE_FPU)) {
++		int ret = fpregs_soft_set(current, NULL, 0,
++					  sizeof(struct user_i387_ia32_struct),
++					  NULL, buf);
++
++		if (ret)
++			fpu__clear(fpu);
++
++		return ret != 0;
++	}
+ 
+ 	if (use_xsave()) {
+ 		struct _fpx_sw_bytes fx_sw_user;
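
The stand-alone C sketch below is not part of either patch and is not kernel code; it only illustrates the bucket-selection scheme the inet patch above switches to: a power-of-two table indexed with "hash & mask" instead of a fixed "hash % 2048". It is single-threaded and uses rand() where the kernel uses cmpxchg() and prandom_u32_max(), and the names IDENTS_BUCKETS, idents_reserve() and the example hash values are invented for illustration only.

/* ip_idents_sketch.c - illustration only, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IDENTS_BUCKETS 2048u                 /* old fixed size; the patch scales this up to 262144 */
static const uint32_t idents_mask = IDENTS_BUCKETS - 1;

static uint32_t ip_ids[IDENTS_BUCKETS];      /* per-bucket IP ID counters      */
static uint32_t ip_stamps[IDENTS_BUCKETS];   /* per-bucket last-use timestamps */

/* Reserve 'segs' consecutive IDs for the flow hashed to 'hash' at time 'now'. */
static uint32_t idents_reserve(uint32_t hash, uint32_t segs, uint32_t now)
{
	uint32_t bucket = hash & idents_mask;   /* power-of-two table: mask instead of modulo */
	uint32_t old = ip_stamps[bucket];
	uint32_t delta = 0;

	if (old != now) {
		ip_stamps[bucket] = now;
		/* perturb seldom-used buckets so an observer cannot count packets */
		delta = (uint32_t)rand() % (now - old);
	}
	ip_ids[bucket] += segs + delta;
	return ip_ids[bucket] - segs;
}

int main(void)
{
	/* Hashes differing only at bit 12: they share a bucket with mask 0x7ff
	 * (2048 entries) but not with mask 0x3ffff (262144 entries). */
	uint32_t h1 = 0x00000123, h2 = 0x00001123;

	printf("bucket(h1)=%u bucket(h2)=%u\n", h1 & idents_mask, h2 & idents_mask);
	printf("ID for h1: %u\n", idents_reserve(h1, 1, 1000));
	printf("ID for h2: %u\n", idents_reserve(h2, 1, 2000));
	return 0;
}

With the old fixed 2048-entry table the two example flows land in the same bucket; the patched kernel instead sizes the table between 2048 and 262144 buckets via alloc_large_system_hash() depending on RAM, so far fewer flows share a counter and the timing information an observer can extract from shared ID streams shrinks accordingly.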