random32: update the net random state on interrupt and activity
author Willy Tarreau <w@1wt.eu>
Fri, 10 Jul 2020 13:23:19 +0000 (15:23 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Aug 2020 07:36:20 +0000 (09:36 +0200)
commit f227e3ec3b5cad859ad15666874405e8c1bbc1d4 upstream.

This modifies the first 32 bits out of the 128 bits of a random CPU's
net_rand_state on interrupt or CPU activity to complicate remote
observations that could lead to guessing the network RNG's internal
state.
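
For reference, a user-space sketch (not the kernel code itself) of why touching
s1 matters: net_rand_state is the four-word state of the LFSR113-style
Tausworthe generator behind prandom_u32(), so adding even a small unpredictable
value to s1 moves the whole output stream onto a different trajectory:

    /* Standalone approximation of prandom_u32_state() from lib/random32.c
     * (LFSR113 constants per L'Ecuyer); for illustration only. */
    #include <stdint.h>
    #include <stdio.h>

    struct rnd_state { uint32_t s1, s2, s3, s4; };

    static uint32_t prandom_u32_state(struct rnd_state *st)
    {
            st->s1 = ((st->s1 & 4294967294U) << 18) ^ (((st->s1 << 6)  ^ st->s1) >> 13);
            st->s2 = ((st->s2 & 4294967288U) << 2)  ^ (((st->s2 << 2)  ^ st->s2) >> 27);
            st->s3 = ((st->s3 & 4294967280U) << 7)  ^ (((st->s3 << 13) ^ st->s3) >> 21);
            st->s4 = ((st->s4 & 4294967168U) << 13) ^ (((st->s4 << 3)  ^ st->s4) >> 12);
            return st->s1 ^ st->s2 ^ st->s3 ^ st->s4;
    }

    int main(void)
    {
            /* Seeds chosen to satisfy the LFSR113 minimum-seed constraints. */
            struct rnd_state a = { 0x12345678, 0x9abcdef0, 0x13579bdf, 0x2468ace0 };
            struct rnd_state b = a;

            b.s1 += 0xdeadbeef;     /* the kind of perturbation this patch injects */

            for (int i = 0; i < 4; i++)
                    printf("%08x  %08x\n", prandom_u32_state(&a), prandom_u32_state(&b));
            return 0;
    }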

Note that depending on some network devices' interrupt rate moderation
or binding, this re-seeding might happen on every packet or even almost
never.

In addition, with NOHZ some CPUs might not even get timer interrupts,
leaving their local state rarely updated, while they are running
networked processes making use of the random state.  For this reason, we
also perform this update in update_process_times() in order to at least
update the state when there is user or system activity, since it's the
only case we care about.
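
For illustration, a user-space sketch of the increment used below: rotating
jiffies left by 24 bits places its fast-changing low bits into the upper bits
of the value added to s1, so even a one-tick difference modifies the high bits
of the state; and since jiffies follows wall-clock time rather than the number
of calls, the increment is not a simple function of the call count (the rol32()
helper here mirrors the kernel helper of the same name; the snippet is only a
demonstration):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t rol32(uint32_t word, unsigned int shift)
    {
            return (word << (shift & 31)) | (word >> ((-shift) & 31));
    }

    int main(void)
    {
            /* user_tick is 0 or 1 in update_process_times(); use 1 here. */
            for (uint32_t jiffies = 4000; jiffies < 4004; jiffies++)
                    printf("jiffies=%u -> s1 += 0x%08x\n",
                           jiffies, rol32(jiffies, 24) + 1);
            return 0;
    }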

Reported-by: Amit Klein <aksecurity@gmail.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/char/random.c
include/linux/random.h
kernel/time/timer.c
lib/random32.c

diff --git a/drivers/char/random.c b/drivers/char/random.c
index d5f970d039bbac1a7fdb81a78e3b17c72f161c95..6a5d4dfafc474f2a52885a0af414b927381e6a7a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1257,6 +1257,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 
        fast_mix(fast_pool);
        add_interrupt_bench(cycles);
+       this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
 
        if (unlikely(crng_init == 0)) {
                if ((fast_pool->count >= 64) &&
diff --git a/include/linux/random.h b/include/linux/random.h
index 445a0ea4ff49a1de4edaf8e60ab82e2e36aa5080..d729f7614215adfa62bf613a1467b7e6295ca451 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -9,6 +9,7 @@
 
 #include <linux/list.h>
 #include <linux/once.h>
+#include <linux/percpu.h>
 
 #include <uapi/linux/random.h>
 
@@ -115,6 +116,8 @@ struct rnd_state {
        __u32 s1, s2, s3, s4;
 };
 
+DECLARE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 6c54cf481fdef73b9636e416603260b968fab54a..61e41ea3a96ec2cb1660bf61e3b9db14a16c338c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -44,6 +44,7 @@
 #include <linux/sched/debug.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/random.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -1654,6 +1655,13 @@ void update_process_times(int user_tick)
        scheduler_tick();
        if (IS_ENABLED(CONFIG_POSIX_TIMERS))
                run_posix_cpu_timers(p);
+
+       /* The current CPU might make use of net randoms without receiving IRQs
+        * to renew them often enough. Let's update the net_rand_state from a
+        * non-constant value that's not affine to the number of calls to make
+        * sure it's updated when there's some activity (we don't care in idle).
+        */
+       this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
 }
 
 /**
diff --git a/lib/random32.c b/lib/random32.c
index 4aaa76404d561b86609bbd1c6eb3ca4620ca81bf..7abd634a718e247010404d65bcd527a6c99e089f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 
 /**
  *     prandom_u32_state - seeded pseudo-random number generator.