3.5-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 9 Aug 2012 19:34:09 +0000 (12:34 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 9 Aug 2012 19:34:09 +0000 (12:34 -0700)
added patches:
random-make-add_interrupt_randomness-do-something-sane.patch
random-use-lockless-techniques-in-the-interrupt-path.patch

queue-3.5/random-make-add_interrupt_randomness-do-something-sane.patch [new file with mode: 0644]
queue-3.5/random-use-lockless-techniques-in-the-interrupt-path.patch [new file with mode: 0644]
queue-3.5/series

diff --git a/queue-3.5/random-make-add_interrupt_randomness-do-something-sane.patch b/queue-3.5/random-make-add_interrupt_randomness-do-something-sane.patch
new file mode 100644
index 0000000..0b6ecff
--- /dev/null
+++ b/queue-3.5/random-make-add_interrupt_randomness-do-something-sane.patch
@@ -0,0 +1,279 @@
+From 775f4b297b780601e61787b766f306ed3e1d23eb Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Mon, 2 Jul 2012 07:52:16 -0400
+Subject: random: make 'add_interrupt_randomness()' do something sane
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 775f4b297b780601e61787b766f306ed3e1d23eb upstream.
+
+We've been moving away from add_interrupt_randomness() for various
+reasons: it's too expensive to do on every interrupt, and flooding the
+CPU with interrupts could theoretically cause bogus floods of entropy
+from a somewhat externally controllable source.
+
+This solves both problems by limiting the actual randomness addition
+to just once a second or after 64 interrupts, whichever comes first.
+During that time, the interrupt cycle data is buffered up in a per-cpu
+pool.  Also, we make sure the nonblocking pool used by urandom is
+initialized before we start feeding the normal input pool.  This
+assures that /dev/urandom is returning unpredictable data as soon as
+possible.
+
+(Based on an original patch by Linus, but significantly modified by
+tytso.)
+
+Tested-by: Eric Wustrow <ewust@umich.edu>
+Reported-by: Eric Wustrow <ewust@umich.edu>
+Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
+Reported-by: Zakir Durumeric <zakir@umich.edu>
+Reported-by: J. Alex Halderman <jhalderm@umich.edu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c     |  103 ++++++++++++++++++++++++++++++++++++++--------
+ drivers/mfd/ab3100-core.c |    2 --
+ include/linux/random.h    |    2 +-
+ kernel/irq/handle.c       |    7 +++----
+ 4 files changed, 90 insertions(+), 24 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -127,19 +127,15 @@
+  *
+  *    void add_input_randomness(unsigned int type, unsigned int code,
+  *                                unsigned int value);
+- *    void add_interrupt_randomness(int irq);
++ *    void add_interrupt_randomness(int irq, int irq_flags);
+  *    void add_disk_randomness(struct gendisk *disk);
+  *
+  * add_input_randomness() uses the input layer interrupt timing, as well as
+  * the event type information from the hardware.
+  *
+- * add_interrupt_randomness() uses the inter-interrupt timing as random
+- * inputs to the entropy pool.  Note that not all interrupts are good
+- * sources of randomness!  For example, the timer interrupts is not a
+- * good choice, because the periodicity of the interrupts is too
+- * regular, and hence predictable to an attacker.  Network Interface
+- * Controller interrupts are a better measure, since the timing of the
+- * NIC interrupts are more unpredictable.
++ * add_interrupt_randomness() uses the interrupt timing as random
++ * inputs to the entropy pool. Using the cycle counters and the irq source
++ * as inputs, it feeds the randomness roughly once a second.
+  *
+  * add_disk_randomness() uses what amounts to the seek time of block
+  * layer request events, on a per-disk_devt basis, as input to the
+@@ -248,6 +244,7 @@
+ #include <linux/percpu.h>
+ #include <linux/cryptohash.h>
+ #include <linux/fips.h>
++#include <linux/ptrace.h>
+ #ifdef CONFIG_GENERIC_HARDIRQS
+ # include <linux/irq.h>
+@@ -256,6 +253,7 @@
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+ #include <asm/irq.h>
++#include <asm/irq_regs.h>
+ #include <asm/io.h>
+ /*
+@@ -421,7 +419,9 @@ struct entropy_store {
+       spinlock_t lock;
+       unsigned add_ptr;
+       int entropy_count;
++      int entropy_total;
+       int input_rotate;
++      unsigned int initialized:1;
+       __u8 last_data[EXTRACT_SIZE];
+ };
+@@ -454,6 +454,10 @@ static struct entropy_store nonblocking_
+       .pool = nonblocking_pool_data
+ };
++static __u32 const twist_table[8] = {
++      0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
++      0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++
+ /*
+  * This function adds bytes into the entropy "pool".  It does not
+  * update the entropy estimate.  The caller should call
+@@ -467,9 +471,6 @@ static struct entropy_store nonblocking_
+ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+                                  int nbytes, __u8 out[64])
+ {
+-      static __u32 const twist_table[8] = {
+-              0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+-              0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+       unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+       int input_rotate;
+       int wordmask = r->poolinfo->poolwords - 1;
+@@ -528,6 +529,36 @@ static void mix_pool_bytes(struct entrop
+        mix_pool_bytes_extract(r, in, bytes, NULL);
+ }
++struct fast_pool {
++      __u32           pool[4];
++      unsigned long   last;
++      unsigned short  count;
++      unsigned char   rotate;
++      unsigned char   last_timer_intr;
++};
++
++/*
++ * This is a fast mixing routine used by the interrupt randomness
++ * collector.  It's hardcoded for an 128 bit pool and assumes that any
++ * locks that might be needed are taken by the caller.
++ */
++static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
++{
++      const char      *bytes = in;
++      __u32           w;
++      unsigned        i = f->count;
++      unsigned        input_rotate = f->rotate;
++
++      while (nbytes--) {
++              w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
++                      f->pool[(i + 1) & 3];
++              f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
++              input_rotate += (i++ & 3) ? 7 : 14;
++      }
++      f->count = i;
++      f->rotate = input_rotate;
++}
++
+ /*
+  * Credit (or debit) the entropy store with n bits of entropy
+  */
+@@ -551,6 +582,12 @@ static void credit_entropy_bits(struct e
+               entropy_count = r->poolinfo->POOLBITS;
+       r->entropy_count = entropy_count;
++      if (!r->initialized && nbits > 0) {
++              r->entropy_total += nbits;
++              if (r->entropy_total > 128)
++                      r->initialized = 1;
++      }
++
+       /* should we wake readers? */
+       if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+               wake_up_interruptible(&random_read_wait);
+@@ -700,17 +737,48 @@ void add_input_randomness(unsigned int t
+ }
+ EXPORT_SYMBOL_GPL(add_input_randomness);
+-void add_interrupt_randomness(int irq)
++static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
++
++void add_interrupt_randomness(int irq, int irq_flags)
+ {
+-      struct timer_rand_state *state;
++      struct entropy_store    *r;
++      struct fast_pool        *fast_pool = &__get_cpu_var(irq_randomness);
++      struct pt_regs          *regs = get_irq_regs();
++      unsigned long           now = jiffies;
++      __u32                   input[4], cycles = get_cycles();
++
++      input[0] = cycles ^ jiffies;
++      input[1] = irq;
++      if (regs) {
++              __u64 ip = instruction_pointer(regs);
++              input[2] = ip;
++              input[3] = ip >> 32;
++      }
+-      state = get_timer_rand_state(irq);
++      fast_mix(fast_pool, input, sizeof(input));
+-      if (state == NULL)
++      if ((fast_pool->count & 1023) &&
++          !time_after(now, fast_pool->last + HZ))
+               return;
+-      DEBUG_ENT("irq event %d\n", irq);
+-      add_timer_randomness(state, 0x100 + irq);
++      fast_pool->last = now;
++
++      r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++      mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
++      /*
++       * If we don't have a valid cycle counter, and we see
++       * back-to-back timer interrupts, then skip giving credit for
++       * any entropy.
++       */
++      if (cycles == 0) {
++              if (irq_flags & __IRQF_TIMER) {
++                      if (fast_pool->last_timer_intr)
++                              return;
++                      fast_pool->last_timer_intr = 1;
++              } else
++                      fast_pool->last_timer_intr = 0;
++      }
++      credit_entropy_bits(r, 1);
+ }
+ #ifdef CONFIG_BLOCK
+@@ -971,6 +1039,7 @@ static void init_std_data(struct entropy
+       spin_lock_irqsave(&r->lock, flags);
+       r->entropy_count = 0;
++      r->entropy_total = 0;
+       spin_unlock_irqrestore(&r->lock, flags);
+       now = ktime_get_real();
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(in
+       u32 fatevent;
+       int err;
+-      add_interrupt_randomness(irq);
+-
+       err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
+                                      event_regs, 3);
+       if (err)
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -52,7 +52,7 @@ extern void rand_initialize_irq(int irq)
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+                                unsigned int value);
+-extern void add_interrupt_randomness(int irq);
++extern void add_interrupt_randomness(int irq, int irq_flags);
+ extern void get_random_bytes(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -133,7 +133,7 @@ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
+       irqreturn_t retval = IRQ_NONE;
+-      unsigned int random = 0, irq = desc->irq_data.irq;
++      unsigned int flags = 0, irq = desc->irq_data.irq;
+       do {
+               irqreturn_t res;
+@@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc
+                       /* Fall through to add to randomness */
+               case IRQ_HANDLED:
+-                      random |= action->flags;
++                      flags |= action->flags;
+                       break;
+               default:
+@@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc
+               action = action->next;
+       } while (action);
+-      if (random & IRQF_SAMPLE_RANDOM)
+-              add_interrupt_randomness(irq);
++      add_interrupt_randomness(irq, flags);
+       if (!noirqdebug)
+               note_interrupt(irq, desc, retval);
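
The core trick of the patch above is to mix cheaply on every interrupt
into a small per-CPU pool and to push that pool into the input pool only
occasionally. Below is a minimal userspace sketch of that rate-limited
buffering in plain C. It is illustrative only, not the kernel code: the
names add_sample() and mix_into_input_pool() are invented here, the
64-sample threshold follows the commit message (the hunk above actually
tests fast_pool->count & 1023), and the toy xor stands in for the
twist_table mix.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fast_pool {
	uint32_t pool[4];	/* small per-CPU scratch pool */
	time_t   last;		/* time of the last flush */
	unsigned count;		/* samples mixed so far */
};

/* Stand-in for feeding the real input pool. */
static void mix_into_input_pool(const uint32_t pool[4])
{
	printf("flush: %08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       pool[0], pool[1], pool[2], pool[3]);
}

/* Toy per-sample mix: cheap enough to run on every interrupt. */
static void fast_mix(struct fast_pool *f, uint32_t sample)
{
	unsigned i = f->count++ & 3;

	f->pool[i] ^= sample + (f->pool[(i + 1) & 3] << 7);
}

/*
 * One call per "interrupt": always mix, but hand the buffered data
 * onward at most once a second or every 64 samples, whichever comes
 * first.
 */
static void add_sample(struct fast_pool *f, uint32_t sample)
{
	time_t now = time(NULL);

	fast_mix(f, sample);
	if ((f->count % 64) && now - f->last < 1)
		return;
	mix_into_input_pool(f->pool);
	f->last = now;
}

int main(void)
{
	struct fast_pool fp = { .last = time(NULL) };

	for (int i = 0; i < 200; i++)
		add_sample(&fp, (uint32_t)clock());
	return 0;
}

Note that count is never reset, matching the kernel's mask test: a flush
triggers whenever the counter crosses a multiple of the threshold or a
second has elapsed since the last flush.
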
diff --git a/queue-3.5/random-use-lockless-techniques-in-the-interrupt-path.patch b/queue-3.5/random-use-lockless-techniques-in-the-interrupt-path.patch
new file mode 100644
index 0000000..ef19d2d
--- /dev/null
+++ b/queue-3.5/random-use-lockless-techniques-in-the-interrupt-path.patch
@@ -0,0 +1,243 @@
+From 902c098a3663de3fa18639efbb71b6080f0bcd3c Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Wed, 4 Jul 2012 10:38:30 -0400
+Subject: random: use lockless techniques in the interrupt path
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 902c098a3663de3fa18639efbb71b6080f0bcd3c upstream.
+
+The real-time Linux folks don't like add_interrupt_randomness() taking
+a spinlock since it is called in the low-level interrupt routine.
+This also allows us to reduce the overhead in the fast path of the
+random driver, which is the interrupt collection path.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c |   78 +++++++++++++++++++++++++-------------------------
+ 1 file changed, 39 insertions(+), 39 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -418,9 +418,9 @@ struct entropy_store {
+       /* read-write data: */
+       spinlock_t lock;
+       unsigned add_ptr;
++      unsigned input_rotate;
+       int entropy_count;
+       int entropy_total;
+-      int input_rotate;
+       unsigned int initialized:1;
+       __u8 last_data[EXTRACT_SIZE];
+ };
+@@ -468,26 +468,24 @@ static __u32 const twist_table[8] = {
+  * it's cheap to do so and helps slightly in the expected case where
+  * the entropy is concentrated in the low-order bits.
+  */
+-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+-                                 int nbytes, __u8 out[64])
++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
++                           int nbytes, __u8 out[64])
+ {
+       unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+       int input_rotate;
+       int wordmask = r->poolinfo->poolwords - 1;
+       const char *bytes = in;
+       __u32 w;
+-      unsigned long flags;
+-      /* Taps are constant, so we can load them without holding r->lock.  */
+       tap1 = r->poolinfo->tap1;
+       tap2 = r->poolinfo->tap2;
+       tap3 = r->poolinfo->tap3;
+       tap4 = r->poolinfo->tap4;
+       tap5 = r->poolinfo->tap5;
+-      spin_lock_irqsave(&r->lock, flags);
+-      input_rotate = r->input_rotate;
+-      i = r->add_ptr;
++      smp_rmb();
++      input_rotate = ACCESS_ONCE(r->input_rotate);
++      i = ACCESS_ONCE(r->add_ptr);
+       /* mix one byte at a time to simplify size handling and churn faster */
+       while (nbytes--) {
+@@ -514,19 +512,23 @@ static void mix_pool_bytes_extract(struc
+               input_rotate += i ? 7 : 14;
+       }
+-      r->input_rotate = input_rotate;
+-      r->add_ptr = i;
++      ACCESS_ONCE(r->input_rotate) = input_rotate;
++      ACCESS_ONCE(r->add_ptr) = i;
++      smp_wmb();
+       if (out)
+               for (j = 0; j < 16; j++)
+                       ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+-
+-      spin_unlock_irqrestore(&r->lock, flags);
+ }
+-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
++static void mix_pool_bytes(struct entropy_store *r, const void *in,
++                           int nbytes, __u8 out[64])
+ {
+-       mix_pool_bytes_extract(r, in, bytes, NULL);
++      unsigned long flags;
++
++      spin_lock_irqsave(&r->lock, flags);
++      __mix_pool_bytes(r, in, nbytes, out);
++      spin_unlock_irqrestore(&r->lock, flags);
+ }
+ struct fast_pool {
+@@ -564,23 +566,22 @@ static void fast_mix(struct fast_pool *f
+  */
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+-      unsigned long flags;
+-      int entropy_count;
++      int entropy_count, orig;
+       if (!nbits)
+               return;
+-      spin_lock_irqsave(&r->lock, flags);
+-
+       DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+-      entropy_count = r->entropy_count;
++retry:
++      entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+       entropy_count += nbits;
+       if (entropy_count < 0) {
+               DEBUG_ENT("negative entropy/overflow\n");
+               entropy_count = 0;
+       } else if (entropy_count > r->poolinfo->POOLBITS)
+               entropy_count = r->poolinfo->POOLBITS;
+-      r->entropy_count = entropy_count;
++      if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++              goto retry;
+       if (!r->initialized && nbits > 0) {
+               r->entropy_total += nbits;
+@@ -593,7 +594,6 @@ static void credit_entropy_bits(struct e
+               wake_up_interruptible(&random_read_wait);
+               kill_fasync(&fasync, SIGIO, POLL_IN);
+       }
+-      spin_unlock_irqrestore(&r->lock, flags);
+ }
+ /*********************************************************************
+@@ -680,7 +680,7 @@ static void add_timer_randomness(struct
+               sample.cycles = get_cycles();
+       sample.num = num;
+-      mix_pool_bytes(&input_pool, &sample, sizeof(sample));
++      mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+       /*
+        * Calculate number of bits of randomness we probably added.
+@@ -764,7 +764,7 @@ void add_interrupt_randomness(int irq, i
+       fast_pool->last = now;
+       r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+-      mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
++      __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+       /*
+        * If we don't have a valid cycle counter, and we see
+        * back-to-back timer interrupts, then skip giving credit for
+@@ -829,7 +829,7 @@ static void xfer_secondary_pool(struct e
+               bytes = extract_entropy(r->pull, tmp, bytes,
+                                       random_read_wakeup_thresh / 8, rsvd);
+-              mix_pool_bytes(r, tmp, bytes);
++              mix_pool_bytes(r, tmp, bytes, NULL);
+               credit_entropy_bits(r, bytes*8);
+       }
+ }
+@@ -890,9 +890,11 @@ static void extract_buf(struct entropy_s
+       int i;
+       __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+       __u8 extract[64];
++      unsigned long flags;
+       /* Generate a hash across the pool, 16 words (512 bits) at a time */
+       sha_init(hash);
++      spin_lock_irqsave(&r->lock, flags);
+       for (i = 0; i < r->poolinfo->poolwords; i += 16)
+               sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+@@ -905,7 +907,8 @@ static void extract_buf(struct entropy_s
+        * brute-forcing the feedback as hard as brute-forcing the
+        * hash.
+        */
+-      mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
++      __mix_pool_bytes(r, hash, sizeof(hash), extract);
++      spin_unlock_irqrestore(&r->lock, flags);
+       /*
+        * To avoid duplicates, we atomically extract a portion of the
+@@ -928,11 +931,10 @@ static void extract_buf(struct entropy_s
+ }
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+-                             size_t nbytes, int min, int reserved)
++                               size_t nbytes, int min, int reserved)
+ {
+       ssize_t ret = 0, i;
+       __u8 tmp[EXTRACT_SIZE];
+-      unsigned long flags;
+       xfer_secondary_pool(r, nbytes);
+       nbytes = account(r, nbytes, min, reserved);
+@@ -941,6 +943,8 @@ static ssize_t extract_entropy(struct en
+               extract_buf(r, tmp);
+               if (fips_enabled) {
++                      unsigned long flags;
++
+                       spin_lock_irqsave(&r->lock, flags);
+                       if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+                               panic("Hardware RNG duplicated output!\n");
+@@ -1034,22 +1038,18 @@ EXPORT_SYMBOL(get_random_bytes);
+ static void init_std_data(struct entropy_store *r)
+ {
+       int i;
+-      ktime_t now;
+-      unsigned long flags;
++      ktime_t now = ktime_get_real();
++      unsigned long rv;
+-      spin_lock_irqsave(&r->lock, flags);
+       r->entropy_count = 0;
+       r->entropy_total = 0;
+-      spin_unlock_irqrestore(&r->lock, flags);
+-
+-      now = ktime_get_real();
+-      mix_pool_bytes(r, &now, sizeof(now));
+-      for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
+-              if (!arch_get_random_long(&flags))
++      mix_pool_bytes(r, &now, sizeof(now), NULL);
++      for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
++              if (!arch_get_random_long(&rv))
+                       break;
+-              mix_pool_bytes(r, &flags, sizeof(flags));
++              mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+       }
+-      mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
++      mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+ static int rand_initialize(void)
+@@ -1186,7 +1186,7 @@ write_pool(struct entropy_store *r, cons
+               count -= bytes;
+               p += bytes;
+-              mix_pool_bytes(r, buf, bytes);
++              mix_pool_bytes(r, buf, bytes, NULL);
+               cond_resched();
+       }
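
The locking change in credit_entropy_bits() above is a classic lockless
update: read the counter, compute the clamped new value, and publish it
with a compare-and-swap, retrying if another CPU won the race. Below is
a minimal sketch of that pattern, using C11 atomics as a stand-in for
the kernel's cmpxchg(); entropy_count and the POOLBITS clamp mirror the
hunks above, everything else is illustrative.

/* Illustrative sketch only -- C11 atomics in place of kernel cmpxchg(). */
#include <stdatomic.h>
#include <stdio.h>

#define POOLBITS 4096			/* clamp, as r->poolinfo->POOLBITS */

static _Atomic int entropy_count;

static void credit_entropy_bits(int nbits)
{
	int orig, ec;

	if (!nbits)
		return;
	orig = atomic_load(&entropy_count);
	do {
		ec = orig + nbits;
		if (ec < 0)
			ec = 0;		/* negative entropy/overflow */
		else if (ec > POOLBITS)
			ec = POOLBITS;	/* pool can hold only so much */
		/* on failure, orig is reloaded and we retry */
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, ec));
}

int main(void)
{
	credit_entropy_bits(1);
	credit_entropy_bits(64);
	printf("entropy_count = %d\n", atomic_load(&entropy_count));
	return 0;
}

atomic_compare_exchange_weak() may fail spuriously, but the loop absorbs
that, just as the kernel's goto retry absorbs a lost cmpxchg() race.
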
diff --git a/queue-3.5/series b/queue-3.5/series
index 62a6d2fef06b8495dba32e241b7bdfadfc45162b..2138201c34c050ca051281bcdad3aa79466d74f4 100644
--- a/queue-3.5/series
+++ b/queue-3.5/series
@@ -43,3 +43,5 @@ wireless-reg-restore-previous-behaviour-of-chan-max_power-calculations.patch
 x86-nops-missing-break-resulting-in-incorrect-selection-on-intel.patch
 x86-64-kcmp-the-kcmp-system-call-can-be-common.patch
 input-synaptics-handle-out-of-bounds-values-from-the-hardware.patch
+random-make-add_interrupt_randomness-do-something-sane.patch
+random-use-lockless-techniques-in-the-interrupt-path.patch