};
#endif /* CONFIG_SYSCTL */
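+/*
+ * Bumped by invalidate_batched_entropy() whenever previously extracted
+ * batches must no longer be used; each per-CPU batch snapshots this
+ * value so stale entropy can be discarded lazily on the next request.
+ */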
+static atomic_t batch_generation = ATOMIC_INIT(0);
+
struct batched_entropy {
union {
u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
};
unsigned int position;
- spinlock_t batch_lock;
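+ /* Snapshot of batch_generation taken when this batch was last filled. */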
+ int generation;
};
/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is either as good as RDRAND or as good as /dev/urandom, with the
 * goal of being quite fast and not depleting entropy. In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
-};
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
unsigned long flags;
struct batched_entropy *batch;
static void *previous;
+ int next_gen;
warn_unseeded_randomness(&previous);
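+ /*
+ * Disabling interrupts pins us to this CPU and excludes any irq
+ * handler that might also use the per-CPU batch, standing in for the
+ * protection the removed batch_lock provided.
+ */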
+ local_irq_save(flags);
batch = raw_cpu_ptr(&batched_entropy_u64);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+
+ next_gen = atomic_read(&batch_generation);
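+ /* Refill when the batch is exhausted or its generation is stale. */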
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0 ||
+ next_gen != batch->generation) {
extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
+ batch->generation = next_gen;
}
+
ret = batch->entropy_u64[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
+ local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(get_random_u64);
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
-};
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+
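+/* Mirrors get_random_u64(), handing out 32-bit words from a separate per-CPU batch. */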
u32 get_random_u32(void)
{
u32 ret;
unsigned long flags;
struct batched_entropy *batch;
static void *previous;
+ int next_gen;
warn_unseeded_randomness(&previous);
+ local_irq_save(flags);
batch = raw_cpu_ptr(&batched_entropy_u32);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+
+ next_gen = atomic_read(&batch_generation);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0 ||
+ next_gen != batch->generation) {
extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
+ batch->generation = next_gen;
}
+
ret = batch->entropy_u32[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
+ local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
/* It's important to invalidate all potential batched entropy that might
* be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
+ * bumping the generation counter.
+ */
static void invalidate_batched_entropy(void)
{
- int cpu;
- unsigned long flags;
-
- for_each_possible_cpu(cpu) {
- struct batched_entropy *batched_entropy;
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
- batched_entropy->position = 0;
- spin_unlock(&batched_entropy->batch_lock);
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
- spin_lock(&batched_entropy->batch_lock);
- batched_entropy->position = 0;
- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
- }
+ atomic_inc(&batch_generation);
}
/**