random: use offstack cpumask when necessary
author     Arnd Bergmann <arnd@arndb.de>
           Tue, 10 Jun 2025 09:27:08 +0000 (11:27 +0200)
committer  Jason A. Donenfeld <Jason@zx2c4.com>
           Thu, 30 Oct 2025 17:35:26 +0000 (18:35 +0100)
The entropy generation function keeps a local cpu mask on the stack,
which can trigger warnings in configurations with a large number of
CPUs:

    drivers/char/random.c:1292:20: error: stack frame size (1288)
    exceeds limit (1280) in 'try_to_generate_entropy' [-Werror,-Wframe-larger-than]

Use the cpumask interface to dynamically allocate it in those
configurations.

Fixes: 1c21fe00eda7 ("random: spread out jitter callback to different CPUs")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
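For reference, this is the cpumask_var_t pattern the patch adopts, shown as a
minimal standalone sketch (the helper name is made up for illustration and is
not part of the patch). With CONFIG_CPUMASK_OFFSTACK=y, alloc_cpumask_var()
allocates the mask from the heap; in all other configurations cpumask_var_t is
a one-element on-stack array and the alloc/free calls reduce to almost nothing:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/sched/isolation.h>
    #include <linux/slab.h>

    /* Illustrative helper, not part of this patch. */
    static int example_count_timer_cpus(void)
    {
            cpumask_var_t mask;
            int n;

            /* Only actually allocates when CONFIG_CPUMASK_OFFSTACK=y. */
            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            /* Note: pass 'mask' directly, not '&mask', once allocated. */
            cpumask_and(mask, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
            n = cpumask_weight(mask);

            free_cpumask_var(mask);
            return n;
    }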
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 654b1fda52f0fb6705f56279cb7f7cce40064162..d45383d57919fe62c4398a1f077cb27322fabf29 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1296,6 +1296,7 @@ static void __cold try_to_generate_entropy(void)
        struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
        unsigned int i, num_different = 0;
        unsigned long last = random_get_entropy();
+       cpumask_var_t timer_cpus;
        int cpu = -1;
 
        for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1310,13 +1311,15 @@ static void __cold try_to_generate_entropy(void)
 
        atomic_set(&stack->samples, 0);
        timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+       if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+               goto out;
+
        while (!crng_ready() && !signal_pending(current)) {
                /*
                 * Check !timer_pending() and then ensure that any previous callback has finished
                 * executing by checking timer_delete_sync_try(), before queueing the next one.
                 */
                if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
-                       struct cpumask timer_cpus;
                        unsigned int num_cpus;
 
                        /*
@@ -1326,19 +1329,19 @@ static void __cold try_to_generate_entropy(void)
                        preempt_disable();
 
                        /* Only schedule callbacks on timer CPUs that are online. */
-                       cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
-                       num_cpus = cpumask_weight(&timer_cpus);
+                       cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+                       num_cpus = cpumask_weight(timer_cpus);
                        /* In very bizarre case of misconfiguration, fallback to all online. */
                        if (unlikely(num_cpus == 0)) {
-                               timer_cpus = *cpu_online_mask;
-                               num_cpus = cpumask_weight(&timer_cpus);
+                               cpumask_copy(timer_cpus, cpu_online_mask);
+                               num_cpus = cpumask_weight(timer_cpus);
                        }
 
                        /* Basic CPU round-robin, which avoids the current CPU. */
                        do {
-                               cpu = cpumask_next(cpu, &timer_cpus);
+                               cpu = cpumask_next(cpu, timer_cpus);
                                if (cpu >= nr_cpu_ids)
-                                       cpu = cpumask_first(&timer_cpus);
+                                       cpu = cpumask_first(timer_cpus);
                        } while (cpu == smp_processor_id() && num_cpus > 1);
 
                        /* Expiring the timer at `jiffies` means it's the next tick. */
@@ -1354,6 +1357,8 @@ static void __cold try_to_generate_entropy(void)
        }
        mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
 
+       free_cpumask_var(timer_cpus);
+out:
        timer_delete_sync(&stack->timer);
        timer_destroy_on_stack(&stack->timer);
 }
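
The reason the '&' is dropped throughout the hunks above: cpumask_var_t is
defined differently depending on CONFIG_CPUMASK_OFFSTACK (simplified here from
<linux/cpumask.h>), and in both variants the bare identifier already yields a
struct cpumask pointer when passed as a function argument:

    #ifdef CONFIG_CPUMASK_OFFSTACK
    typedef struct cpumask *cpumask_var_t;    /* allocated by alloc_cpumask_var() */
    #else
    typedef struct cpumask cpumask_var_t[1];  /* on stack; alloc_cpumask_var() always succeeds */
    #endif

This is also why the allocation-failure path can only trigger in the offstack
configuration at runtime, while the function body stays identical in both.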