git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
rcu/nocb: Create kthreads on all CPUs if "rcu_nocbs=" or "nohz_full=" are passed
author: Frederic Weisbecker <frederic@kernel.org>
Tue, 23 Nov 2021 00:37:06 +0000 (01:37 +0100)
committer: Paul E. McKenney <paulmck@kernel.org>
Thu, 9 Dec 2021 19:35:06 +0000 (11:35 -0800)
In order to be able to (de-)offload any CPU using cpusets in the future,
create the NOCB data structures for all possible CPUs.  For now this is
done only as long as the "rcu_nocbs=" or "nohz_full=" kernel parameters
are passed to avoid the unnecessary overhead for most users.

Note that the rcuog and rcuoc kthreads are not created until at least
one of the corresponding CPUs comes online.  This approach avoids the
creation of excess kthreads when firmware lies about the number of CPUs
present on the system.

Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree_nocb.h

index 42092de677054f8b9bfe92d885d73624078f5fba..f580a6b2e74e3ca6fd75d0c86971992b84376f33 100644 (file)
@@ -1237,11 +1237,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
        struct rcu_data *rdp_gp;
        struct task_struct *t;
 
-       /*
-        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
-        * then nothing to do.
-        */
-       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
+       /* If there already is an rcuo kthread, then nothing to do. */
+       if (rdp->nocb_cb_kthread)
                return;
 
        /* If we didn't spawn the GP kthread first, reorganize! */
@@ -1269,7 +1266,7 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
  */
 static void rcu_spawn_cpu_nocb_kthread(int cpu)
 {
-       if (rcu_scheduler_fully_active)
+       if (rcu_scheduler_fully_active && rcu_nocb_is_setup)
                rcu_spawn_one_nocb_kthread(cpu);
 }
 
@@ -1319,7 +1316,7 @@ static void __init rcu_organize_nocb_kthreads(void)
         * Should the corresponding CPU come online in the future, then
         * we will spawn the needed set of rcu_nocb_kthread() kthreads.
         */
-       for_each_cpu(cpu, rcu_nocb_mask) {
+       for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rdp->cpu >= nl) {
                        /* New GP kthread, set up for CBs & next GP. */
@@ -1343,7 +1340,8 @@ static void __init rcu_organize_nocb_kthreads(void)
                                pr_cont(" %d", cpu);
                }
                rdp->nocb_gp_rdp = rdp_gp;
-               list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
+               if (cpumask_test_cpu(cpu, rcu_nocb_mask))
+                       list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
        }
        if (gotnocbs && dump_tree)
                pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");