rcu: Add *_ONCE() to rcu_data ->rcu_forced_tick
author	Paul E. McKenney <paulmck@kernel.org>
	Thu, 9 Jan 2020 04:06:25 +0000 (20:06 -0800)
committer	Paul E. McKenney <paulmck@kernel.org>
	Thu, 20 Feb 2020 23:58:22 +0000 (15:58 -0800)
The rcu_data structure's ->rcu_forced_tick field is read locklessly, so
this commit adds WRITE_ONCE() to all updates and READ_ONCE() to all
lockless reads.

This data race was reported by KCSAN.  This commit is not appropriate
for backporting because the failure it addresses is unlikely.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
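
[Editor's note: below is a minimal userspace sketch of the pattern this
patch annotates: a lockless fast-path check, a recheck under the lock,
and marked writes because lockless readers exist.  The READ_ONCE() and
WRITE_ONCE() stand-ins use volatile casts in the style of the kernel's
classic definitions, and struct demo_data, maybe_force_tick(), and
clear_forced_tick() are hypothetical names, not kernel code.]

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel macros: volatile casts
	 * keep the compiler from tearing, fusing, or refetching the
	 * access, and they document the intentional race for KCSAN. */
	#define READ_ONCE(x)	 (*(volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

	struct demo_data {
		pthread_mutex_t lock;
		bool forced_tick;	/* written under lock, read locklessly */
	};

	static struct demo_data dd = { .lock = PTHREAD_MUTEX_INITIALIZER };

	static void maybe_force_tick(void)
	{
		/* Lockless fast path: marked read, as in the patch. */
		if (READ_ONCE(dd.forced_tick))
			return;		/* already set, skip the lock */

		pthread_mutex_lock(&dd.lock);
		/* Recheck under lock: a plain read is fine here because
		 * all writers hold the lock, but the write stays marked
		 * because lockless readers can observe it. */
		if (!dd.forced_tick)
			WRITE_ONCE(dd.forced_tick, true);
		pthread_mutex_unlock(&dd.lock);
	}

	static void clear_forced_tick(void)
	{
		pthread_mutex_lock(&dd.lock);
		WRITE_ONCE(dd.forced_tick, false);
		pthread_mutex_unlock(&dd.lock);
	}

	int main(void)
	{
		maybe_force_tick();
		printf("forced_tick = %d\n", READ_ONCE(dd.forced_tick));
		clear_forced_tick();
		printf("forced_tick = %d\n", READ_ONCE(dd.forced_tick));
		return 0;
	}

Note that the marked accesses add no memory ordering; ordering here
comes from the lock.  The *_ONCE() annotations only constrain the
compiler and tell KCSAN the lockless accesses are intentional.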
kernel/rcu/tree.c

index e851a12920e6740c2bbd489c34a404ad570ea085..be59a5d7299dee8402d9db71d8fba8c4be77bfc8 100644
@@ -818,11 +818,12 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
                incby = 1;
        } else if (tick_nohz_full_cpu(rdp->cpu) &&
                   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-                  READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
+                  READ_ONCE(rdp->rcu_urgent_qs) &&
+                  !READ_ONCE(rdp->rcu_forced_tick)) {
                raw_spin_lock_rcu_node(rdp->mynode);
                // Recheck under lock.
                if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-                       rdp->rcu_forced_tick = true;
+                       WRITE_ONCE(rdp->rcu_forced_tick, true);
                        tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
                }
                raw_spin_unlock_rcu_node(rdp->mynode);
@@ -899,7 +900,7 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
        WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
        if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
                tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-               rdp->rcu_forced_tick = false;
+               WRITE_ONCE(rdp->rcu_forced_tick, false);
        }
 }