rcu: Switch urgent quiescent-state requests to rcu_data structure
author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Sat, 4 Aug 2018 04:00:38 +0000 (21:00 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Thu, 30 Aug 2018 23:03:50 +0000 (16:03 -0700)
This commit removes ->rcu_need_heavy_qs and ->rcu_urgent_qs from the
rcu_dynticks structure and updates the code to access them from the
rcu_data structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
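
For orientation, here is a minimal sketch (plain C, not the kernel sources) of the data-structure move the commit message describes: the two urgent-quiescent-state flags leave the per-CPU rcu_dynticks structure and are instead expected to live in the per-CPU rcu_data structure. The "before" fields are taken from the tree.h hunk below; the exact placement and surrounding members in rcu_data are assumptions, since the hunk adding them is not shown on this page.

    /*
     * Illustrative sketch only -- field order and neighbouring members
     * are assumptions, not the actual kernel definitions.
     */
    #include <stdbool.h>

    struct rcu_dynticks_before {        /* per-CPU dyntick-idle state (pre-commit) */
            /* ... nesting counters and the dynticks counter ... */
            bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
            bool rcu_urgent_qs;         /* GP old, need light quiescent state. */
    };

    struct rcu_data_after {             /* per-CPU RCU bookkeeping (post-commit) */
            /* ... existing rcu_data members ... */
            bool rcu_need_heavy_qs;     /* Moved here from rcu_dynticks (assumed placement). */
            bool rcu_urgent_qs;         /* Moved here from rcu_dynticks (assumed placement). */
    };
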

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e778fd5546d118fc87dfb20d7e32535d070c4504..7ec0ba885273a72ca026e7d700c024e7b31cbe9a 100644
@@ -362,7 +362,7 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void)
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
        int special;
 
-       raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
+       raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
        special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
        /* It is illegal to call this from idle state. */
        WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
@@ -928,7 +928,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
        cpu = task_cpu(t);
        if (!task_curr(t))
                return; /* This task is not running on that CPU. */
-       smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+       smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 }
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -1081,8 +1081,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         * is set way high.
         */
        jtsq = READ_ONCE(jiffies_to_sched_qs);
-       ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-       rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+       ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
+       rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
        if (!READ_ONCE(*rnhqp) &&
            (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
             time_after(jiffies, rcu_state.jiffies_resched))) {
@@ -2499,13 +2499,13 @@ void rcu_check_callbacks(int user)
        trace_rcu_utilization(TPS("Start scheduler-tick"));
        raw_cpu_inc(rcu_data.ticks_this_gp);
        /* The load-acquire pairs with the store-release setting to true. */
-       if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+       if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
                /* Idle and userspace execution already are quiescent states. */
                if (!rcu_is_cpu_rrupt_from_idle() && !user) {
                        set_tsk_need_resched(current);
                        set_preempt_need_resched();
                }
-               __this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+               __this_cpu_write(rcu_data.rcu_urgent_qs, false);
        }
        rcu_flavor_check_callbacks(user);
        if (rcu_pending())
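
The comments in the hunks above ("Store .exp before .rcu_urgent_qs." and "The load-acquire pairs with the store-release setting to true.") rely on release/acquire pairing on the rcu_urgent_qs flag. Below is a standalone C11 model of that pairing, not kernel code: the variable names, the exp_pending stand-in, and the single-threaded driver are all invented for illustration.

    /* Standalone model of the release/acquire pairing on rcu_urgent_qs. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic bool urgent_qs;      /* stands in for the per-CPU rcu_urgent_qs flag */
    static bool exp_pending;            /* plain data published before the flag */

    static void request_urgent_qs(void) /* models the smp_store_release() side */
    {
            exp_pending = true;                          /* store .exp ... */
            atomic_store_explicit(&urgent_qs, true,      /* ... before .rcu_urgent_qs */
                                  memory_order_release);
    }

    static void scheduler_tick(void)    /* models the smp_load_acquire() side */
    {
            if (atomic_load_explicit(&urgent_qs, memory_order_acquire)) {
                    /* The acquire pairs with the release above, so exp_pending is visible. */
                    printf("urgent QS requested, exp_pending=%d\n", exp_pending);
                    atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
            }
    }

    int main(void)
    {
            request_urgent_qs();
            scheduler_tick();
            return 0;
    }
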
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 36a47c7bd8827c3f7d071dd2e9534b8e72570ca9..4c31066ddb940aa455e7401479ccdbb767864279 100644
@@ -41,8 +41,6 @@ struct rcu_dynticks {
        long dynticks_nesting;      /* Track process nesting level. */
        long dynticks_nmi_nesting;  /* Track irq/NMI nesting level. */
        atomic_t dynticks;          /* Even value for idle, else odd. */
-       bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
-       bool rcu_urgent_qs;         /* GP old need light quiescent state. */
 };
 
 /* Communicate arguments to a workqueue handler. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 030df96e0d3c804efe3a74bad36c5e43f518e40a..11387fcd4d85f47a33424d1a4ed76362f17ec691 100644
@@ -780,7 +780,7 @@ static void sync_sched_exp_handler(void *unused)
        }
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
-       smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
+       smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
 }
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b5aeb2fe4cfe4906071c522bdc6067fe158013a4..161760957a07849d78f90fa4803fcf60dd32fc95 100644
@@ -967,17 +967,17 @@ void rcu_all_qs(void)
 {
        unsigned long flags;
 
-       if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+       if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
                return;
        preempt_disable();
        /* Load rcu_urgent_qs before other flags. */
-       if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+       if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
                preempt_enable();
                return;
        }
-       this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+       this_cpu_write(rcu_data.rcu_urgent_qs, false);
        barrier(); /* Avoid RCU read-side critical sections leaking down. */
-       if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
+       if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
                local_irq_save(flags);
                rcu_momentary_dyntick_idle();
                local_irq_restore(flags);
@@ -997,10 +997,10 @@ void rcu_note_context_switch(bool preempt)
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_qs();
        /* Load rcu_urgent_qs before other flags. */
-       if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
+       if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
                goto out;
-       this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-       if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
+       this_cpu_write(rcu_data.rcu_urgent_qs, false);
+       if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
                rcu_momentary_dyntick_idle();
        if (!preempt)
                rcu_tasks_qs(current);
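
The rcu_all_qs() hunk above follows a double-check pattern: a cheap unordered read of rcu_urgent_qs, then an acquire-ordered re-check with preemption disabled, then the flag is cleared and the heavy path (rcu_momentary_dyntick_idle()) is taken only if rcu_need_heavy_qs is also set. The following is a hedged standalone model of that flow in plain C11; the helper names and atomics are stand-ins, not kernel APIs, and preempt_disable()/barrier() have no direct equivalent here.

    /* Illustrative-only model of the rcu_all_qs() double-check pattern. */
    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool urgent_qs;
    static _Atomic bool need_heavy_qs;

    static void momentary_dyntick_idle(void) { /* stand-in for rcu_momentary_dyntick_idle() */ }

    static void all_qs_model(void)
    {
            /* Cheap, unordered early exit (models raw_cpu_read()). */
            if (!atomic_load_explicit(&urgent_qs, memory_order_relaxed))
                    return;
            /* Ordered re-check (models smp_load_acquire() under preempt_disable()). */
            if (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
                    return;
            atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
            if (atomic_load_explicit(&need_heavy_qs, memory_order_relaxed))
                    momentary_dyntick_idle();
    }

    int main(void)
    {
            atomic_store(&urgent_qs, true);
            atomic_store(&need_heavy_qs, true);
            all_qs_model();
            return 0;
    }
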