Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 17 Sep 2015 17:49:42 +0000 (10:49 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 17 Sep 2015 17:49:42 +0000 (10:49 -0700)
Pull scheduler fixes from Ingo Molnar:
 "A migrate_tasks() locking fix, and a late-coming nohz change plus a
  nohz debug check"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: 'Annotate' migrate_tasks()
  nohz: Assert existing housekeepers when nohz full enabled
  nohz: Affine unpinned timers to housekeepers

kernel/sched/core.c

diff --combined kernel/sched/core.c
index 3595403921bd5be10c3e5e591bf04916e654423d,9b786704d34b8e1f91425043ad6109323f81eed0..97d276ff1edb1225f0ad894cb66b052be36b2104
@@@ -164,12 -164,14 +164,12 @@@ struct static_key sched_feat_keys[__SCH
  
  static void sched_feat_disable(int i)
  {
 -      if (static_key_enabled(&sched_feat_keys[i]))
 -              static_key_slow_dec(&sched_feat_keys[i]);
 +      static_key_disable(&sched_feat_keys[i]);
  }
  
  static void sched_feat_enable(int i)
  {
 -      if (!static_key_enabled(&sched_feat_keys[i]))
 -              static_key_slow_inc(&sched_feat_keys[i]);
 +      static_key_enable(&sched_feat_keys[i]);
  }
  #else
  static void sched_feat_disable(int i) { };
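
The hunk above replaces the open-coded "check enabled, then slow inc/dec" pattern with the then-new static_key_enable()/static_key_disable() helpers, which fold the idempotence check into the API itself. A minimal sketch of what those helpers do, simplified from include/linux/jump_label.h of that era (the real versions also WARN on unbalanced enable counts):

	static inline void static_key_enable(struct static_key *key)
	{
		/* Take a reference only if the key is not already enabled. */
		if (!static_key_enabled(key))
			static_key_slow_inc(key);
	}

	static inline void static_key_disable(struct static_key *key)
	{
		/* Drop the reference only if the key is currently enabled. */
		if (static_key_enabled(key))
			static_key_slow_dec(key);
	}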
@@@ -621,18 -623,21 +621,21 @@@ int get_nohz_timer_target(void
        int i, cpu = smp_processor_id();
        struct sched_domain *sd;
  
-       if (!idle_cpu(cpu))
+       if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
                return cpu;
  
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
-                       if (!idle_cpu(i)) {
+                       if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
+       if (!is_housekeeping_cpu(cpu))
+               cpu = housekeeping_any_cpu();
  unlock:
        rcu_read_unlock();
        return cpu;
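
get_nohz_timer_target() now prefers housekeeping CPUs, so unpinned timers are no longer queued on nohz_full CPUs. The helpers it relies on come from the same series; roughly, from include/linux/tick.h of that era (a sketch, exact details vary by kernel version):

	static inline int housekeeping_any_cpu(void)
	{
	#ifdef CONFIG_NO_HZ_FULL
		/* With nohz_full active, pick any online housekeeping CPU. */
		if (tick_nohz_full_enabled())
			return cpumask_any_and(housekeeping_mask, cpu_online_mask);
	#endif
		return smp_processor_id();
	}

	static inline bool is_housekeeping_cpu(int cpu)
	{
	#ifdef CONFIG_NO_HZ_FULL
		/* With nohz_full active, only CPUs in housekeeping_mask qualify. */
		if (tick_nohz_full_enabled())
			return cpumask_test_cpu(cpu, housekeeping_mask);
	#endif
		/* Without nohz_full, every CPU does housekeeping. */
		return true;
	}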
@@@ -5178,24 -5183,47 +5181,47 @@@ static void migrate_tasks(struct rq *de
                        break;
  
                /*
-                * Ensure rq->lock covers the entire task selection
-                * until the migration.
+                * pick_next_task assumes pinned rq->lock.
                 */
                lockdep_pin_lock(&rq->lock);
                next = pick_next_task(rq, &fake_task);
                BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
  
+               /*
+                * The rules for changing task_struct::cpus_allowed require
+                * holding both pi_lock and rq->lock, such that holding
+                * either one stabilizes the mask.
+                *
+                * Dropping rq->lock is not quite as disastrous as it usually
+                * is because !cpu_active at this point, which means
+                * load-balance will not interfere. Also, we're inside
+                * stop-machine, so nothing else is running.
+                */
+               lockdep_unpin_lock(&rq->lock);
+               raw_spin_unlock(&rq->lock);
+               raw_spin_lock(&next->pi_lock);
+               raw_spin_lock(&rq->lock);
+               /*
+                * Since we're inside stop-machine, _nothing_ should have
+                * changed the task; WARN if it did, because in that case
+                * dropping rq->lock above was unsafe too.
+                */
+               if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+                       raw_spin_unlock(&next->pi_lock);
+                       continue;
+               }
                /* Find suitable destination for @next, with force if needed. */
                dest_cpu = select_fallback_rq(dead_rq->cpu, next);
  
-               lockdep_unpin_lock(&rq->lock);
                rq = __migrate_task(rq, next, dest_cpu);
                if (rq != dead_rq) {
                        raw_spin_unlock(&rq->lock);
                        rq = dead_rq;
                        raw_spin_lock(&rq->lock);
                }
+               raw_spin_unlock(&next->pi_lock);
        }
  
        rq->stop = stop;
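
The locking dance above follows the usual nesting order: pi_lock nests outside rq->lock, which is why rq->lock must be dropped (and lockdep's pin released) before next->pi_lock can be taken. For comparison, the canonical way to acquire both is task_rq_lock(); a simplified sketch of that helper from kernel/sched/core.c of that era (the real one also re-checks for in-flight migrations and carries lockdep annotations):

	static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	{
		struct rq *rq;

		for (;;) {
			/* pi_lock first: it is the outer lock. */
			raw_spin_lock_irqsave(&p->pi_lock, *flags);
			rq = task_rq(p);
			raw_spin_lock(&rq->lock);
			/* The task may have migrated before rq->lock was taken. */
			if (likely(rq == task_rq(p)))
				return rq;
			raw_spin_unlock(&rq->lock);
			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
		}
	}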
@@@ -8131,7 -8159,7 +8157,7 @@@ static void cpu_cgroup_css_offline(stru
        sched_offline_group(tg);
  }
  
 -static void cpu_cgroup_fork(struct task_struct *task)
 +static void cpu_cgroup_fork(struct task_struct *task, void *private)
  {
        sched_move_task(task);
  }
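
The extra void *private parameter on cpu_cgroup_fork() comes in from the cgroup fork-callback rework merged in the same cycle; the scheduler ignores it. For context, the callback is registered in cpu_cgrp_subsys further down in kernel/sched/core.c, roughly as follows (abridged; the member list varies by version):

	struct cgroup_subsys cpu_cgrp_subsys = {
		.css_alloc	= cpu_cgroup_css_alloc,
		.css_free	= cpu_cgroup_css_free,
		.css_offline	= cpu_cgroup_css_offline,
		.fork		= cpu_cgroup_fork,	/* invoked for each new task */
		/* ... other callbacks elided ... */
		.early_init	= 1,
	};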