git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Mar 2016 06:54:04 +0000 (22:54 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Mar 2016 06:54:04 +0000 (22:54 -0800)
added patches:
sched-allow-balance-callbacks-for-check_class_changed.patch
sched-dl-convert-switched_-from-to-_dl-prio_changed_dl-to-balance-callbacks.patch
sched-dl-remove-return-value-from-pull_dl_task.patch
sched-replace-post_schedule-with-a-balance-callback-list.patch
sched-rt-convert-switched_-from-to-_rt-prio_changed_rt-to-balance-callbacks.patch
sched-rt-remove-return-value-from-pull_rt_task.patch

queue-3.14/sched-allow-balance-callbacks-for-check_class_changed.patch [new file with mode: 0644]
queue-3.14/sched-dl-convert-switched_-from-to-_dl-prio_changed_dl-to-balance-callbacks.patch [new file with mode: 0644]
queue-3.14/sched-dl-remove-return-value-from-pull_dl_task.patch [new file with mode: 0644]
queue-3.14/sched-replace-post_schedule-with-a-balance-callback-list.patch [new file with mode: 0644]
queue-3.14/sched-rt-convert-switched_-from-to-_rt-prio_changed_rt-to-balance-callbacks.patch [new file with mode: 0644]
queue-3.14/sched-rt-remove-return-value-from-pull_rt_task.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/sched-allow-balance-callbacks-for-check_class_changed.patch b/queue-3.14/sched-allow-balance-callbacks-for-check_class_changed.patch
new file mode 100644 (file)
index 0000000..f1a08ea
--- /dev/null
@@ -0,0 +1,109 @@
+From 4c9a4bc89a9cca8128bce67d6bc8870d6b7ee0b2 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 14:46:39 +0200
+Subject: sched: Allow balance callbacks for check_class_changed()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 4c9a4bc89a9cca8128bce67d6bc8870d6b7ee0b2 upstream.
+
+In order to remove dropping rq->lock from the
+switched_{to,from}()/prio_changed() sched_class methods, run the
+balance callbacks after it.
+
+We need to remove dropping rq->lock because it's buggy: suppose we
+use sched_setattr()/sched_setscheduler() to change a running task
+from FIFO to OTHER.
+
+By the time we get to switched_from_rt() the task is already enqueued
+on the cfs runqueues. If switched_from_rt() does pull_rt_task() and
+drops rq->lock, load-balancing can come in and move our task @p to
+another rq.
+
+The subsequent switched_to_fair() still assumes @p is on @rq and bad
+things will happen.
+
+By using balance callbacks we delay the load-balancing operations
+{rt,dl}x{push,pull} until we've done all the important work and the
+task is fully set up.
+
+Furthermore, the balance callbacks do not know about @p, therefore
+they cannot get confused like this.
+
+Reported-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: ktkhai@parallels.com
+Cc: rostedt@goodmis.org
+Cc: juri.lelli@gmail.com
+Cc: pang.xunlei@linaro.org
+Cc: oleg@redhat.com
+Cc: wanpeng.li@linux.intel.com
+Link: http://lkml.kernel.org/r/20150611124742.615343911@infradead.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Byungchul Park <byungchul.park@lge.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c |   24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -937,6 +937,13 @@ inline int task_curr(const struct task_s
+       return cpu_curr(task_cpu(p)) == p;
+ }
++/*
++ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
++ * use the balance_callback list if you want balancing.
++ *
++ * this means any call to check_class_changed() must be followed by a call to
++ * balance_callback().
++ */
+ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+                                      const struct sched_class *prev_class,
+                                      int oldprio)
+@@ -1423,8 +1430,12 @@ ttwu_do_wakeup(struct rq *rq, struct tas
+       p->state = TASK_RUNNING;
+ #ifdef CONFIG_SMP
+-      if (p->sched_class->task_woken)
++      if (p->sched_class->task_woken) {
++              /*
++               * XXX can drop rq->lock; most likely ok.
++               */
+               p->sched_class->task_woken(rq, p);
++      }
+       if (rq->idle_stamp) {
+               u64 delta = rq_clock(rq) - rq->idle_stamp;
+@@ -3006,7 +3017,11 @@ void rt_mutex_setprio(struct task_struct
+       check_class_changed(rq, p, prev_class, oldprio);
+ out_unlock:
++      preempt_disable(); /* avoid rq from going away on us */
+       __task_rq_unlock(rq);
++
++      balance_callback(rq);
++      preempt_enable();
+ }
+ #endif
+@@ -3512,10 +3527,17 @@ change:
+               enqueue_task(rq, p, 0);
+       check_class_changed(rq, p, prev_class, oldprio);
++      preempt_disable(); /* avoid rq from going away on us */
+       task_rq_unlock(rq, p, &flags);
+       rt_mutex_adjust_pi(p);
++      /*
++       * Run balance callbacks after we've adjusted the PI chain.
++       */
++      balance_callback(rq);
++      preempt_enable();
++
+       return 0;
+ }
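
The ordering this patch enforces is easier to see in isolation: the class switch completes entirely under rq->lock, the balancing work is merely queued there, and it only runs once the lock has been dropped, so load balancing can no longer observe a half-switched task. Below is a minimal userspace analogue of that shape; names such as fake_rq, change_class() and rebalance() are hypothetical, not kernel APIs, and a single function-pointer slot stands in for the callback list.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins; none of these names are kernel APIs. */
struct fake_rq {
	pthread_mutex_t lock;
	void (*deferred)(struct fake_rq *);	/* single slot standing in for rq->balance_callback */
	int nr_queued;
};

struct fake_task { int klass; };

/*
 * The deferred work operates on the runqueue only; it never dereferences
 * the task whose class just changed, so it cannot get confused if that
 * task has since been moved to another runqueue.
 */
static void rebalance(struct fake_rq *rq)
{
	printf("rebalancing a runqueue holding %d tasks\n", rq->nr_queued);
}

static void change_class(struct fake_rq *rq, struct fake_task *p, int new_class)
{
	pthread_mutex_lock(&rq->lock);
	p->klass = new_class;		/* finish the switch entirely under the lock */
	rq->deferred = rebalance;	/* only *queue* the balancing work here      */
	pthread_mutex_unlock(&rq->lock);

	/*
	 * Analogue of balance_callback(rq): the work runs after the lock is
	 * released (the kernel re-takes rq->lock inside __balance_callback();
	 * this single-threaded sketch skips that).
	 */
	if (rq->deferred) {
		void (*fn)(struct fake_rq *) = rq->deferred;

		rq->deferred = NULL;
		fn(rq);
	}
}

int main(void)
{
	struct fake_rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_queued = 1 };
	struct fake_task p = { .klass = 0 };

	change_class(&rq, &p, 1);
	return 0;
}

Note that rebalance() takes only the runqueue and never the task whose class changed, which is why the queued callbacks cannot get confused about @p even if it has meanwhile migrated.
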
diff --git a/queue-3.14/sched-dl-convert-switched_-from-to-_dl-prio_changed_dl-to-balance-callbacks.patch b/queue-3.14/sched-dl-convert-switched_-from-to-_dl-prio_changed_dl-to-balance-callbacks.patch
new file mode 100644 (file)
index 0000000..988e68a
--- /dev/null
@@ -0,0 +1,126 @@
+From 9916e214998a4a363b152b637245e5c958067350 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 14:46:43 +0200
+Subject: sched, dl: Convert switched_{from, to}_dl() / prio_changed_dl() to balance callbacks
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 9916e214998a4a363b152b637245e5c958067350 upstream.
+
+Remove the direct {push,pull} balancing operations from
+switched_{from,to}_dl() / prio_changed_dl() and use the balance
+callback queue.
+
+Again, err on the side of too many reschedules, since too few is a
+hard bug while too many is just annoying.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: ktkhai@parallels.com
+Cc: rostedt@goodmis.org
+Cc: juri.lelli@gmail.com
+Cc: pang.xunlei@linaro.org
+Cc: oleg@redhat.com
+Cc: wanpeng.li@linux.intel.com
+Cc: umgwanakikbuti@gmail.com
+Link: http://lkml.kernel.org/r/20150611124742.968262663@infradead.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Byungchul Park <byungchul.park@lge.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/deadline.c |   34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -210,16 +210,23 @@ static inline int has_pushable_dl_tasks(
+ static int push_dl_task(struct rq *rq);
+-static DEFINE_PER_CPU(struct callback_head, dl_balance_head);
++static DEFINE_PER_CPU(struct callback_head, dl_push_head);
++static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
+ static void push_dl_tasks(struct rq *);
++static void pull_dl_task(struct rq *);
+ static inline void queue_push_tasks(struct rq *rq)
+ {
+       if (!has_pushable_dl_tasks(rq))
+               return;
+-      queue_balance_callback(rq, &per_cpu(dl_balance_head, rq->cpu), push_dl_tasks);
++      queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
++}
++
++static inline void queue_pull_task(struct rq *rq)
++{
++      queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
+ }
+ #else
+@@ -247,6 +254,10 @@ void dec_dl_migration(struct sched_dl_en
+ static inline void queue_push_tasks(struct rq *rq)
+ {
+ }
++
++static inline void queue_pull_task(struct rq *rq)
++{
++}
+ #endif /* CONFIG_SMP */
+ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
+@@ -1541,7 +1552,7 @@ static void switched_from_dl(struct rq *
+        * from an overloaded cpu, if any.
+        */
+       if (!rq->dl.dl_nr_running)
+-              pull_dl_task(rq);
++              queue_pull_task(rq);
+ #endif
+ }
+@@ -1551,8 +1562,6 @@ static void switched_from_dl(struct rq *
+  */
+ static void switched_to_dl(struct rq *rq, struct task_struct *p)
+ {
+-      int check_resched = 1;
+-
+       /*
+        * If p is throttled, don't consider the possibility
+        * of preempting rq->curr, the check will be done right
+@@ -1563,12 +1572,12 @@ static void switched_to_dl(struct rq *rq
+       if (p->on_rq || rq->curr != p) {
+ #ifdef CONFIG_SMP
+-              if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
+-                      /* Only reschedule if pushing failed */
+-                      check_resched = 0;
+-#endif /* CONFIG_SMP */
+-              if (check_resched && task_has_dl_policy(rq->curr))
++              if (rq->dl.overloaded)
++                      queue_push_tasks(rq);
++#else
++              if (task_has_dl_policy(rq->curr))
+                       check_preempt_curr_dl(rq, p, 0);
++#endif /* CONFIG_SMP */
+       }
+ }
+@@ -1588,15 +1597,14 @@ static void prio_changed_dl(struct rq *r
+                * or lowering its prio, so...
+                */
+               if (!rq->dl.overloaded)
+-                      pull_dl_task(rq);
++                      queue_pull_task(rq);
+               /*
+                * If we now have a earlier deadline task than p,
+                * then reschedule, provided p is still on this
+                * runqueue.
+                */
+-              if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
+-                  rq->curr == p)
++              if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
+                       resched_task(p);
+ #else
+               /*
diff --git a/queue-3.14/sched-dl-remove-return-value-from-pull_dl_task.patch b/queue-3.14/sched-dl-remove-return-value-from-pull_dl_task.patch
new file mode 100644 (file)
index 0000000..73afa6a
--- /dev/null
@@ -0,0 +1,76 @@
+From 0ea60c2054fc3b0c3eb68ac4f6884f3ee78d9925 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 14:46:42 +0200
+Subject: sched,dl: Remove return value from pull_dl_task()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 0ea60c2054fc3b0c3eb68ac4f6884f3ee78d9925 upstream.
+
+In order to be able to use pull_dl_task() from a callback, we need to
+do away with the return value.
+
+Since the return value indicates if we should reschedule, do this
+inside the function. Since not all callers currently do this, this can
+increase the number of reschedules due to dl balancing.
+
+Too many reschedules is not a correctness issue, too few are.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: ktkhai@parallels.com
+Cc: rostedt@goodmis.org
+Cc: juri.lelli@gmail.com
+Cc: pang.xunlei@linaro.org
+Cc: oleg@redhat.com
+Cc: wanpeng.li@linux.intel.com
+Cc: umgwanakikbuti@gmail.com
+Link: http://lkml.kernel.org/r/20150611124742.859398977@infradead.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Byungchul Park <byungchul.park@lge.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/deadline.c |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1351,15 +1351,16 @@ static void push_dl_tasks(struct rq *rq)
+               ;
+ }
+-static int pull_dl_task(struct rq *this_rq)
++static void pull_dl_task(struct rq *this_rq)
+ {
+-      int this_cpu = this_rq->cpu, ret = 0, cpu;
++      int this_cpu = this_rq->cpu, cpu;
+       struct task_struct *p;
++      bool resched = false;
+       struct rq *src_rq;
+       u64 dmin = LONG_MAX;
+       if (likely(!dl_overloaded(this_rq)))
+-              return 0;
++              return;
+       /*
+        * Match the barrier from dl_set_overloaded; this guarantees that if we
+@@ -1414,7 +1415,7 @@ static int pull_dl_task(struct rq *this_
+                                          src_rq->curr->dl.deadline))
+                               goto skip;
+-                      ret = 1;
++                      resched = true;
+                       deactivate_task(src_rq, p, 0);
+                       set_task_cpu(p, this_cpu);
+@@ -1427,7 +1428,8 @@ skip:
+               double_unlock_balance(this_rq, src_rq);
+       }
+-      return ret;
++      if (resched)
++              resched_task(this_rq->curr);
+ }
+ static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
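
The reason the return value has to go becomes clear from the callback plumbing added by the balance-callback patch in this queue: queue_balance_callback() stores a void (*)(struct rq *) and the drain loop simply calls it, so there is no channel for a "please reschedule" result and that decision must be made inside the pull function itself. A hedged sketch of the before/after shape, with illustrative names rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

struct rq { int cpu; };				/* illustrative stand-in */

static void resched_curr_stub(struct rq *rq)	/* stands in for resched_task(rq->curr) */
{
	printf("resched on cpu %d\n", rq->cpu);
}

/*
 * Old shape: the caller had to act on the return value, which a
 * void (*)(struct rq *) balance callback has no way to do:
 *
 *	static int pull_task(struct rq *this_rq);
 *	if (pull_task(rq))
 *		resched_task(rq->curr);
 *
 * New shape: decide inside, so the function can be queued as a callback.
 */
static void pull_task(struct rq *this_rq)
{
	bool resched = false;

	/* ... try to pull from overloaded CPUs, set resched on success ... */
	resched = true;				/* pretend a pull happened */

	if (resched)
		resched_curr_stub(this_rq);
}

int main(void)
{
	struct rq rq = { .cpu = 0 };

	pull_task(&rq);		/* now usable directly as a void(struct rq *) callback */
	return 0;
}
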
diff --git a/queue-3.14/sched-replace-post_schedule-with-a-balance-callback-list.patch b/queue-3.14/sched-replace-post_schedule-with-a-balance-callback-list.patch
new file mode 100644 (file)
index 0000000..4b0ea8a
--- /dev/null
@@ -0,0 +1,278 @@
+From e3fca9e7cbfb72694a21c886fcdf9f059cfded9c Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 14:46:37 +0200
+Subject: sched: Replace post_schedule with a balance callback list
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit e3fca9e7cbfb72694a21c886fcdf9f059cfded9c upstream.
+
+Generalize the post_schedule() stuff into a balance callback list.
+This allows us to more easily use it outside of schedule() and cross
+sched_class.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: ktkhai@parallels.com
+Cc: rostedt@goodmis.org
+Cc: juri.lelli@gmail.com
+Cc: pang.xunlei@linaro.org
+Cc: oleg@redhat.com
+Cc: wanpeng.li@linux.intel.com
+Cc: umgwanakikbuti@gmail.com
+Link: http://lkml.kernel.org/r/20150611124742.424032725@infradead.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Byungchul Park <byungchul.park@lge.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c     |   36 ++++++++++++++++++++++++------------
+ kernel/sched/deadline.c |   23 ++++++++++++++++-------
+ kernel/sched/rt.c       |   27 ++++++++++++++++-----------
+ kernel/sched/sched.h    |   19 +++++++++++++++++--
+ 4 files changed, 73 insertions(+), 32 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2179,18 +2179,30 @@ static inline void pre_schedule(struct r
+ }
+ /* rq->lock is NOT held, but preemption is disabled */
+-static inline void post_schedule(struct rq *rq)
++static void __balance_callback(struct rq *rq)
+ {
+-      if (rq->post_schedule) {
+-              unsigned long flags;
++      struct callback_head *head, *next;
++      void (*func)(struct rq *rq);
++      unsigned long flags;
+-              raw_spin_lock_irqsave(&rq->lock, flags);
+-              if (rq->curr->sched_class->post_schedule)
+-                      rq->curr->sched_class->post_schedule(rq);
+-              raw_spin_unlock_irqrestore(&rq->lock, flags);
++      raw_spin_lock_irqsave(&rq->lock, flags);
++      head = rq->balance_callback;
++      rq->balance_callback = NULL;
++      while (head) {
++              func = (void (*)(struct rq *))head->func;
++              next = head->next;
++              head->next = NULL;
++              head = next;
+-              rq->post_schedule = 0;
++              func(rq);
+       }
++      raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static inline void balance_callback(struct rq *rq)
++{
++      if (unlikely(rq->balance_callback))
++              __balance_callback(rq);
+ }
+ #else
+@@ -2199,7 +2211,7 @@ static inline void pre_schedule(struct r
+ {
+ }
+-static inline void post_schedule(struct rq *rq)
++static inline void balance_callback(struct rq *rq)
+ {
+ }
+@@ -2220,7 +2232,7 @@ asmlinkage void schedule_tail(struct tas
+        * FIXME: do we need to worry about rq being invalidated by the
+        * task_switch?
+        */
+-      post_schedule(rq);
++      balance_callback(rq);
+ #ifdef __ARCH_WANT_UNLOCKED_CTXSW
+       /* In this case, finish_task_switch does not reenable preemption */
+@@ -2732,7 +2744,7 @@ need_resched:
+       } else
+               raw_spin_unlock_irq(&rq->lock);
+-      post_schedule(rq);
++      balance_callback(rq);
+       sched_preempt_enable_no_resched();
+       if (need_resched())
+@@ -6902,7 +6914,7 @@ void __init sched_init(void)
+               rq->sd = NULL;
+               rq->rd = NULL;
+               rq->cpu_power = SCHED_POWER_SCALE;
+-              rq->post_schedule = 0;
++              rq->balance_callback = NULL;
+               rq->active_balance = 0;
+               rq->next_balance = jiffies;
+               rq->push_cpu = 0;
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -210,6 +210,18 @@ static inline int has_pushable_dl_tasks(
+ static int push_dl_task(struct rq *rq);
++static DEFINE_PER_CPU(struct callback_head, dl_balance_head);
++
++static void push_dl_tasks(struct rq *);
++
++static inline void queue_push_tasks(struct rq *rq)
++{
++      if (!has_pushable_dl_tasks(rq))
++              return;
++
++      queue_balance_callback(rq, &per_cpu(dl_balance_head, rq->cpu), push_dl_tasks);
++}
++
+ #else
+ static inline
+@@ -232,6 +244,9 @@ void dec_dl_migration(struct sched_dl_en
+ {
+ }
++static inline void queue_push_tasks(struct rq *rq)
++{
++}
+ #endif /* CONFIG_SMP */
+ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
+@@ -1005,7 +1020,7 @@ struct task_struct *pick_next_task_dl(st
+ #endif
+ #ifdef CONFIG_SMP
+-      rq->post_schedule = has_pushable_dl_tasks(rq);
++      queue_push_tasks(rq);
+ #endif /* CONFIG_SMP */
+       return p;
+@@ -1422,11 +1437,6 @@ static void pre_schedule_dl(struct rq *r
+               pull_dl_task(rq);
+ }
+-static void post_schedule_dl(struct rq *rq)
+-{
+-      push_dl_tasks(rq);
+-}
+-
+ /*
+  * Since the task is not running and a reschedule is not going to happen
+  * anytime soon on its runqueue, we try pushing it away now.
+@@ -1615,7 +1625,6 @@ const struct sched_class dl_sched_class
+       .rq_online              = rq_online_dl,
+       .rq_offline             = rq_offline_dl,
+       .pre_schedule           = pre_schedule_dl,
+-      .post_schedule          = post_schedule_dl,
+       .task_woken             = task_woken_dl,
+ #endif
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -315,6 +315,18 @@ static inline int has_pushable_tasks(str
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+ }
++static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
++
++static void push_rt_tasks(struct rq *);
++
++static inline void queue_push_tasks(struct rq *rq)
++{
++      if (!has_pushable_tasks(rq))
++              return;
++
++      queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
++}
++
+ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+ {
+       plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+@@ -359,6 +371,9 @@ void dec_rt_migration(struct sched_rt_en
+ {
+ }
++static inline void queue_push_tasks(struct rq *rq)
++{
++}
+ #endif /* CONFIG_SMP */
+ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
+@@ -1349,11 +1364,7 @@ static struct task_struct *pick_next_tas
+               dequeue_pushable_task(rq, p);
+ #ifdef CONFIG_SMP
+-      /*
+-       * We detect this state here so that we can avoid taking the RQ
+-       * lock again later if there is no need to push
+-       */
+-      rq->post_schedule = has_pushable_tasks(rq);
++      queue_push_tasks(rq);
+ #endif
+       return p;
+@@ -1731,11 +1742,6 @@ static void pre_schedule_rt(struct rq *r
+               pull_rt_task(rq);
+ }
+-static void post_schedule_rt(struct rq *rq)
+-{
+-      push_rt_tasks(rq);
+-}
+-
+ /*
+  * If we are not running and we are not going to reschedule soon, we should
+  * try to push tasks away now
+@@ -2008,7 +2014,6 @@ const struct sched_class rt_sched_class
+       .rq_online              = rq_online_rt,
+       .rq_offline             = rq_offline_rt,
+       .pre_schedule           = pre_schedule_rt,
+-      .post_schedule          = post_schedule_rt,
+       .task_woken             = task_woken_rt,
+       .switched_from          = switched_from_rt,
+ #endif
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -587,9 +587,10 @@ struct rq {
+       unsigned long cpu_power;
++      struct callback_head *balance_callback;
++
+       unsigned char idle_balance;
+       /* For active balancing */
+-      int post_schedule;
+       int active_balance;
+       int push_cpu;
+       struct cpu_stop_work active_balance_work;
+@@ -690,6 +691,21 @@ extern int migrate_swap(struct task_stru
+ #ifdef CONFIG_SMP
++static inline void
++queue_balance_callback(struct rq *rq,
++                     struct callback_head *head,
++                     void (*func)(struct rq *rq))
++{
++      lockdep_assert_held(&rq->lock);
++
++      if (unlikely(head->next))
++              return;
++
++      head->func = (void (*)(struct callback_head *))func;
++      head->next = rq->balance_callback;
++      rq->balance_callback = head;
++}
++
+ #define rcu_dereference_check_sched_domain(p) \
+       rcu_dereference_check((p), \
+                             lockdep_is_held(&sched_domains_mutex))
+@@ -1131,7 +1147,6 @@ struct sched_class {
+       void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
+       void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+-      void (*post_schedule) (struct rq *this_rq);
+       void (*task_waking) (struct task_struct *task);
+       void (*task_woken) (struct rq *this_rq, struct task_struct *task);
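
A self-contained sketch of the list mechanism this patch introduces follows. The real queue_balance_callback()/__balance_callback() additionally run under rq->lock with interrupts disabled, assert lockdep ownership and use per-CPU callback_head storage; all of that is omitted here, and the names below are local stand-ins rather than kernel exports.

#include <stdio.h>
#include <stddef.h>

/* Local stand-ins for the kernel structures; not the real definitions. */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

struct rq {
	int cpu;
	struct callback_head *balance_callback;	/* singly linked, LIFO */
};

/*
 * Queue one deferred balance operation.  As in the patch, an entry whose
 * ->next is already non-NULL is linked into some list and is not queued
 * again; the drain below resets ->next before invoking the function.
 */
static void queue_balance_callback(struct rq *rq, struct callback_head *head,
				   void (*func)(struct rq *rq))
{
	if (head->next)
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

/* Drain the list.  The kernel version re-takes rq->lock with IRQs disabled. */
static void balance_callback(struct rq *rq)
{
	struct callback_head *head = rq->balance_callback;

	rq->balance_callback = NULL;
	while (head) {
		void (*func)(struct rq *) = (void (*)(struct rq *))head->func;
		struct callback_head *next = head->next;

		head->next = NULL;	/* allow this entry to be queued again */
		head = next;
		func(rq);
	}
}

static void push_tasks(struct rq *rq) { printf("push on cpu %d\n", rq->cpu); }
static void pull_task(struct rq *rq)  { printf("pull on cpu %d\n", rq->cpu); }

int main(void)
{
	struct rq rq = { .cpu = 0 };
	struct callback_head pull_head = { 0 }, push_head = { 0 };

	queue_balance_callback(&rq, &pull_head, pull_task);
	queue_balance_callback(&rq, &push_head, push_tasks);
	queue_balance_callback(&rq, &push_head, push_tasks);	/* already linked: ignored */

	balance_callback(&rq);		/* LIFO: prints "push", then "pull" */
	return 0;
}

The rt/dl conversion patches in this queue split their single rt_balance_head/dl_balance_head into separate push and pull heads precisely so that both operations can be pending on the same CPU at the same time.
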
diff --git a/queue-3.14/sched-rt-convert-switched_-from-to-_rt-prio_changed_rt-to-balance-callbacks.patch b/queue-3.14/sched-rt-convert-switched_-from-to-_rt-prio_changed_rt-to-balance-callbacks.patch
new file mode 100644 (file)
index 0000000..a93e49c
--- /dev/null
@@ -0,0 +1,117 @@
+From fd7a4bed183523275279c9addbf42fce550c2e90 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 14:46:41 +0200
+Subject: sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit fd7a4bed183523275279c9addbf42fce550c2e90 upstream.
+
+Remove the direct {push,pull} balancing operations from
+switched_{from,to}_rt() / prio_changed_rt() and use the balance
+callback queue.
+
+Again, err on the side of too many reschedules, since too few is a
+hard bug while too many is just annoying.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: ktkhai@parallels.com
+Cc: rostedt@goodmis.org
+Cc: juri.lelli@gmail.com
+Cc: pang.xunlei@linaro.org
+Cc: oleg@redhat.com
+Cc: wanpeng.li@linux.intel.com
+Cc: umgwanakikbuti@gmail.com
+Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Byungchul Park <byungchul.park@lge.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c |   35 +++++++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -315,16 +315,23 @@ static inline int has_pushable_tasks(str
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+ }
+-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
++static DEFINE_PER_CPU(struct callback_head, rt_push_head);
++static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
+ static void push_rt_tasks(struct rq *);
++static void pull_rt_task(struct rq *);
+ static inline void queue_push_tasks(struct rq *rq)
+ {
+       if (!has_pushable_tasks(rq))
+               return;
+-      queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
++      queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
++}
++
++static inline void queue_pull_task(struct rq *rq)
++{
++      queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
+ }
+ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+@@ -1837,7 +1844,7 @@ static void switched_from_rt(struct rq *
+       if (!p->on_rq || rq->rt.rt_nr_running)
+               return;
+-      pull_rt_task(rq);
++      queue_pull_task(rq);
+ }
+ void init_sched_rt_class(void)
+@@ -1858,8 +1865,6 @@ void init_sched_rt_class(void)
+  */
+ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ {
+-      int check_resched = 1;
+-
+       /*
+        * If we are already running, then there's nothing
+        * that needs to be done. But if we are not running
+@@ -1869,13 +1874,12 @@ static void switched_to_rt(struct rq *rq
+        */
+       if (p->on_rq && rq->curr != p) {
+ #ifdef CONFIG_SMP
+-              if (rq->rt.overloaded && push_rt_task(rq) &&
+-                  /* Don't resched if we changed runqueues */
+-                  rq != task_rq(p))
+-                      check_resched = 0;
+-#endif /* CONFIG_SMP */
+-              if (check_resched && p->prio < rq->curr->prio)
++              if (rq->rt.overloaded)
++                      queue_push_tasks(rq);
++#else
++              if (p->prio < rq->curr->prio)
+                       resched_task(rq->curr);
++#endif /* CONFIG_SMP */
+       }
+ }
+@@ -1896,14 +1900,13 @@ prio_changed_rt(struct rq *rq, struct ta
+                * may need to pull tasks to this runqueue.
+                */
+               if (oldprio < p->prio)
+-                      pull_rt_task(rq);
++                      queue_pull_task(rq);
++
+               /*
+                * If there's a higher priority task waiting to run
+-               * then reschedule. Note, the above pull_rt_task
+-               * can release the rq lock and p could migrate.
+-               * Only reschedule if p is still on the same runqueue.
++               * then reschedule.
+                */
+-              if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
++              if (p->prio > rq->rt.highest_prio.curr)
+                       resched_task(p);
+ #else
+               /* For UP simply resched on drop of prio */
diff --git a/queue-3.14/sched-rt-remove-return-value-from-pull_rt_task.patch b/queue-3.14/sched-rt-remove-return-value-from-pull_rt_task.patch
new file mode 100644 (file)
index 0000000..1d5b0dd
--- /dev/null
@@ -0,0 +1,85 @@
+From 8046d6806247088de5725eaf8a2580b29e50ac5a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Jun 2015 14:46:40 +0200
+Subject: sched,rt: Remove return value from pull_rt_task()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 8046d6806247088de5725eaf8a2580b29e50ac5a upstream.
+
+In order to be able to use pull_rt_task() from a callback, we need to
+do away with the return value.
+
+Since the return value indicates if we should reschedule, do this
+inside the function. Since not all callers currently do this, this can
+increase the number of reschedules due to rt balancing.
+
+Too many reschedules is not a correctness issue, too few are.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: ktkhai@parallels.com
+Cc: rostedt@goodmis.org
+Cc: juri.lelli@gmail.com
+Cc: pang.xunlei@linaro.org
+Cc: oleg@redhat.com
+Cc: wanpeng.li@linux.intel.com
+Cc: umgwanakikbuti@gmail.com
+Link: http://lkml.kernel.org/r/20150611124742.679002000@infradead.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Byungchul Park <byungchul.park@lge.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1652,14 +1652,15 @@ static void push_rt_tasks(struct rq *rq)
+               ;
+ }
+-static int pull_rt_task(struct rq *this_rq)
++static void pull_rt_task(struct rq *this_rq)
+ {
+-      int this_cpu = this_rq->cpu, ret = 0, cpu;
++      int this_cpu = this_rq->cpu, cpu;
++      bool resched = false;
+       struct task_struct *p;
+       struct rq *src_rq;
+       if (likely(!rt_overloaded(this_rq)))
+-              return 0;
++              return;
+       /*
+        * Match the barrier from rt_set_overloaded; this guarantees that if we
+@@ -1716,7 +1717,7 @@ static int pull_rt_task(struct rq *this_
+                       if (p->prio < src_rq->curr->prio)
+                               goto skip;
+-                      ret = 1;
++                      resched = true;
+                       deactivate_task(src_rq, p, 0);
+                       set_task_cpu(p, this_cpu);
+@@ -1732,7 +1733,8 @@ skip:
+               double_unlock_balance(this_rq, src_rq);
+       }
+-      return ret;
++      if (resched)
++              resched_task(this_rq->curr);
+ }
+ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
+@@ -1835,8 +1837,7 @@ static void switched_from_rt(struct rq *
+       if (!p->on_rq || rq->rt.rt_nr_running)
+               return;
+-      if (pull_rt_task(rq))
+-              resched_task(rq->curr);
++      pull_rt_task(rq);
+ }
+ void init_sched_rt_class(void)
diff --git a/queue-3.14/series b/queue-3.14/series
index d6b1024d2acc728f063cf830cf5dce2fa6e1c556..329b707a5854015338c665937409de6c37359e63 100644 (file)
--- a/queue-3.14/series
@@ -40,3 +40,9 @@ failing-to-send-a-close-if-file-is-opened-wronly-and-server-reboots-on-a-4.x-mou
 bnx2x-don-t-notify-about-scratchpad-parities.patch
 unix-correctly-track-in-flight-fds-in-sending-process-user_struct.patch
 sched-clean-up-idle-task-smp-logic.patch
+sched-replace-post_schedule-with-a-balance-callback-list.patch
+sched-allow-balance-callbacks-for-check_class_changed.patch
+sched-rt-remove-return-value-from-pull_rt_task.patch
+sched-rt-convert-switched_-from-to-_rt-prio_changed_rt-to-balance-callbacks.patch
+sched-dl-remove-return-value-from-pull_dl_task.patch
+sched-dl-convert-switched_-from-to-_dl-prio_changed_dl-to-balance-callbacks.patch