3.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 May 2013 20:30:32 +0000 (13:30 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 May 2013 20:30:32 +0000 (13:30 -0700)
added patches:
revert-math64-new-div64_u64_rem-helper.patch
sched-avoid-prev-stime-underflow.patch
sched-do-not-account-bogus-utime.patch

queue-3.9/revert-math64-new-div64_u64_rem-helper.patch [new file with mode: 0644]
queue-3.9/sched-avoid-prev-stime-underflow.patch [new file with mode: 0644]
queue-3.9/sched-do-not-account-bogus-utime.patch [new file with mode: 0644]
queue-3.9/series

diff --git a/queue-3.9/revert-math64-new-div64_u64_rem-helper.patch b/queue-3.9/revert-math64-new-div64_u64_rem-helper.patch
new file mode 100644
index 0000000..bf48a40
--- /dev/null
+++ b/queue-3.9/revert-math64-new-div64_u64_rem-helper.patch
@@ -0,0 +1,119 @@
+From f3002134158092178be81339ec5a22ff80e6c308 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 30 Apr 2013 11:35:07 +0200
+Subject: Revert "math64: New div64_u64_rem helper"
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit f3002134158092178be81339ec5a22ff80e6c308 upstream.
+
+This reverts commit f792685006274a850e6cc0ea9ade275ccdfc90bc.
+
+The cputime scaling code was changed/fixed and does not need the
+div64_u64_rem() primitive anymore. It has no other users, so let's
+remove it.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: rostedt@goodmis.org
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Dave Hansen <dave@sr71.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1367314507-9728-4-git-send-email-sgruszka@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/math64.h |   19 +------------------
+ lib/div64.c            |   19 ++++++-------------
+ 2 files changed, 7 insertions(+), 31 deletions(-)
+
+--- a/include/linux/math64.h
++++ b/include/linux/math64.h
+@@ -30,15 +30,6 @@ static inline s64 div_s64_rem(s64 divide
+ }
+ /**
+- * div64_u64_rem - unsigned 64bit divide with 64bit divisor
+- */
+-static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+-{
+-      *remainder = dividend % divisor;
+-      return dividend / divisor;
+-}
+-
+-/**
+  * div64_u64 - unsigned 64bit divide with 64bit divisor
+  */
+ static inline u64 div64_u64(u64 dividend, u64 divisor)
+@@ -70,16 +61,8 @@ static inline u64 div_u64_rem(u64 divide
+ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+ #endif
+-#ifndef div64_u64_rem
+-extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+-#endif
+-
+ #ifndef div64_u64
+-static inline u64 div64_u64(u64 dividend, u64 divisor)
+-{
+-      u64 remainder;
+-      return div64_u64_rem(dividend, divisor, &remainder);
+-}
++extern u64 div64_u64(u64 dividend, u64 divisor);
+ #endif
+ #ifndef div64_s64
+--- a/lib/div64.c
++++ b/lib/div64.c
+@@ -79,10 +79,9 @@ EXPORT_SYMBOL(div_s64_rem);
+ #endif
+ /**
+- * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
++ * div64_u64 - unsigned 64bit divide with 64bit divisor
+  * @dividend: 64bit dividend
+  * @divisor:  64bit divisor
+- * @remainder:  64bit remainder
+  *
+  * This implementation is a modified version of the algorithm proposed
+  * by the book 'Hacker's Delight'.  The original source and full proof
+@@ -90,33 +89,27 @@ EXPORT_SYMBOL(div_s64_rem);
+  *
+  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
+  */
+-#ifndef div64_u64_rem
+-u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
++#ifndef div64_u64
++u64 div64_u64(u64 dividend, u64 divisor)
+ {
+       u32 high = divisor >> 32;
+       u64 quot;
+       if (high == 0) {
+-              u32 rem32;
+-              quot = div_u64_rem(dividend, divisor, &rem32);
+-              *remainder = rem32;
++              quot = div_u64(dividend, divisor);
+       } else {
+               int n = 1 + fls(high);
+               quot = div_u64(dividend >> n, divisor >> n);
+               if (quot != 0)
+                       quot--;
+-
+-              *remainder = dividend - quot * divisor;
+-              if (*remainder >= divisor) {
++              if ((dividend - quot * divisor) >= divisor)
+                       quot++;
+-                      *remainder -= divisor;
+-              }
+       }
+       return quot;
+ }
+-EXPORT_SYMBOL(div64_u64_rem);
++EXPORT_SYMBOL(div64_u64);
+ #endif
+ /**
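
For context, the div64_u64() restored above divides by a full 64-bit
divisor with a shift-and-correct scheme: both operands are shifted
right until the divisor fits in 32 bits, a cheap division yields an
estimate that exceeds the true quotient by at most one, and a single
multiply-and-compare fixes it up. Below is a user-space sketch of the
same algorithm; it is an illustration under stated assumptions, not
the kernel code: fls() is emulated with __builtin_clz() and div_u64()
becomes a plain 64-bit division.

    /*
     * User-space sketch of the restored div64_u64(); illustrative only.
     */
    #include <stdint.h>
    #include <stdio.h>

    static int fls32(uint32_t x)    /* 1-based index of the highest set bit */
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    static uint64_t sketch_div64_u64(uint64_t dividend, uint64_t divisor)
    {
            uint32_t high = divisor >> 32;
            uint64_t quot;

            if (high == 0) {
                    quot = dividend / divisor;  /* div_u64() in the kernel */
            } else {
                    /*
                     * Shift both operands so the divisor fits in 32 bits.
                     * The estimate is at most one too large, so drop it by
                     * one and correct upward with a single compare.
                     */
                    int n = 1 + fls32(high);

                    quot = (dividend >> n) / (divisor >> n);
                    if (quot != 0)
                            quot--;
                    if ((dividend - quot * divisor) >= divisor)
                            quot++;
            }
            return quot;
    }

    int main(void)
    {
            /* (2^64 - 1) / (2^32 + 1) == 2^32 - 1 exactly */
            printf("%llu\n", (unsigned long long)
                   sketch_div64_u64(0xFFFFFFFFFFFFFFFFull, 0x100000001ull));
            return 0;
    }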
diff --git a/queue-3.9/sched-avoid-prev-stime-underflow.patch b/queue-3.9/sched-avoid-prev-stime-underflow.patch
new file mode 100644
index 0000000..7349e71
--- /dev/null
+++ b/queue-3.9/sched-avoid-prev-stime-underflow.patch
@@ -0,0 +1,80 @@
+From 68aa8efcd1ab961e4684ef5af32f72a6ec1911de Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 30 Apr 2013 11:35:06 +0200
+Subject: sched: Avoid prev->stime underflow
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 68aa8efcd1ab961e4684ef5af32f72a6ec1911de upstream.
+
+Dave Hansen reported strange utime/stime values on his system:
+https://lkml.org/lkml/2013/4/4/435
+
+This happens because the prev->stime value is bigger than the
+rtime value. The root of the problem is non-monotonic rtime
+values (i.e. the current rtime is smaller than the previous
+rtime), and that should be debugged and fixed.
+
+But since the problem did not manifest itself before commit
+62188451f0d63add7ad0cd2a1ae269d600c1663d "cputime: Avoid
+multiplication overflow on utime scaling", it should be treated
+as a regression, which we can easily fix in the cputime_adjust()
+function.
+
+For now, let's apply this fix, but further work is needed to fix
+the root of the problem.
+
+Reported-and-tested-by: Dave Hansen <dave@sr71.net>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: rostedt@goodmis.org
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Dave Hansen <dave@sr71.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1367314507-9728-3-git-send-email-sgruszka@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/cputime.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -574,7 +574,7 @@ static void cputime_adjust(struct task_c
+                          struct cputime *prev,
+                          cputime_t *ut, cputime_t *st)
+ {
+-      cputime_t rtime, stime, total;
++      cputime_t rtime, stime, utime, total;
+       stime = curr->stime;
+       total = stime + curr->utime;
+@@ -599,13 +599,13 @@ static void cputime_adjust(struct task_c
+       if (prev->stime + prev->utime >= rtime)
+               goto out;
+-      if (!rtime) {
+-              stime = 0;
+-      } else if (!total) {
+-              stime = rtime;
+-      } else {
++      if (total) {
+               stime = scale_stime((__force u64)stime,
+                                   (__force u64)rtime, (__force u64)total);
++              utime = rtime - stime;
++      } else {
++              stime = rtime;
++              utime = 0;
+       }
+       /*
+@@ -614,7 +614,7 @@ static void cputime_adjust(struct task_c
+        * Let's enforce monotonicity.
+        */
+       prev->stime = max(prev->stime, stime);
+-      prev->utime = max(prev->utime, rtime - prev->stime);
++      prev->utime = max(prev->utime, utime);
+ out:
+       *ut = prev->utime;
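
The key change above is that utime is now derived from the freshly
scaled stime (utime = rtime - stime, which cannot underflow because
the scaled stime never exceeds rtime) rather than from the previously
exported prev->stime, which can be larger than the current rtime when
rtime is non-monotonic. The following is a minimal user-space model of
the resulting cputime_adjust() flow, with uint64_t standing in for
cputime_t and a naive stand-in for scale_stime(); both are assumptions
for illustration, not the kernel code.

    #include <stdint.h>
    #include <stdio.h>

    struct prev_cputime { uint64_t utime, stime; };

    /* Naive stand-in: the real scale_stime() avoids 64x64 overflow. */
    static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
    {
            return stime * rtime / total;
    }

    static void cputime_adjust(uint64_t curr_utime, uint64_t curr_stime,
                               uint64_t rtime, struct prev_cputime *prev,
                               uint64_t *ut, uint64_t *st)
    {
            uint64_t total = curr_stime + curr_utime;
            uint64_t stime, utime;

            /* Guard from the "Do not account bogus utime" patch below. */
            if (prev->stime + prev->utime >= rtime)
                    goto out;

            if (total) {
                    stime = scale_stime(curr_stime, rtime, total);
                    utime = rtime - stime;  /* safe: scaled stime <= rtime */
            } else {
                    stime = rtime;
                    utime = 0;
            }

            /* Enforce monotonicity against what was already reported. */
            if (stime > prev->stime)
                    prev->stime = stime;
            if (utime > prev->utime)
                    prev->utime = utime;
    out:
            *ut = prev->utime;
            *st = prev->stime;
    }

    int main(void)
    {
            struct prev_cputime prev = { 0, 0 };
            uint64_t ut, st;

            /* 75 ticks of stime, 25 of utime, 100 ticks of real runtime. */
            cputime_adjust(25, 75, 100, &prev, &ut, &st);
            printf("utime=%llu stime=%llu\n",
                   (unsigned long long)ut, (unsigned long long)st);
            return 0;
    }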
diff --git a/queue-3.9/sched-do-not-account-bogus-utime.patch b/queue-3.9/sched-do-not-account-bogus-utime.patch
new file mode 100644
index 0000000..9fd73a9
--- /dev/null
+++ b/queue-3.9/sched-do-not-account-bogus-utime.patch
@@ -0,0 +1,59 @@
+From 772c808a252594692972773f6ee41c289b8e0b2a Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 30 Apr 2013 11:35:05 +0200
+Subject: sched: Do not account bogus utime
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 772c808a252594692972773f6ee41c289b8e0b2a upstream.
+
+Due to rounding in scale_stime(), for big numbers, scaled stime
+values will grow in chunks. Since rtime grows in jiffies and we
+calculate utime as below:
+
+       prev->stime = max(prev->stime, stime);
+       prev->utime = max(prev->utime, rtime - prev->stime);
+
+we could erroneously account stime values as utime. To prevent
+that, only update the prev->{u,s}time values when they are
+smaller than the current rtime.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: rostedt@goodmis.org
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Dave Hansen <dave@sr71.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1367314507-9728-2-git-send-email-sgruszka@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/cputime.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -591,6 +591,14 @@ static void cputime_adjust(struct task_c
+        */
+       rtime = nsecs_to_cputime(curr->sum_exec_runtime);
++      /*
++       * Update userspace visible utime/stime values only if actual execution
++       * time is bigger than already exported. Note that it can happen that
++       * we provided bigger values due to scaling inaccuracy on big numbers.
++       */
++      if (prev->stime + prev->utime >= rtime)
++              goto out;
++
+       if (!rtime) {
+               stime = 0;
+       } else if (!total) {
+@@ -608,6 +616,7 @@ static void cputime_adjust(struct task_c
+       prev->stime = max(prev->stime, stime);
+       prev->utime = max(prev->utime, rtime - prev->stime);
++out:
+       *ut = prev->utime;
+       *st = prev->stime;
+ }
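
To see concretely what the guard buys, here is a small demo with
purely illustrative numbers: an earlier sample exported prev->stime =
1030 because scale_stime() overshot, and the current sample sees rtime
= 1028. The old code would compute rtime - prev->stime, and since
cputime_t is unsigned the result wraps to a huge bogus value; with the
guard, the already-exported totals cover rtime, so the previous values
are simply re-reported.

    /* Illustrative numbers only: what the guard prevents. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t prev_stime = 1030; /* exported earlier, overshot by scaling */
            uint64_t prev_utime = 0;
            uint64_t rtime      = 1028; /* actual runtime at this sample */

            if (prev_stime + prev_utime >= rtime) {
                    /* Guard taken: re-report the old, monotonic values. */
                    printf("utime=%llu stime=%llu\n",
                           (unsigned long long)prev_utime,
                           (unsigned long long)prev_stime);
            } else {
                    /* Pre-patch behaviour: unsigned subtraction wraps. */
                    printf("bogus utime=%llu\n",
                           (unsigned long long)(rtime - prev_stime));
            }
            return 0;
    }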
diff --git a/queue-3.9/series b/queue-3.9/series
index 3feb816fa153f75d77f88c5da8f0e31132dcb071..dcf907a5bfc721ca4410f2a48e058b5ea125bacb 100644
--- a/queue-3.9/series
+++ b/queue-3.9/series
@@ -18,3 +18,6 @@ dm-cache-fix-error-return-code-in-cache_create.patch
 math64-new-div64_u64_rem-helper.patch
 sched-lower-chances-of-cputime-scaling-overflow.patch
 sched-avoid-cputime-scaling-overflow.patch
+sched-do-not-account-bogus-utime.patch
+revert-math64-new-div64_u64_rem-helper.patch
+sched-avoid-prev-stime-underflow.patch