]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2 more patches added
authorGreg Kroah-Hartman <gregkh@suse.de>
Mon, 13 Aug 2007 23:36:21 +0000 (16:36 -0700)
committerGreg Kroah-Hartman <gregkh@suse.de>
Mon, 13 Aug 2007 23:36:21 +0000 (16:36 -0700)
queue-2.6.22/cpufreq-ondemand-add-a-check-to-avoid-negative-load-calculation.patch [new file with mode: 0644]
queue-2.6.22/cpufreq-ondemand-fix-tickless-accounting-and-software-coordination-bug.patch [new file with mode: 0644]
queue-2.6.22/series

diff --git a/queue-2.6.22/cpufreq-ondemand-add-a-check-to-avoid-negative-load-calculation.patch b/queue-2.6.22/cpufreq-ondemand-add-a-check-to-avoid-negative-load-calculation.patch
new file mode 100644 (file)
index 0000000..7a73576
--- /dev/null
@@ -0,0 +1,40 @@
+From 0af99b13c9f323e658b4f1d69a1ccae7d6f3f80a Mon Sep 17 00:00:00 2001
+From: Venki Pallipadi <venkatesh.pallipadi@intel.com>
+Date: Wed, 20 Jun 2007 14:24:52 -0700
+Subject: CPUFREQ: ondemand: add a check to avoid negative load calculation
+
+From: Venki Pallipadi <venkatesh.pallipadi@intel.com>
+
+Due to rounding and inexact jiffy accounting, idle_ticks can sometimes
+be higher than total_ticks. Make sure those cases are handled as
+zero load case.
+
+Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Signed-off-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/cpufreq/cpufreq_ondemand.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -335,7 +335,7 @@ static struct attribute_group dbs_attr_g
+ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+ {
+       unsigned int idle_ticks, total_ticks;
+-      unsigned int load;
++      unsigned int load = 0;
+       cputime64_t cur_jiffies;
+       struct cpufreq_policy *policy;
+@@ -381,7 +381,8 @@ static void dbs_check_cpu(struct cpu_dbs
+               if (tmp_idle_ticks < idle_ticks)
+                       idle_ticks = tmp_idle_ticks;
+       }
+-      load = (100 * (total_ticks - idle_ticks)) / total_ticks;
++      if (likely(total_ticks > idle_ticks))
++              load = (100 * (total_ticks - idle_ticks)) / total_ticks;
+       /* Check for frequency increase */
+       if (load > dbs_tuners_ins.up_threshold) {
diff --git a/queue-2.6.22/cpufreq-ondemand-fix-tickless-accounting-and-software-coordination-bug.patch b/queue-2.6.22/cpufreq-ondemand-fix-tickless-accounting-and-software-coordination-bug.patch
new file mode 100644 (file)
index 0000000..19b23be
--- /dev/null
@@ -0,0 +1,73 @@
+From ea48761519bd40d7a881c587b5f3177664b2987e Mon Sep 17 00:00:00 2001
+From: Venki Pallipadi <venkatesh.pallipadi@intel.com>
+Date: Wed, 20 Jun 2007 14:26:24 -0700
+Subject: CPUFREQ: ondemand: fix tickless accounting and software coordination bug
+
+From: Venki Pallipadi <venkatesh.pallipadi@intel.com>
+
+With tickless kernel and software coordination of P-states, ondemand
+can look at wrong idle statistics. This can happen when ondemand sampling
+is happening on CPU 0 and due to software coordination sampling also looks at
+utilization of CPU 1. If CPU 1 is in tickless state at that moment, its idle
+statistics will not be up to date and CPU 0 thinks CPU 1 is idle for less
+amount of time than it actually is.
+
+This can be resolved by looking at all the busy times of CPUs, which is
+accurate, even with tickless, and use that to determine idle time in a
+roundabout way (total time - busy time).
+
+Thanks to Arjan for originally reporting the ondemand bug on
+Lenovo T61.
+
+Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Signed-off-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/cpufreq/cpufreq_ondemand.c |   25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -96,15 +96,25 @@ static struct dbs_tuners {
+ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+ {
+-      cputime64_t retval;
++      cputime64_t idle_time;
++      cputime64_t cur_jiffies;
++      cputime64_t busy_time;
+-      retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
+-                      kstat_cpu(cpu).cpustat.iowait);
++      cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
++      busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
++                      kstat_cpu(cpu).cpustat.system);
+-      if (dbs_tuners_ins.ignore_nice)
+-              retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
++      busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
++      busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
++      busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+-      return retval;
++      if (!dbs_tuners_ins.ignore_nice) {
++              busy_time = cputime64_add(busy_time,
++                              kstat_cpu(cpu).cpustat.nice);
++      }
++
++      idle_time = cputime64_sub(cur_jiffies, busy_time);
++      return idle_time;
+ }
+ /*
+@@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs
+       cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+       total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
+                       this_dbs_info->prev_cpu_wall);
+-      this_dbs_info->prev_cpu_wall = cur_jiffies;
++      this_dbs_info->prev_cpu_wall = get_jiffies_64();
++
+       if (!total_ticks)
+               return;
+       /*
index ad51f072c250082232a54fe4a8e39ee8c9ee9445..83a431b3703095ccfadf5b50d6dbf3f5e884ce2f 100644 (file)
@@ -8,3 +8,5 @@ powerpc-fix-size-check-for-hugetlbfs.patch
 direct-io-fix-error-path-crashes.patch
 stifb-detect-cards-in-double-buffer-mode-more-reliably.patch
 pata_atiixp-add-sb700-pci-id.patch
+cpufreq-ondemand-fix-tickless-accounting-and-software-coordination-bug.patch
+cpufreq-ondemand-add-a-check-to-avoid-negative-load-calculation.patch