From: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Subject: cpufreq, ondemand: Change the load calculation, optimizing for dependent cpus

Change the load calculation algorithm in ondemand to work well with software
coordination of frequency across the dependent cpus.

Multiply each CPU's utilization by the average frequency of that logical CPU
during the measurement interval (using the getavg call), then take the maximum
of these per-CPU utilization numbers expressed in terms of CPU frequency. That
maximum is used to pick the target frequency for the next sampling interval.

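To make the arithmetic concrete, here is a minimal stand-alone user-space
sketch of the new decision rule. It is an illustration only, not kernel code:
the tick counts, frequencies and the NR_CPUS/UP_THRESHOLD values below are
invented sample inputs, whereas in the governor the corresponding data comes
from get_cpu_idle_time(), get_jiffies_64() and __cpufreq_driver_getavg().

/* Hypothetical user-space sketch mirroring the patch's decision logic. */
#include <stdio.h>

#define NR_CPUS      2
#define UP_THRESHOLD 80 /* default ondemand up_threshold, in percent */

int main(void)
{
        /* invented per-CPU samples for one sampling interval */
        unsigned int wall_time[NR_CPUS] = { 100, 100 };        /* ticks */
        unsigned int idle_time[NR_CPUS] = {  90,  40 };        /* ticks */
        unsigned int freq_avg[NR_CPUS]  = { 800000, 2000000 }; /* kHz */
        unsigned int cur_freq = 2000000;                       /* policy->cur */
        unsigned int max_load_freq = 0;
        int j;

        /* Scale each CPU's load by the frequency it actually ran at,
         * then keep the maximum across the dependent CPUs. */
        for (j = 0; j < NR_CPUS; j++) {
                unsigned int load, load_freq;

                if (wall_time[j] <= idle_time[j])
                        continue;
                load = 100 * (wall_time[j] - idle_time[j]) / wall_time[j];
                load_freq = load * freq_avg[j];
                if (load_freq > max_load_freq)
                        max_load_freq = load_freq;
        }

        /* "load > up_threshold" becomes a comparison in frequency terms. */
        if (max_load_freq > UP_THRESHOLD * cur_freq)
                printf("ramp up to policy->max\n");
        else if (max_load_freq < (UP_THRESHOLD - 10) * cur_freq)
                printf("scale down to %u kHz\n",
                       max_load_freq / (UP_THRESHOLD - 10));
        else
                printf("keep current frequency\n");
        return 0;
}

With these sample numbers CPU1 dominates (60% load at an average 2000000 kHz),
giving max_load_freq = 120000000. That is below both 80 * 2000000 and
(80 - 10) * 2000000, so the sketch takes the scale-down branch and prints
1714285 kHz.
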
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Thomas Renninger <trenn@suse.de>

---
 drivers/cpufreq/cpufreq_ondemand.c | 65 +++++++++++++++++++------------------
 1 file changed, 35 insertions(+), 30 deletions(-)

Index: cpufreq.git/drivers/cpufreq/cpufreq_ondemand.c
===================================================================
--- cpufreq.git.orig/drivers/cpufreq/cpufreq_ondemand.c	2008-07-31 14:33:54.000000000 -0700
+++ cpufreq.git/drivers/cpufreq/cpufreq_ondemand.c	2008-07-31 14:52:01.000000000 -0700
@@ -334,9 +334,7 @@ static struct attribute_group dbs_attr_g
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-        unsigned int idle_ticks, total_ticks;
-        unsigned int load = 0;
-        cputime64_t cur_jiffies;
+        unsigned int max_load_freq;
 
         struct cpufreq_policy *policy;
         unsigned int j;
@@ -346,13 +344,7 @@ static void dbs_check_cpu(struct cpu_dbs
 
         this_dbs_info->freq_lo = 0;
         policy = this_dbs_info->cur_policy;
-        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-                        this_dbs_info->prev_cpu_wall);
-        this_dbs_info->prev_cpu_wall = get_jiffies_64();
 
-        if (!total_ticks)
-                return;
         /*
          * Every sampling_rate, we check, if current idle time is less
          * than 20% (default), then we try to increase frequency
@@ -365,27 +357,46 @@ static void dbs_check_cpu(struct cpu_dbs
          * 5% (default) of current frequency
          */
 
-        /* Get Idle Time */
-        idle_ticks = UINT_MAX;
+        /* Get Absolute Load - in terms of freq */
+        max_load_freq = 0;
+
         for_each_cpu_mask_nr(j, policy->cpus) {
-                cputime64_t total_idle_ticks;
-                unsigned int tmp_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
+                cputime64_t cur_wall_time, cur_idle_time;
+                unsigned int idle_time, wall_time;
+                unsigned int load, load_freq;
+                int freq_avg;
 
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
-                total_idle_ticks = get_cpu_idle_time(j);
-                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+                cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                                j_dbs_info->prev_cpu_wall);
+                j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+                cur_idle_time = get_cpu_idle_time(j);
+                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                 j_dbs_info->prev_cpu_idle);
-                j_dbs_info->prev_cpu_idle = total_idle_ticks;
+                j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+                if (unlikely(wall_time <= idle_time ||
+                    (cputime_to_msecs(wall_time) <
+                     dbs_tuners_ins.sampling_rate / (2 * 1000)))) {
+                        continue;
+                }
+
+                load = 100 * (wall_time - idle_time) / wall_time;
 
-                if (tmp_idle_ticks < idle_ticks)
-                        idle_ticks = tmp_idle_ticks;
+                freq_avg = __cpufreq_driver_getavg(policy, j);
+                if (freq_avg <= 0)
+                        freq_avg = policy->cur;
+
+                load_freq = load * freq_avg;
+                if (load_freq > max_load_freq)
+                        max_load_freq = load_freq;
         }
-        if (likely(total_ticks > idle_ticks))
-                load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
         /* Check for frequency increase */
-        if (load > dbs_tuners_ins.up_threshold) {
+        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                 /* if we are already at full speed then break out early */
                 if (!dbs_tuners_ins.powersave_bias) {
                         if (policy->cur == policy->max)
@@ -412,15 +423,9 @@ static void dbs_check_cpu(struct cpu_dbs
          * can support the current CPU usage without triggering the up
          * policy. To be safe, we focus 10 points under the threshold.
          */
-        if (load < (dbs_tuners_ins.up_threshold - 10)) {
-                unsigned int freq_next, freq_cur;
-
-                freq_cur = __cpufreq_driver_getavg(policy, policy->cpu);
-                if (!freq_cur)
-                        freq_cur = policy->cur;
-
-                freq_next = (freq_cur * load) /
-                        (dbs_tuners_ins.up_threshold - 10);
+        if (max_load_freq < (dbs_tuners_ins.up_threshold - 10) * policy->cur) {
+                unsigned int freq_next;
+                freq_next = max_load_freq / (dbs_tuners_ins.up_threshold - 10);
 
                 if (!dbs_tuners_ins.powersave_bias) {
                         __cpufreq_driver_target(policy, freq_next,

--