commit 00e5a55c
From: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Subject: cpufreq,ondemand: Prepare changes for doing micro-accounting

Preparatory changes for doing idle micro-accounting in ondemand governor.
get_cpu_idle_time() gets extra parameter and returns idle time and also the
wall time that corresponds to the idle time measurement.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Thomas Renninger <trenn@suse.de>

---
 drivers/cpufreq/cpufreq_ondemand.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

Index: cpufreq.git/drivers/cpufreq/cpufreq_ondemand.c
===================================================================
--- cpufreq.git.orig/drivers/cpufreq/cpufreq_ondemand.c	2008-07-31 14:52:01.000000000 -0700
+++ cpufreq.git/drivers/cpufreq/cpufreq_ondemand.c	2008-07-31 14:52:10.000000000 -0700
@@ -94,13 +94,13 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
 {
 	cputime64_t idle_time;
-	cputime64_t cur_jiffies;
+	cputime64_t cur_wall_time;
 	cputime64_t busy_time;
 
-	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
 			kstat_cpu(cpu).cpustat.system);
 
@@ -113,7 +113,10 @@ static inline cputime64_t get_cpu_idle_t
 				kstat_cpu(cpu).cpustat.nice);
 	}
 
-	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	if (wall)
+		*wall = cur_wall_time;
+
 	return idle_time;
 }
 
@@ -277,8 +280,8 @@ static ssize_t store_ignore_nice_load(st
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cpu_dbs_info, j);
-		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-		dbs_info->prev_cpu_wall = get_jiffies_64();
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->prev_cpu_wall);
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -368,21 +371,19 @@ static void dbs_check_cpu(struct cpu_dbs
 		int freq_avg;
 
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
 		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
 				j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		cur_idle_time = get_cpu_idle_time(j);
 		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
 				j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		if (unlikely(wall_time <= idle_time ||
-			    (cputime_to_msecs(wall_time) <
-			     dbs_tuners_ins.sampling_rate / (2 * 1000)))) {
+		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
-		}
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
@@ -531,8 +532,8 @@ static int cpufreq_governor_dbs(struct c
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		j_dbs_info->cur_policy = policy;
 
-		j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-		j_dbs_info->prev_cpu_wall = get_jiffies_64();
+		j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&j_dbs_info->prev_cpu_wall);
 	}
 	this_dbs_info->cpu = cpu;
 	/*

--