From 333c5ae9948194428fe6c5ef5c088304fc98263b Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Fri, 11 Feb 2011 12:49:04 -0800
Subject: idle governor: Avoid lock acquisition to read pm_qos before
 entering idle

From: Tim Chen <tim.c.chen@linux.intel.com>

commit 333c5ae9948194428fe6c5ef5c088304fc98263b upstream.

Thanks to the reviews and comments by Rafael, James, Mark and Andi.
Here's version 2 of the patch, incorporating your comments along with
some updates to my previous patch description.

I noticed that before entering an idle state, the menu idle governor
looks up the current pm_qos target value according to the list of qos
requests received.  This lookup currently requires acquiring a lock to
access the list of qos requests and find the target value, slowing
entry into idle due to contention when multiple cpus access the list.
The contention is severe when many cpus are waking up and going idle.
For example, for a simple workload with 32 pairs of processes
ping-ponging messages to each other, with 64 cpu cores active in the
test system, I see the following profile, with 37.82% of cpu cycles
spent contending on pm_qos_lock:

- 37.82%  swapper  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
   - _raw_spin_lock_irqsave
      - 95.65% pm_qos_request
           menu_select
           cpuidle_idle_call
         - cpu_idle
              99.98% start_secondary

A better approach is to cache the updated pm_qos target value so that
reading it does not require lock acquisition, as in the patch below.
With this patch the contention on pm_qos_lock is removed, and I saw a
2.2X increase in throughput for my message-passing workload.

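To illustrate the idea outside the kernel, here is a minimal sketch of
the same pattern (the qos_object type, the pthread mutex standing in
for pm_qos_lock, and the function names are hypothetical, chosen only
for illustration): updaters recompute the aggregate target while
holding the lock and publish it into a plain 32-bit field, and readers
load that field without taking the lock, relying on naturally atomic
aligned 32-bit loads and stores.

#include <pthread.h>
#include <stdint.h>

struct qos_object {
	pthread_mutex_t lock;	/* stands in for pm_qos_lock */
	int32_t target_value;	/* cached aggregate; keep it 32 bit */
	/* ... list of requests, protected by 'lock' ... */
};

/* Update path: hold the lock, recompute, publish the cached value. */
static void qos_update(struct qos_object *o, int32_t new_target)
{
	pthread_mutex_lock(&o->lock);
	/* ... modify the request list and recompute new_target ... */
	o->target_value = new_target;	/* single 32-bit store */
	pthread_mutex_unlock(&o->lock);
}

/* Read path (e.g. idle entry): no lock, just read the cached value. */
static int32_t qos_read(const struct qos_object *o)
{
	return o->target_value;		/* single 32-bit load */
}

int main(void)
{
	struct qos_object o = { PTHREAD_MUTEX_INITIALIZER, 2000000 };

	qos_update(&o, 100);		/* arbitrary new target */
	return qos_read(&o) == 100 ? 0 : 1;
}

The 32-bit width is the point: the architectures the kernel supports
guarantee that aligned 32-bit loads and stores are not torn, which is
why the new target_value field carries the "Do not change to 64 bit"
comment.
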
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: James Bottomley <James.Bottomley@suse.de>
Acked-by: mark gross <markgross@thegnar.org>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 include/linux/pm_qos_params.h |    4 ++++
 kernel/pm_qos_params.c        |   37 +++++++++++++++++++++++++------------
 2 files changed, 29 insertions(+), 12 deletions(-)

--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -16,6 +16,10 @@
 #define PM_QOS_NUM_CLASSES 4
 #define PM_QOS_DEFAULT_VALUE -1
 
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
+
 struct pm_qos_request_list {
 	struct plist_node list;
 	int pm_qos_class;
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -53,11 +53,17 @@ enum pm_qos_type {
 	PM_QOS_MIN		/* return the smallest value */
 };
 
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types linux supports for 32 bit quantites
+ */
 struct pm_qos_object {
 	struct plist_head requests;
 	struct blocking_notifier_head *notifiers;
 	struct miscdevice pm_qos_power_miscdev;
 	char *name;
+	s32 target_value;	/* Do not change to 64 bit */
 	s32 default_value;
 	enum pm_qos_type type;
 };
@@ -70,7 +76,8 @@ static struct pm_qos_object cpu_dma_pm_q
 	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
 	.notifiers = &cpu_dma_lat_notifier,
 	.name = "cpu_dma_latency",
-	.default_value = 2000 * USEC_PER_SEC,
+	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
 };
 
@@ -79,7 +86,8 @@ static struct pm_qos_object network_lat_
 	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_lat_notifier,
 	.name = "network_latency",
-	.default_value = 2000 * USEC_PER_SEC,
+	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN
 };
 
@@ -89,7 +97,8 @@ static struct pm_qos_object network_thro
 	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_throughput_notifier,
 	.name = "network_throughput",
-	.default_value = 0,
+	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
 };
 
@@ -132,6 +141,16 @@ static inline int pm_qos_get_value(struc
 	}
 }
 
+static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+{
+	return o->target_value;
+}
+
+static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+{
+	o->target_value = value;
+}
+
 static void update_target(struct pm_qos_object *o, struct plist_node *node,
 			  int del, int value)
 {
@@ -156,6 +175,7 @@ static void update_target(struct pm_qos_
 		plist_add(node, &o->requests);
 	}
 	curr_value = pm_qos_get_value(o);
+	pm_qos_set_value(o, curr_value);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	if (prev_value != curr_value)
@@ -190,18 +210,11 @@ static int find_pm_qos_object_by_minor(i
  * pm_qos_request - returns current system wide qos expectation
  * @pm_qos_class: identification of which qos value is requested
  *
- * This function returns the current target value in an atomic manner.
+ * This function returns the current target value.
  */
 int pm_qos_request(int pm_qos_class)
 {
-	unsigned long flags;
-	int value;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	return value;
+	return pm_qos_read_value(pm_qos_array[pm_qos_class]);
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 