#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE		0

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_LATENCY = 1,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};
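
/*
 * Note: req->type selects which member of the data union is in use --
 * DEV_PM_QOS_LATENCY uses data.pnode, DEV_PM_QOS_FLAGS uses data.flr.
 * Requests are meant to be set up and torn down through the
 * dev_pm_qos_add_request()/dev_pm_qos_remove_request() helpers declared
 * below rather than filled in by hand.
 */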

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all
 * CPU types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints latency;
	struct pm_qos_flags flags;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
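
/*
 * Illustrative sketch of the class-based interface above (the foo_* names
 * and the latency values are hypothetical, not part of the kernel):
 *
 *	static struct pm_qos_request foo_qos_req;
 *
 *	// before latency-sensitive I/O starts
 *	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);
 *
 *	// tighten or relax the constraint while it is active
 *	pm_qos_update_request(&foo_qos_req, 20);
 *
 *	// or let the new value expire automatically after 10 ms
 *	pm_qos_update_request_timeout(&foo_qos_req, 20, 10 * USEC_PER_MSEC);
 *
 *	// when the constraint is no longer needed
 *	pm_qos_remove_request(&foo_qos_req);
 *
 * The request structure must stay allocated for as long as the request is
 * registered.
 */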

#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							   s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  s32 value)
			{ return 0; }
#endif
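
/*
 * Illustrative sketch of the per-device interface above (the foo_* names,
 * the call sites and the 100 usec value are hypothetical, not part of the
 * kernel):
 *
 *	static struct dev_pm_qos_request foo_dev_req;
 *
 *	// e.g. in probe: constrain this device's latency to 100 usec
 *	dev_pm_qos_add_request(dev, &foo_dev_req, DEV_PM_QOS_LATENCY, 100);
 *
 *	// e.g. in remove: drop the constraint again
 *	dev_pm_qos_remove_request(&foo_dev_req);
 */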

#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
#endif
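
/*
 * Illustrative sketch (assumption: a driver that wants user space to be able
 * to tune its device latency constraint through sysfs; the call sites shown
 * are hypothetical):
 *
 *	// e.g. in probe: expose the latency limit with a default value
 *	dev_pm_qos_expose_latency_limit(dev, PM_QOS_DEV_LAT_DEFAULT_VALUE);
 *
 *	// e.g. in remove: withdraw it again
 *	dev_pm_qos_hide_latency_limit(dev);
 */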

#endif