#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,
	PM_QOS_MEMORY_BANDWIDTH,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE	0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
#define PM_QOS_LATENCY_ANY			((s32)(~(__u32)0 >> 1))

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
	PM_QOS_SUM		/* return the sum */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically. Atomic access is only guaranteed on all CPU
 * types linux supports for 32 bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	s32 no_constraint_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_constraints latency_tolerance;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *latency_tolerance_req;
	struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
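
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a driver that cannot tolerate long wakeup latencies might hold a
 * PM_QOS_CPU_DMA_LATENCY request around a latency-sensitive section.
 * The request variable "my_qos_req" and the 20 usec bound are assumptions
 * chosen for the example only.
 *
 *	static struct pm_qos_request my_qos_req;
 *
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	... latency-sensitive work ...
 *	pm_qos_update_request(&my_qos_req, PM_QOS_DEFAULT_VALUE);
 *	pm_qos_remove_request(&my_qos_req);
 */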

#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);
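
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a platform or parent driver may cap a device's acceptable resume latency
 * and drop the constraint again later. The request variable, the "ret"
 * local, and the 100 usec value are assumptions for the example only.
 *
 *	static struct dev_pm_qos_request my_resume_req;
 *	int ret;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_resume_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	... device stays responsive ...
 *	dev_pm_qos_update_request(&my_resume_req, PM_QOS_DEFAULT_VALUE);
 *	dev_pm_qos_remove_request(&my_resume_req);
 */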

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return dev->power.qos->resume_latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
	return dev->power.qos->flags_req->data.flr.flags;
}

static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
{
	return IS_ERR_OR_NULL(dev->power.qos) ?
		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							   s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  enum dev_pm_qos_req_type type,
						  s32 value)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
#endif

#endif