/*
 * include/linux/pm_qos.h — from zen-stable.git
 * blob 4d99e4e6ef83fa4910e22959bc9326117ce1288e
 * (commit subject: "spi-topcliff-pch: add recovery processing in case
 *  wait-event timeout")
 */
1 #ifndef _LINUX_PM_QOS_H
2 #define _LINUX_PM_QOS_H
3 /* interface for the pm_qos_power infrastructure of the linux kernel.
5 * Mark Gross <mgross@linux.intel.com>
6 */
7 #include <linux/plist.h>
8 #include <linux/notifier.h>
9 #include <linux/miscdevice.h>
10 #include <linux/device.h>
/* PM QoS class identifiers: index into the global constraints table. */
#define PM_QOS_RESERVED 0
#define PM_QOS_CPU_DMA_LATENCY 1
#define PM_QOS_NETWORK_LATENCY 2
#define PM_QOS_NETWORK_THROUGHPUT 3

#define PM_QOS_NUM_CLASSES 4
#define PM_QOS_DEFAULT_VALUE -1

/* Per-class values returned when no QoS request is active. */
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE		0
25 struct pm_qos_request {
26 struct plist_node node;
27 int pm_qos_class;
30 struct dev_pm_qos_request {
31 struct plist_node node;
32 struct device *dev;
/* Aggregation policy for a constraints list. */
enum pm_qos_type {
	PM_QOS_UNITIALIZED,	/* upstream spelling; kept for ABI/source compat */
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};
/*
 * Note: The lockless read path depends on the CPU accessing
 * target_value atomically. Atomic access is only guaranteed on all CPU
 * types linux supports for 32 bit quantities.
 */
46 struct pm_qos_constraints {
47 struct plist_head list;
48 s32 target_value; /* Do not change to 64 bit */
49 s32 default_value;
50 enum pm_qos_type type;
51 struct blocking_notifier_head *notifiers;
/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};
61 static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
63 return req->dev != 0;
66 #ifdef CONFIG_PM
67 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
68 enum pm_qos_req_action action, int value);
69 void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
70 s32 value);
71 void pm_qos_update_request(struct pm_qos_request *req,
72 s32 new_value);
73 void pm_qos_remove_request(struct pm_qos_request *req);
75 int pm_qos_request(int pm_qos_class);
76 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
77 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
78 int pm_qos_request_active(struct pm_qos_request *req);
79 s32 pm_qos_read_value(struct pm_qos_constraints *c);
81 s32 __dev_pm_qos_read_value(struct device *dev);
82 s32 dev_pm_qos_read_value(struct device *dev);
83 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
84 s32 value);
85 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
86 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
87 int dev_pm_qos_add_notifier(struct device *dev,
88 struct notifier_block *notifier);
89 int dev_pm_qos_remove_notifier(struct device *dev,
90 struct notifier_block *notifier);
91 int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
92 int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
93 void dev_pm_qos_constraints_init(struct device *dev);
94 void dev_pm_qos_constraints_destroy(struct device *dev);
95 int dev_pm_qos_add_ancestor_request(struct device *dev,
96 struct dev_pm_qos_request *req, s32 value);
97 #else
98 static inline int pm_qos_update_target(struct pm_qos_constraints *c,
99 struct plist_node *node,
100 enum pm_qos_req_action action,
101 int value)
102 { return 0; }
103 static inline void pm_qos_add_request(struct pm_qos_request *req,
104 int pm_qos_class, s32 value)
105 { return; }
106 static inline void pm_qos_update_request(struct pm_qos_request *req,
107 s32 new_value)
108 { return; }
109 static inline void pm_qos_remove_request(struct pm_qos_request *req)
110 { return; }
112 static inline int pm_qos_request(int pm_qos_class)
114 switch (pm_qos_class) {
115 case PM_QOS_CPU_DMA_LATENCY:
116 return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
117 case PM_QOS_NETWORK_LATENCY:
118 return PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
119 case PM_QOS_NETWORK_THROUGHPUT:
120 return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
121 default:
122 return PM_QOS_DEFAULT_VALUE;
126 static inline int pm_qos_add_notifier(int pm_qos_class,
127 struct notifier_block *notifier)
128 { return 0; }
129 static inline int pm_qos_remove_notifier(int pm_qos_class,
130 struct notifier_block *notifier)
131 { return 0; }
132 static inline int pm_qos_request_active(struct pm_qos_request *req)
133 { return 0; }
134 static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
135 { return 0; }
137 static inline s32 __dev_pm_qos_read_value(struct device *dev)
138 { return 0; }
139 static inline s32 dev_pm_qos_read_value(struct device *dev)
140 { return 0; }
141 static inline int dev_pm_qos_add_request(struct device *dev,
142 struct dev_pm_qos_request *req,
143 s32 value)
144 { return 0; }
145 static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
146 s32 new_value)
147 { return 0; }
148 static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
149 { return 0; }
150 static inline int dev_pm_qos_add_notifier(struct device *dev,
151 struct notifier_block *notifier)
152 { return 0; }
153 static inline int dev_pm_qos_remove_notifier(struct device *dev,
154 struct notifier_block *notifier)
155 { return 0; }
156 static inline int dev_pm_qos_add_global_notifier(
157 struct notifier_block *notifier)
158 { return 0; }
159 static inline int dev_pm_qos_remove_global_notifier(
160 struct notifier_block *notifier)
161 { return 0; }
162 static inline void dev_pm_qos_constraints_init(struct device *dev)
164 dev->power.power_state = PMSG_ON;
166 static inline void dev_pm_qos_constraints_destroy(struct device *dev)
168 dev->power.power_state = PMSG_INVALID;
170 static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
171 struct dev_pm_qos_request *req, s32 value)
172 { return 0; }
173 #endif
175 #endif