kernel/pm_qos_params.c
/*
 * This module exposes the interface to kernel space for specifying
 * QoS dependencies.  It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based.  Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 *
 * There are 3 basic classes of QoS parameter: latency, timeout, throughput;
 * each has defined units:
 * latency: usec
 * timeout: usec <-- currently not used.
 * throughput: kbs (kilo byte / sec)
 *
 * There is one pm_qos_object per parameter class, each wrapping its list of
 * requests and its notifier chain.
 *
 * User mode requests on a QoS parameter register themselves with the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node.  As long as the process holds a file handle open to the node the
 * client continues to be accounted for.  Upon file release the usermode
 * request is removed and a new qos target is computed.  This way, when an
 * application closes the file descriptor or exits, its request is cleaned up
 * and the pm_qos_object gets an opportunity to recompute the target.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
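/*
 * Illustrative sketch, not part of the original file (compiled out below):
 * a user space client pins the cpu_dma_latency target by keeping the misc
 * device node open and writing a binary s32.  The device name matches the
 * "cpu_dma_latency" object registered further down; the rest of the program
 * is a hypothetical example.
 */
#if 0	/* user space example, for illustration only */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int32_t usec = 50;	/* ask for <= 50 usec wakeup latency */
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, &usec, sizeof(usec)) != sizeof(usec))
		return 1;
	sleep(60);		/* the request lives as long as fd is open */
	close(fd);		/* release removes the request */
	return 0;
}
#endif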
/*#define DEBUG*/

#include <linux/pm_qos_params.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>

#include <linux/uaccess.h>

/*
 * locking rule: all changes to requests or notifiers lists
 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
 * held, taken with _irqsave.  One lock to rule them all
 */
struct pm_qos_request_list {
	struct list_head list;
	union {
		s32 value;
		s32 usec;
		s32 kbps;
	};
	int pm_qos_class;
};

static s32 max_compare(s32 v1, s32 v2);
static s32 min_compare(s32 v1, s32 v2);

struct pm_qos_object {
	struct pm_qos_request_list requests;
	struct blocking_notifier_head *notifiers;
	struct miscdevice pm_qos_power_miscdev;
	char *name;
	s32 default_value;
	atomic_t target_value;
	s32 (*comparitor)(s32, s32);
};
static struct pm_qos_object null_pm_qos;

static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
	.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
	.notifiers = &cpu_dma_lat_notifier,
	.name = "cpu_dma_latency",
	.default_value = 2000 * USEC_PER_SEC,
	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
	.comparitor = min_compare
};

static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
	.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
	.notifiers = &network_lat_notifier,
	.name = "network_latency",
	.default_value = 2000 * USEC_PER_SEC,
	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
	.comparitor = min_compare
};

static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
	.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
	.notifiers = &network_throughput_notifier,
	.name = "network_throughput",
	.default_value = 0,
	.target_value = ATOMIC_INIT(0),
	.comparitor = max_compare
};

static struct pm_qos_object *pm_qos_array[] = {
	&null_pm_qos,
	&cpu_dma_pm_qos,
	&network_lat_pm_qos,
	&network_throughput_pm_qos
};

static DEFINE_SPINLOCK(pm_qos_lock);
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);

static const struct file_operations pm_qos_power_fops = {
	.write = pm_qos_power_write,
	.open = pm_qos_power_open,
	.release = pm_qos_power_release,
};

/* static helper functions */
static s32 max_compare(s32 v1, s32 v2)
{
	return max(v1, v2);
}

static s32 min_compare(s32 v1, s32 v2)
{
	return min(v1, v2);
}
/* recompute the aggregate target value for a class and notify on change */
static void update_target(int pm_qos_class)
{
	s32 extreme_value;
	struct pm_qos_request_list *node;
	unsigned long flags;
	int call_notifier = 0;

	spin_lock_irqsave(&pm_qos_lock, flags);
	/* fold every outstanding request into the class comparator */
	extreme_value = pm_qos_array[pm_qos_class]->default_value;
	list_for_each_entry(node,
			&pm_qos_array[pm_qos_class]->requests.list, list) {
		extreme_value = pm_qos_array[pm_qos_class]->comparitor(
				extreme_value, node->value);
	}
	if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
			extreme_value) {
		call_notifier = 1;
		atomic_set(&pm_qos_array[pm_qos_class]->target_value,
				extreme_value);
		pr_debug("new target for qos %d is %d\n", pm_qos_class,
			atomic_read(&pm_qos_array[pm_qos_class]->target_value));
	}
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	/* call the chain outside the spinlock; notifiers may sleep */
	if (call_notifier)
		blocking_notifier_call_chain(
				pm_qos_array[pm_qos_class]->notifiers,
				(unsigned long) extreme_value, NULL);
}
static int register_pm_qos_misc(struct pm_qos_object *qos)
{
	qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
	qos->pm_qos_power_miscdev.name = qos->name;
	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;

	return misc_register(&qos->pm_qos_power_miscdev);
}

static int find_pm_qos_object_by_minor(int minor)
{
	int pm_qos_class;

	for (pm_qos_class = 0;
		pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
		if (minor ==
			pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
			return pm_qos_class;
	}
	return -1;
}
/**
 * pm_qos_request - returns current system wide qos expectation
 * @pm_qos_class: identification of which qos value is requested
 *
 * This function returns the current target value in an atomic manner.
 */
int pm_qos_request(int pm_qos_class)
{
	return atomic_read(&pm_qos_array[pm_qos_class]->target_value);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
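/*
 * Illustrative sketch, not part of the original file (compiled out):
 * a driver could consult the aggregated network latency target before
 * deciding how aggressively to coalesce interrupts.  The helper name is
 * hypothetical; pm_qos_request() and PM_QOS_NETWORK_LATENCY are not.
 */
#if 0
static bool example_low_latency_needed(void)
{
	/* current system wide target for the class, in usec */
	return pm_qos_request(PM_QOS_NETWORK_LATENCY) < 100;
}
#endif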
/**
 * pm_qos_add_request - inserts new qos request into the list
 * @pm_qos_class: identifies which list of qos requests to use
 * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
 * performance characteristics.  It recomputes the aggregate QoS expectations
 * for the pm_qos_class of parameters, and returns the pm_qos_request list
 * element as a handle for use in updating and removal.  The caller needs to
 * save this handle for later use.
 */
struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value)
{
	struct pm_qos_request_list *dep;
	unsigned long flags;

	dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
	if (dep) {
		if (value == PM_QOS_DEFAULT_VALUE)
			dep->value = pm_qos_array[pm_qos_class]->default_value;
		else
			dep->value = value;
		dep->pm_qos_class = pm_qos_class;

		spin_lock_irqsave(&pm_qos_lock, flags);
		list_add(&dep->list,
			&pm_qos_array[pm_qos_class]->requests.list);
		spin_unlock_irqrestore(&pm_qos_lock, flags);
		update_target(pm_qos_class);
	}

	return dep;
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
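/*
 * Illustrative sketch, not part of the original file (compiled out):
 * a driver bounds DMA latency around a burst of I/O by adding a request
 * and removing it when done.  The function and variable names below are
 * hypothetical; the pm_qos_* calls are the ones defined in this file.
 */
#if 0
static struct pm_qos_request_list *example_dma_req;

static int example_start_burst(void)
{
	/* keep cpu_dma_latency at or below 50 usec while the burst runs */
	example_dma_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 50);
	return example_dma_req ? 0 : -ENOMEM;
}

static void example_end_burst(void)
{
	/* drop the constraint and let the target relax again */
	pm_qos_remove_request(example_dma_req);
	example_dma_req = NULL;
}
#endif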
/**
 * pm_qos_update_request - modifies an existing qos request
 * @pm_qos_req: handle to list element holding a pm_qos request to use
 * @new_value: defines the new qos request value
 *
 * Updates an existing qos request for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
 * Attempts are made to make this code callable on hot code paths.
 */
void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
		s32 new_value)
{
	unsigned long flags;
	int pending_update = 0;
	s32 temp;

	if (pm_qos_req) { /* guard against callers passing in null */
		spin_lock_irqsave(&pm_qos_lock, flags);
		if (new_value == PM_QOS_DEFAULT_VALUE)
			temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value;
		else
			temp = new_value;

		if (temp != pm_qos_req->value) {
			pending_update = 1;
			pm_qos_req->value = temp;
		}
		spin_unlock_irqrestore(&pm_qos_lock, flags);
		if (pending_update)
			update_target(pm_qos_req->pm_qos_class);
	}
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
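/*
 * Illustrative sketch, not part of the original file (compiled out):
 * once a handle exists, its value can be tightened or relaxed in place;
 * passing PM_QOS_DEFAULT_VALUE falls back to the class default.  The
 * function and parameter names below are hypothetical.
 */
#if 0
static void example_set_idle(struct pm_qos_request_list *req, bool idle)
{
	if (idle)
		pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
	else
		pm_qos_update_request(req, 50);	/* usec */
}
#endif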
/**
 * pm_qos_remove_request - removes an existing qos request
 * @pm_qos_req: handle to request list element
 *
 * Will remove the pm qos request from the list of requests and
 * recompute the current target value for the pm_qos_class.  Call this
 * on slow code paths.
 */
void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
{
	unsigned long flags;
	int qos_class;

	if (pm_qos_req == NULL)
		return;
		/* silent return to keep pcm code cleaner */

	qos_class = pm_qos_req->pm_qos_class;
	spin_lock_irqsave(&pm_qos_lock, flags);
	list_del(&pm_qos_req->list);
	kfree(pm_qos_req);
	spin_unlock_irqrestore(&pm_qos_lock, flags);
	update_target(qos_class);
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
/**
 * pm_qos_add_notifier - sets notification entry for changes to target value
 * @pm_qos_class: identifies which qos target changes should be notified.
 * @notifier: notifier block managed by caller.
 *
 * will register the notifier into a notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
	int retval;

	retval = blocking_notifier_chain_register(
			pm_qos_array[pm_qos_class]->notifiers, notifier);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);

/**
 * pm_qos_remove_notifier - deletes notification entry from chain.
 * @pm_qos_class: identifies which qos target changes are notified.
 * @notifier: notifier block to be removed.
 *
 * will remove the notifier from the notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
	int retval;

	retval = blocking_notifier_chain_unregister(
			pm_qos_array[pm_qos_class]->notifiers, notifier);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
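/*
 * Illustrative sketch, not part of the original file (compiled out):
 * a watcher registers a notifier block and receives the new aggregate
 * target as the 'value' argument whenever it changes.  The callback and
 * block names below are hypothetical.
 */
#if 0
static int example_lat_notify(struct notifier_block *nb,
		unsigned long value, void *data)
{
	pr_debug("cpu_dma_latency target is now %lu usec\n", value);
	return NOTIFY_OK;
}

static struct notifier_block example_lat_nb = {
	.notifier_call = example_lat_notify,
};

static int __init example_watcher_init(void)
{
	return pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &example_lat_nb);
}
#endif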
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
	long pm_qos_class;

	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
	if (pm_qos_class >= 0) {
		filp->private_data = (void *) pm_qos_add_request(pm_qos_class,
				PM_QOS_DEFAULT_VALUE);

		if (filp->private_data)
			return 0;
	}
	return -EPERM;
}

static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
	struct pm_qos_request_list *req;

	req = (struct pm_qos_request_list *)filp->private_data;
	pm_qos_remove_request(req);

	return 0;
}
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	int x;
	char ascii_value[11];
	struct pm_qos_request_list *pm_qos_req;

	if (count == sizeof(s32)) {
		/* binary interface: a raw 32 bit value */
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else if (count == 11) { /* len('0x12345678\0') */
		/* ascii interface: a hex string such as "0x12345678" */
		if (copy_from_user(ascii_value, buf, 11))
			return -EFAULT;
		x = sscanf(ascii_value, "%x", &value);
		if (x != 1)
			return -EINVAL;
		pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
	} else
		return -EINVAL;

	pm_qos_req = (struct pm_qos_request_list *)filp->private_data;
	pm_qos_update_request(pm_qos_req, value);

	return count;
}
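/*
 * Illustrative sketch, not part of the original file (compiled out):
 * the ascii variant of the write interface expects exactly 11 bytes,
 * i.e. "0x" plus 8 hex digits plus a terminating NUL, as handled above.
 * The program below is a hypothetical user space example.
 */
#if 0	/* user space example, for illustration only */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[11] = "0x00000032";	/* 0x32 == 50 usec, NUL-terminated */
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, buf, sizeof(buf)) != sizeof(buf))
		return 1;
	pause();	/* hold the request until the process exits */
	return 0;
}
#endif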
static int __init pm_qos_power_init(void)
{
	int ret = 0;

	ret = register_pm_qos_misc(&cpu_dma_pm_qos);
	if (ret < 0) {
		printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
		return ret;
	}
	ret = register_pm_qos_misc(&network_lat_pm_qos);
	if (ret < 0) {
		printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
		return ret;
	}
	ret = register_pm_qos_misc(&network_throughput_pm_qos);
	if (ret < 0)
		printk(KERN_ERR
			"pm_qos_param: network_throughput setup failed\n");

	return ret;
}

late_initcall(pm_qos_power_init);