kernel/power/qos.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This module exposes the interface to kernel space for specifying
 * QoS dependencies.  It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based.  Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 *
 * There are 3 basic classes of QoS parameter: latency, timeout, throughput;
 * each has defined units:
 * latency: usec
 * timeout: usec <-- currently not used.
 * throughput: kbs (kilo byte / sec)
 *
 * There are lists of pm_qos_objects, each one wrapping requests and notifiers.
 *
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node.  As long as the process holds a file handle open to the node the
 * client continues to be accounted for.  Upon file release the usermode
 * request is removed and a new qos target is computed.  This way, when the
 * application closes the file descriptor or exits, its request is cleaned up
 * and the pm_qos_object gets an opportunity to recompute the target.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
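/*
 * Illustrative user-space sketch: a client holds a cpu_dma_latency
 * constraint for as long as it keeps the misc device node open (see
 * pm_qos_power_open()/pm_qos_power_release() below); the request is
 * dropped automatically on close or process exit.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *	int32_t latency_us = 20;	// ask for <= 20 usec wakeup latency
 *	write(fd, &latency_us, sizeof(latency_us));
 *	// ... latency-sensitive work ...
 *	close(fd);			// the constraint is removed here
 */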
/*#define DEBUG*/
#include <linux/pm_qos.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/uaccess.h>
#include <linux/export.h>
#include <trace/events/power.h>
/*
 * locking rule: all changes to constraints or notifiers lists
 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
 * held, taken with _irqsave.  One lock to rule them all
 */
struct pm_qos_object {
        struct pm_qos_constraints *constraints;
        struct miscdevice pm_qos_power_miscdev;
        char *name;
};

static DEFINE_SPINLOCK(pm_qos_lock);
static struct pm_qos_object null_pm_qos;

static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
        .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
        .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .type = PM_QOS_MIN,
        .notifiers = &cpu_dma_lat_notifier,
};
static struct pm_qos_object cpu_dma_pm_qos = {
        .constraints = &cpu_dma_constraints,
        .name = "cpu_dma_latency",
};

static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
        .list = PLIST_HEAD_INIT(network_lat_constraints.list),
        .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .type = PM_QOS_MIN,
        .notifiers = &network_lat_notifier,
};
static struct pm_qos_object network_lat_pm_qos = {
        .constraints = &network_lat_constraints,
        .name = "network_latency",
};

static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
        .list = PLIST_HEAD_INIT(network_tput_constraints.list),
        .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .type = PM_QOS_MAX,
        .notifiers = &network_throughput_notifier,
};
static struct pm_qos_object network_throughput_pm_qos = {
        .constraints = &network_tput_constraints,
        .name = "network_throughput",
};

static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
static struct pm_qos_constraints memory_bw_constraints = {
        .list = PLIST_HEAD_INIT(memory_bw_constraints.list),
        .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
        .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
        .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
        .type = PM_QOS_SUM,
        .notifiers = &memory_bandwidth_notifier,
};
static struct pm_qos_object memory_bandwidth_pm_qos = {
        .constraints = &memory_bw_constraints,
        .name = "memory_bandwidth",
};

static struct pm_qos_object *pm_qos_array[] = {
        &null_pm_qos,
        &cpu_dma_pm_qos,
        &network_lat_pm_qos,
        &network_throughput_pm_qos,
        &memory_bandwidth_pm_qos,
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
                size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);

static const struct file_operations pm_qos_power_fops = {
        .write = pm_qos_power_write,
        .read = pm_qos_power_read,
        .open = pm_qos_power_open,
        .release = pm_qos_power_release,
        .llseek = noop_llseek,
};
/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_constraints *c)
{
        struct plist_node *node;
        int total_value = 0;

        if (plist_head_empty(&c->list))
                return c->no_constraint_value;

        switch (c->type) {
        case PM_QOS_MIN:
                return plist_first(&c->list)->prio;

        case PM_QOS_MAX:
                return plist_last(&c->list)->prio;

        case PM_QOS_SUM:
                plist_for_each(node, &c->list)
                        total_value += node->prio;

                return total_value;

        default:
                /* runtime check for not using enum */
                BUG();
                return PM_QOS_DEFAULT_VALUE;
        }
}
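/*
 * Aggregation example: for requests of 10, 50 and 100 on one class, a
 * PM_QOS_MIN class (e.g. cpu_dma_latency) resolves to 10, a PM_QOS_MAX
 * class (e.g. network_throughput) to 100, and a PM_QOS_SUM class
 * (e.g. memory_bandwidth) to 160.
 */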
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
        return c->target_value;
}

static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
        c->target_value = value;
}

static int pm_qos_debug_show(struct seq_file *s, void *unused)
{
        struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
        struct pm_qos_constraints *c;
        struct pm_qos_request *req;
        char *type;
        unsigned long flags;
        int tot_reqs = 0;
        int active_reqs = 0;

        if (IS_ERR_OR_NULL(qos)) {
                pr_err("%s: bad qos param!\n", __func__);
                return -EINVAL;
        }
        c = qos->constraints;
        if (IS_ERR_OR_NULL(c)) {
                pr_err("%s: Bad constraints on qos?\n", __func__);
                return -EINVAL;
        }

        /* Lock to ensure we have a snapshot */
        spin_lock_irqsave(&pm_qos_lock, flags);
        if (plist_head_empty(&c->list)) {
                seq_puts(s, "Empty!\n");
                goto out;
        }

        switch (c->type) {
        case PM_QOS_MIN:
                type = "Minimum";
                break;
        case PM_QOS_MAX:
                type = "Maximum";
                break;
        case PM_QOS_SUM:
                type = "Sum";
                break;
        default:
                type = "Unknown";
        }

        plist_for_each_entry(req, &c->list, node) {
                char *state = "Default";

                if ((req->node).prio != c->default_value) {
                        active_reqs++;
                        state = "Active";
                }
                tot_reqs++;
                seq_printf(s, "%d: %d: %s\n", tot_reqs,
                           (req->node).prio, state);
        }

        seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
                   type, pm_qos_get_value(c), active_reqs, tot_reqs);

out:
        spin_unlock_irqrestore(&pm_qos_lock, flags);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(pm_qos_debug);
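/*
 * Reading one of the per-class debugfs files created in
 * register_pm_qos_misc() below prints one "<n>: <value>: <state>" line per
 * request followed by a summary, for example:
 *
 *	1: 20: Active
 *	Type=Minimum, Value=20, Requests: active=1 / total=1
 */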
/**
 * pm_qos_update_target - manages the constraints list and calls the notifiers
 *  if needed
 * @c: constraints data struct
 * @node: request to add to the list, to update or to remove
 * @action: action to take on the constraints list
 * @value: value of the request to add or update
 *
 * This function returns 1 if the aggregated constraint value has changed, 0
 *  otherwise.
 */
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
                         enum pm_qos_req_action action, int value)
{
        unsigned long flags;
        int prev_value, curr_value, new_value;
        int ret;

        spin_lock_irqsave(&pm_qos_lock, flags);
        prev_value = pm_qos_get_value(c);
        if (value == PM_QOS_DEFAULT_VALUE)
                new_value = c->default_value;
        else
                new_value = value;

        switch (action) {
        case PM_QOS_REMOVE_REQ:
                plist_del(node, &c->list);
                break;
        case PM_QOS_UPDATE_REQ:
                /*
                 * to change the list, we atomically remove, reinit
                 * with new value and add, then see if the extremal
                 * changed
                 */
                plist_del(node, &c->list);
                /* fall through */
        case PM_QOS_ADD_REQ:
                plist_node_init(node, new_value);
                plist_add(node, &c->list);
                break;
        default:
                /* no action */
                ;
        }

        curr_value = pm_qos_get_value(c);
        pm_qos_set_value(c, curr_value);

        spin_unlock_irqrestore(&pm_qos_lock, flags);

        trace_pm_qos_update_target(action, prev_value, curr_value);
        if (prev_value != curr_value) {
                ret = 1;
                if (c->notifiers)
                        blocking_notifier_call_chain(c->notifiers,
                                                     (unsigned long)curr_value,
                                                     NULL);
        } else {
                ret = 0;
        }
        return ret;
}
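/*
 * Note that pm_qos_lock is dropped before the notifier chain runs: the
 * chain is a blocking one, so callbacks are allowed to sleep and must not
 * be invoked under a spinlock.
 */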
/**
 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
 * @pqf: Device PM QoS flags set to remove the request from.
 * @req: Request to remove from the set.
 */
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
                                    struct pm_qos_flags_request *req)
{
        s32 val = 0;

        list_del(&req->node);
        list_for_each_entry(req, &pqf->list, node)
                val |= req->flags;

        pqf->effective_flags = val;
}

/**
 * pm_qos_update_flags - Update a set of PM QoS flags.
 * @pqf: Set of flags to update.
 * @req: Request to add to the set, to modify, or to remove from the set.
 * @action: Action to take on the set.
 * @val: Value of the request to add or modify.
 *
 * Update the given set of PM QoS flags and call notifiers if the aggregate
 * value has changed.  Returns true if the aggregate constraint value has
 * changed, false otherwise.
 */
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
                         struct pm_qos_flags_request *req,
                         enum pm_qos_req_action action, s32 val)
{
        unsigned long irqflags;
        s32 prev_value, curr_value;

        spin_lock_irqsave(&pm_qos_lock, irqflags);

        prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

        switch (action) {
        case PM_QOS_REMOVE_REQ:
                pm_qos_flags_remove_req(pqf, req);
                break;
        case PM_QOS_UPDATE_REQ:
                pm_qos_flags_remove_req(pqf, req);
                /* fall through */
        case PM_QOS_ADD_REQ:
                req->flags = val;
                INIT_LIST_HEAD(&req->node);
                list_add_tail(&req->node, &pqf->list);
                pqf->effective_flags |= val;
                break;
        default:
                /* no action */
                ;
        }

        curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

        spin_unlock_irqrestore(&pm_qos_lock, irqflags);

        trace_pm_qos_update_flags(action, prev_value, curr_value);
        return prev_value != curr_value;
}
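/*
 * Flags aggregate by OR: for example, two requests with flags 0x1 and 0x2
 * give effective_flags == 0x3, and removing the 0x2 request makes
 * pm_qos_flags_remove_req() recompute it back to 0x1.
 */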
/**
 * pm_qos_request - returns current system wide qos expectation
 * @pm_qos_class: identification of which qos value is requested
 *
 * This function returns the current target value.
 */
int pm_qos_request(int pm_qos_class)
{
        return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
EXPORT_SYMBOL_GPL(pm_qos_request);

int pm_qos_request_active(struct pm_qos_request *req)
{
        return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
static void __pm_qos_update_request(struct pm_qos_request *req,
                           s32 new_value)
{
        trace_pm_qos_update_request(req->pm_qos_class, new_value);

        if (new_value != req->node.prio)
                pm_qos_update_target(
                        pm_qos_array[req->pm_qos_class]->constraints,
                        &req->node, PM_QOS_UPDATE_REQ, new_value);
}

/**
 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
 * @work: work struct for the delayed work (timeout)
 *
 * This cancels the timeout request by falling back to the default at timeout.
 */
static void pm_qos_work_fn(struct work_struct *work)
{
        struct pm_qos_request *req = container_of(to_delayed_work(work),
                                                  struct pm_qos_request,
                                                  work);

        __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
 * pm_qos_add_request - inserts new qos request into the list
 * @req: pointer to a preallocated handle
 * @pm_qos_class: identifies which list of qos request to use
 * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
 * performance characteristics.  It recomputes the aggregate QoS expectations
 * for the pm_qos_class of parameters and initializes the pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 */

void pm_qos_add_request(struct pm_qos_request *req,
                        int pm_qos_class, s32 value)
{
        if (!req) /*guard against callers passing in null */
                return;

        if (pm_qos_request_active(req)) {
                WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
                return;
        }
        req->pm_qos_class = pm_qos_class;
        INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
        trace_pm_qos_add_request(pm_qos_class, value);
        pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
                             &req->node, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
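/*
 * Typical in-kernel usage sketch (illustrative; my_req is a hypothetical
 * handle owned by the caller):
 *
 *	static struct pm_qos_request my_req;
 *
 *	pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	// ... latency-critical section ...
 *	pm_qos_update_request(&my_req, PM_QOS_DEFAULT_VALUE);
 *	pm_qos_remove_request(&my_req);
 */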
/**
 * pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing qos request for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
 * Attempts are made to make this code callable on hot code paths.
 */
void pm_qos_update_request(struct pm_qos_request *req,
                           s32 new_value)
{
        if (!req) /*guard against callers passing in null */
                return;

        if (!pm_qos_request_active(req)) {
                WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
                return;
        }

        cancel_delayed_work_sync(&req->work);
        __pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
 * @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the temporary qos request value
 * @timeout_us: the effective duration of this qos request in usecs.
 *
 * After timeout_us, this qos request is cancelled automatically.
 */
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
                                   unsigned long timeout_us)
{
        if (!req)
                return;
        if (WARN(!pm_qos_request_active(req),
                 "%s called for unknown object.", __func__))
                return;

        cancel_delayed_work_sync(&req->work);

        trace_pm_qos_update_request_timeout(req->pm_qos_class,
                                            new_value, timeout_us);
        if (new_value != req->node.prio)
                pm_qos_update_target(
                        pm_qos_array[req->pm_qos_class]->constraints,
                        &req->node, PM_QOS_UPDATE_REQ, new_value);

        schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
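/*
 * Illustrative use: bound latency for at most one second and let the
 * request fall back to the class default automatically via pm_qos_work_fn():
 *
 *	pm_qos_update_request_timeout(&my_req, 20, 1000 * 1000);
 */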
/**
 * pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value for the pm_qos_class.  Call this
 * on slow code paths.
 */
void pm_qos_remove_request(struct pm_qos_request *req)
{
        if (!req) /*guard against callers passing in null */
                return;
                /* silent return to keep pcm code cleaner */

        if (!pm_qos_request_active(req)) {
                WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
                return;
        }

        cancel_delayed_work_sync(&req->work);

        trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
        pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
                             &req->node, PM_QOS_REMOVE_REQ,
                             PM_QOS_DEFAULT_VALUE);
        memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
/**
 * pm_qos_add_notifier - sets notification entry for changes to target value
 * @pm_qos_class: identifies which qos target changes should be notified.
 * @notifier: notifier block managed by caller.
 *
 * will register the notifier into a notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
        int retval;

        retval = blocking_notifier_chain_register(
                        pm_qos_array[pm_qos_class]->constraints->notifiers,
                        notifier);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);

/**
 * pm_qos_remove_notifier - deletes notification entry from chain.
 * @pm_qos_class: identifies which qos target changes are notified.
 * @notifier: notifier block to be removed.
 *
 * will remove the notifier from the notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
        int retval;

        retval = blocking_notifier_chain_unregister(
                        pm_qos_array[pm_qos_class]->constraints->notifiers,
                        notifier);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
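/*
 * Notifier sketch (illustrative; my_qos_notify and my_qos_nb are
 * hypothetical): the callback receives the new aggregate target value
 * as 'val'.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		pr_info("new cpu_dma_latency target: %lu usec\n", val);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_qos_nb);
 */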
/* User space interface to PM QoS classes via misc devices */
static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
{
        qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
        qos->pm_qos_power_miscdev.name = qos->name;
        qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;

        debugfs_create_file(qos->name, S_IRUGO, d, (void *)qos,
                            &pm_qos_debug_fops);

        return misc_register(&qos->pm_qos_power_miscdev);
}

static int find_pm_qos_object_by_minor(int minor)
{
        int pm_qos_class;

        for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
                pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
                if (minor ==
                        pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
                        return pm_qos_class;
        }
        return -1;
}
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
        long pm_qos_class;

        pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
        if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
                struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
                        return -ENOMEM;

                pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
                filp->private_data = req;

                return 0;
        }
        return -EPERM;
}

static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
        struct pm_qos_request *req;

        req = filp->private_data;
        pm_qos_remove_request(req);
        kfree(req);

        return 0;
}
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
                size_t count, loff_t *f_pos)
{
        s32 value;
        unsigned long flags;
        struct pm_qos_request *req = filp->private_data;

        if (!req)
                return -EINVAL;
        if (!pm_qos_request_active(req))
                return -EINVAL;

        spin_lock_irqsave(&pm_qos_lock, flags);
        value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
        spin_unlock_irqrestore(&pm_qos_lock, flags);

        return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}

static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        s32 value;
        struct pm_qos_request *req;

        if (count == sizeof(s32)) {
                if (copy_from_user(&value, buf, sizeof(s32)))
                        return -EFAULT;
        } else {
                int ret;

                ret = kstrtos32_from_user(buf, count, 16, &value);
                if (ret)
                        return ret;
        }

        req = filp->private_data;
        pm_qos_update_request(req, value);

        return count;
}
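/*
 * Two write formats are accepted above: a raw binary s32 (exactly
 * sizeof(s32) bytes) or ASCII text parsed as a base-16 number, so writing
 * the string "0x32" requests a value of 50.
 */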
static int __init pm_qos_power_init(void)
{
        int ret = 0;
        int i;
        struct dentry *d;

        BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);

        d = debugfs_create_dir("pm_qos", NULL);

        for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
                ret = register_pm_qos_misc(pm_qos_array[i], d);
                if (ret < 0) {
                        pr_err("%s: %s setup failed\n",
                               __func__, pm_qos_array[i]->name);
                        return ret;
                }
        }

        return ret;
}

late_initcall(pm_qos_power_init);