// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *    dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *    is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *    allocated and free'd.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}
/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
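
/*
 * Illustrative sketch (not part of this file): a driver's runtime-suspend
 * path might consult the aggregate flags before cutting power.
 * PM_QOS_FLAG_NO_POWER_OFF is a real flag; my_dev_clock_gate() and
 * my_dev_power_off() are hypothetical driver helpers:
 *
 *	static int my_dev_runtime_suspend(struct device *dev)
 *	{
 *		if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *				!= PM_QOS_FLAGS_NONE)
 *			return my_dev_clock_gate(dev);
 *
 *		return my_dev_power_off(dev);
 *	}
 */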
/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/**
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}

	return ret;
}
/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
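
/*
 * Illustrative sketch of the request life cycle described above. The handle
 * must stay valid for as long as the request is active; my_req and the
 * latency values (in microseconds) are made-up caller state:
 *
 *	struct dev_pm_qos_request my_req;	// preallocated handle
 *
 *	dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_RESUME_LATENCY, 500);
 *	...
 *	dev_pm_qos_update_request(&my_req, 100);  // tighten the limit
 *	...
 *	dev_pm_qos_remove_request(&my_req);  // handle may be reused afterwards
 */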
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}
/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
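
/*
 * Illustrative sketch of a watcher, assuming a hypothetical my_qos_notify()
 * callback. The chain passes the new aggregate resume latency target as the
 * notifier's 'value' argument:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		pr_info("new resume latency target: %lu\n", value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *	...
 *	dev_pm_qos_add_notifier(dev, &my_nb);
 */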
/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
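
/*
 * Illustrative sketch: a device that cannot tolerate slow parent wakeups may
 * constrain the first suitable ancestor rather than itself; the helper above
 * does the walking. Only the handle and the value (20 us here, arbitrary)
 * come from the caller:
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &my_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 20);
 *	if (ret < 0)
 *		;	// no suitable ancestor, or the add itself failed
 */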
static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}
/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
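
/*
 * Illustrative sketch: after this call a "pm_qos_resume_latency_us" attribute
 * appears under the device's power/ directory in sysfs. A bus or driver might
 * expose a default limit like this (1000 us is an arbitrary example):
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, 1000);
 *
 * and later withdraw it again with dev_pm_qos_hide_latency_limit(dev).
 */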
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
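
/*
 * Illustrative sketch: this function backs the per-device
 * "pm_qos_latency_tolerance_us" sysfs attribute, but kernel users may call
 * it directly as well, e.g. to request a 15 us tolerance and later drop the
 * constraint again (the value is arbitrary):
 *
 *	dev_pm_qos_update_user_latency_tolerance(dev, 15);
 *	...
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */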
/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);