drivers/base/power/opp.c
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */
/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected only an optimal set of OPPs are
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible realtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @available:	true/false - marks if this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
};
/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @dev:	device pointer
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for book keeping and private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct list_head opp_list;
};
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);

/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is a RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}
/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return voltage in microvolts corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
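/*
 * Illustrative usage sketch (hypothetical caller, not part of this library):
 * the opp pointer and the values read from it must all be obtained inside a
 * single RCU read-side critical section. For example, to look up the lowest
 * available OPP at or above 800 MHz and read its voltage for a device "dev"
 * that has already been populated via opp_add():
 *
 *	unsigned long freq = 800000000;
 *	unsigned long volt = 0;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp))
 *		volt = opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * The opp pointer must not be used after rcu_read_unlock(); the 800 MHz
 * value is a made-up placeholder.
 */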
/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * 0 if there are none, or the corresponding error value.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}
/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Searches for exact match in the opp list and returns pointer to the matching
 * opp if found, else returns ERR_PTR in case of error and should be handled
 * using IS_ERR.
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
				bool available)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
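/*
 * Illustrative usage sketch (hypothetical caller): check whether an exact
 * 1 GHz OPP is currently available for a device "dev" that was populated via
 * opp_add(). The frequency is a made-up placeholder:
 *
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_exact(dev, 1000000000, true);
 *	if (IS_ERR(opp))
 *		dev_info(dev, "1 GHz OPP absent or disabled\n");
 *	rcu_read_unlock();
 *
 * Passing available=false instead matches only a disabled OPP at that exact
 * frequency, e.g. to confirm it exists before re-enabling it with opp_enable().
 */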
/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return opp;

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
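/*
 * Illustrative usage sketch (hypothetical caller): walk every available OPP
 * of device "dev" in ascending frequency order by repeatedly asking for the
 * ceil of the previous rate plus one:
 *
 *	struct opp *opp;
 *	unsigned long freq = 0;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
 *		dev_info(dev, "OPP: %lu Hz, %lu uV\n",
 *			 opp_get_freq(opp), opp_get_voltage(opp));
 *		freq++;
 *	}
 *	rcu_read_unlock();
 *
 * Because opp_find_freq_ceil() refreshes *freq to the matched rate, bumping
 * it by one before the next iteration steps past the OPP just visited.
 */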
/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return opp;

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
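/*
 * Illustrative usage sketch (hypothetical caller): pick the fastest available
 * OPP that does not exceed a 1.2 GHz cap, e.g. when throttling. The cap value
 * is a made-up placeholder:
 *
 *	unsigned long cap = 1200000000;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_floor(dev, &cap);
 *	if (!IS_ERR(opp))
 *		dev_info(dev, "capped rate: %lu Hz\n", opp_get_freq(opp));
 *	rcu_read_unlock();
 *
 * On success, *freq (here "cap") has been rounded down to the chosen OPP rate.
 */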
/**
 * opp_add() - Add an OPP definition for a device
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device is needed to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	return 0;
}
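/*
 * Illustrative usage sketch (hypothetical SoC init code, not part of this
 * library): OPPs are typically registered once, early at init time, from a
 * context where mutexes may be taken. All names and values below are made-up
 * placeholders:
 *
 *	static int __init my_soc_opp_init(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = opp_add(dev, 300000000, 950000);
 *		if (!ret)
 *			ret = opp_add(dev, 600000000, 1050000);
 *		if (!ret)
 *			ret = opp_add(dev, 1000000000, 1200000);
 *		return ret;
 *	}
 *
 * Frequencies are in Hz and voltages in uV, matching the opp_add() arguments.
 */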
/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	/*
	 * Initializing to ERR_PTR(-ENODEV) lets the IS_ERR() checks below
	 * detect a missing device or frequency.
	 */
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	synchronize_rcu();

	/* clean up old opp */
	new_opp = opp;
	goto out;

unlock:
	mutex_unlock(&dev_opp_list_lock);
out:
	kfree(new_opp);
	return r;
}
/**
 * opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it has been temporarily made unavailable with
 * opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}
/**
 * opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}
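/*
 * Illustrative usage sketch (hypothetical platform code): a board may park
 * its highest OPP while a thermal limit is exceeded and restore it later.
 * The 1 GHz value is a made-up placeholder:
 *
 *	On crossing the thermal limit:
 *		opp_disable(dev, 1000000000);
 *
 *	Once the device has cooled down:
 *		opp_enable(dev, 1000000000);
 *
 * Both calls take a mutex and may call synchronize_rcu(), so they must not be
 * used from atomic context or under rcu_read_lock().
 */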
#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
 * if no memory available for the operation (table is not populated), returns 0
 * if successful and table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the OPP modifying functions (opp_add, opp_enable,
 * opp_disable) have been invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend to be an updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].index = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}
#endif		/* CONFIG_CPU_FREQ */
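/*
 * Illustrative usage sketch (hypothetical cpufreq driver, not part of this
 * library): a cpufreq driver can build its frequency table from the OPPs
 * registered for the CPU device and is responsible for freeing it. The names
 * my_cpufreq_init and mpu_dev are made-up placeholders:
 *
 *	static struct cpufreq_frequency_table *freq_table;
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		ret = opp_init_cpufreq_table(mpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *
 *		ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
 *		if (ret)
 *			kfree(freq_table);
 *		return ret;
 *	}
 *
 * The table must be freed and rebuilt if OPPs are later added, enabled or
 * disabled.
 */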