drivers/dca/dca-core.c
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/module.h>	/* MODULE_* macros and EXPORT_SYMBOL_GPL */
#include <linux/slab.h>		/* kzalloc()/kfree() */
#include <linux/dca.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);
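
/*
 * Providers are grouped into "domains", one per PCI root complex, since a
 * DCA tag is only meaningful to requesters that sit under the same root
 * complex as the provider.  dca_domains lists every domain seen so far;
 * it and the per-domain provider lists are protected by dca_lock.
 */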

/* walk up from a requester/provider device to its PCI root complex bus */
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	/* GFP_NOWAIT: this runs with dca_lock held (via dca_get_domain()) */
	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

/* find the domain for dev's root complex, creating it on first use */
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		domain = dca_allocate_domain(rc);
		if (domain)
			list_add(&domain->node, &dca_domains);
	}

	return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		/* legacy (dev-less) callers get the first domain on record */
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* ask each provider in the domain until one accepts the requester */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		/* roll back, but only if the provider is still registered */
		spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
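
/*
 * Sketch of typical client usage (not part of this file; 'pdev' stands for
 * the client driver's own pci_dev): request a slot once, then fetch a tag
 * per CPU and program it into the device's DCA/prefetch hint registers.
 *
 *	if (dca_add_requester(&pdev->dev) == 0) {
 *		int cpu = get_cpu();
 *		u8 tag = dca3_get_tag(&pdev->dev, cpu);
 *		put_cpu();
 *		... write 'tag' into the device-specific DCA registers ...
 *	}
 *	...
 *	dca_remove_requester(&pdev->dev);
 */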

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		/* note: the error value is truncated to the u8 return type */
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
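
/*
 * Note on the two tag interfaces above: dca_get_tag() is the legacy entry
 * point; by passing dev == NULL it ends up using the first registered
 * domain and provider, which is only unambiguous on systems with a single
 * root complex.  dca3_get_tag() resolves the provider from the requester's
 * root complex and is the preferred interface.
 */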

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain;

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		/* undo the sysfs registration on failure */
		dca_sysfs_remove_provider(dca);
		return -ENODEV;
	}
	list_add(&dca->node, &domain->dca_providers);
	spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
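
/*
 * Sketch of typical provider usage (outside this file): a device driver that
 * can issue DCA hints fills in a struct dca_ops with its add_requester,
 * remove_requester, get_tag and dev_managed callbacks, then:
 *
 *	dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_dca_priv));
 *	if (!dca)
 *		return -ENOMEM;
 *	err = register_dca_provider(dca, &pdev->dev);
 *	if (err) {
 *		free_dca_provider(dca);
 *		return err;
 *	}
 *	...
 *	unregister_dca_provider(dca, &pdev->dev);
 *	free_dca_provider(dca);
 *
 * my_dca_ops and struct my_dca_priv are placeholders for the provider's own
 * definitions; the private area is for whatever state the callbacks need.
 */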

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device that provided dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	spin_lock_irqsave(&dca_lock, flags);

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
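
/*
 * Clients that want to react to providers coming and going can register a
 * notifier; a sketch (my_dca_notify, my_nb and my_dev are hypothetical):
 *
 *	static int my_dca_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		switch (event) {
 *		case DCA_PROVIDER_ADD:
 *			dca_add_requester(my_dev);
 *			break;
 *		case DCA_PROVIDER_REMOVE:
 *			dca_remove_requester(my_dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_dca_notify };
 *	...
 *	dca_register_notify(&my_nb);
 */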

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);