PM / sleep: Asynchronous threads for suspend_noirq
[linux/fpc-iii.git] / drivers / dca / dca-core.c
blob819dfda8823623fb8c7741f4ed299626393db044
1 /*
2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
23 * This driver supports an interface for DCA clients and providers to meet.
26 #include <linux/kernel.h>
27 #include <linux/notifier.h>
28 #include <linux/device.h>
29 #include <linux/dca.h>
30 #include <linux/slab.h>
31 #include <linux/module.h>
#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

/* Protects dca_domains and each domain's dca_providers list; taken with
 * irqsave everywhere in this file. */
static DEFINE_RAW_SPINLOCK(dca_lock);

/* List of struct dca_domain, one per PCI root complex seen so far. */
static LIST_HEAD(dca_domains);

/* Clients register here to receive DCA_PROVIDER_ADD/REMOVE events. */
static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Set by dca_get_domain() when an ioat ver-3.0 provider appears while
 * other domains already exist; once set, register_dca_provider() refuses
 * new providers and the existing ones get torn down. */
static int dca_providers_blocked;
47 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
49 struct pci_dev *pdev = to_pci_dev(dev);
50 struct pci_bus *bus = pdev->bus;
52 while (bus->parent)
53 bus = bus->parent;
55 return bus;
58 static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
60 struct dca_domain *domain;
62 domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
63 if (!domain)
64 return NULL;
66 INIT_LIST_HEAD(&domain->dca_providers);
67 domain->pci_rc = rc;
69 return domain;
/* Unlink @domain from dca_domains and free it; caller holds dca_lock. */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
78 static int dca_provider_ioat_ver_3_0(struct device *dev)
80 struct pci_dev *pdev = to_pci_dev(dev);
82 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
83 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
90 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
/*
 * Tear down every provider in the (expected single) remaining domain.
 * Invoked from register_dca_provider() once dca_providers_blocked is set.
 * Providers are moved to a private list under dca_lock, the domain is
 * freed, and sysfs removal happens only after the lock is dropped —
 * NOTE(review): presumably because sysfs removal can sleep; confirm.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	/* Let clients drop their references before the teardown. */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* Sysfs cleanup for each moved provider, outside the lock. */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
128 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
130 struct dca_domain *domain;
132 list_for_each_entry(domain, &dca_domains, node)
133 if (domain->pci_rc == rc)
134 return domain;
136 return NULL;
139 static struct dca_domain *dca_get_domain(struct device *dev)
141 struct pci_bus *rc;
142 struct dca_domain *domain;
144 rc = dca_pci_rc_from_dev(dev);
145 domain = dca_find_domain(rc);
147 if (!domain) {
148 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
149 dca_providers_blocked = 1;
152 return domain;
155 static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
157 struct dca_provider *dca;
158 struct pci_bus *rc;
159 struct dca_domain *domain;
161 if (dev) {
162 rc = dca_pci_rc_from_dev(dev);
163 domain = dca_find_domain(rc);
164 if (!domain)
165 return NULL;
166 } else {
167 if (!list_empty(&dca_domains))
168 domain = list_first_entry(&dca_domains,
169 struct dca_domain,
170 node);
171 else
172 return NULL;
175 list_for_each_entry(dca, &domain->dca_providers, node)
176 if ((!dev) || (dca->ops->dev_managed(dca, dev)))
177 return dca;
179 return NULL;
/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 *
 * Offers @dev to every provider in its root complex's domain until one
 * accepts it (add_requester() returns a slot >= 0).  Returns 0 on success,
 * -EFAULT for a NULL @dev, -EEXIST if already registered, -ENODEV when no
 * domain/provider takes it, or the sysfs error after undoing the add.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* First provider in the domain to accept the requester wins. */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs work is done after dropping the lock; on failure undo the
	 * add, but only if the provider still manages @dev (re-checked
	 * because the lock was released in between). */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -ENODEV when no provider
 * manages @dev, or the negative value from the provider's
 * remove_requester() op.
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs removal happens outside the lock */
	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 *
 * NOTE(review): the return type is u8, so the -ENODEV (-19) error return
 * is truncated to 0xED on the way out — callers cannot distinguish it from
 * a genuine tag with that value.  Changing the signature would break the
 * exported API, so this is documented rather than fixed here.
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
293 * dca3_get_tag - return the dca tag to the requester device
294 * for the given cpu (new api)
295 * @dev - the device that wants dca service
296 * @cpu - the cpuid as returned by get_cpu()
298 u8 dca3_get_tag(struct device *dev, int cpu)
300 if (!dev)
301 return -EFAULT;
303 return dca_common_get_tag(dev, cpu);
305 EXPORT_SYMBOL_GPL(dca3_get_tag);
308 * dca_get_tag - return the dca tag for the given cpu (old api)
309 * @cpu - the cpuid as returned by get_cpu()
311 u8 dca_get_tag(int cpu)
313 struct device *dev = NULL;
315 return dca_common_get_tag(dev, cpu);
317 EXPORT_SYMBOL_GPL(dca_get_tag);
320 * alloc_dca_provider - get data struct for describing a dca provider
321 * @ops - pointer to struct of dca operation function pointers
322 * @priv_size - size of extra mem to be added for provider's needs
324 struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
326 struct dca_provider *dca;
327 int alloc_size;
329 alloc_size = (sizeof(*dca) + priv_size);
330 dca = kzalloc(alloc_size, GFP_KERNEL);
331 if (!dca)
332 return NULL;
333 dca->ops = ops;
335 return dca;
337 EXPORT_SYMBOL_GPL(alloc_dca_provider);
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 *
 * (Fixed kernel-doc: the old comment described @ops and @priv_size, which
 * are parameters of alloc_dca_provider(), not of this function.)
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 *
 * Adds @dca to the domain of @dev's root complex, creating the domain if
 * needed.  Because domain allocation requires dropping dca_lock, the
 * lookup is re-done after re-acquiring it.  Returns 0 on success, -ENODEV
 * if providers are blocked or allocation fails, or the sysfs error.
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		/* dca_get_domain() may have just set the blocked flag:
		 * undo the sysfs add and tear everything down. */
		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			/* NOTE(review): the sysfs provider entry added above
			 * is not removed on this path — looks like a leak;
			 * verify against dca_sysfs_add_provider(). */
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	/* If the race recheck found an existing domain, newdomain is
	 * still set and must be discarded (kfree(NULL) is a no-op). */
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device that was providing dca services
 *
 * Notifies clients, unlinks @dca from its domain, and frees the domain
 * when it becomes empty.  Sysfs removal is done after dropping the lock.
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* Nothing to do if unregister_dca_providers() already ran. */
	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	/* NOTE(review): dca_find_domain() can return NULL; the code below
	 * assumes the provider's domain is still present — confirm callers
	 * guarantee this, otherwise this is a NULL dereference. */
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
442 * dca_register_notify - register a client's notifier callback
444 void dca_register_notify(struct notifier_block *nb)
446 blocking_notifier_chain_register(&dca_provider_chain, nb);
448 EXPORT_SYMBOL_GPL(dca_register_notify);
451 * dca_unregister_notify - remove a client's notifier callback
453 void dca_unregister_notify(struct notifier_block *nb)
455 blocking_notifier_chain_unregister(&dca_provider_chain, nb);
457 EXPORT_SYMBOL_GPL(dca_unregister_notify);
/* Module init: announce the service and create the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

/* NOTE(review): arch_initcall (not module_init) — presumably so the dca
 * service is available before provider drivers initialize; confirm. */
arch_initcall(dca_init);
module_exit(dca_exit);