/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
26 #include <linux/kernel.h>
27 #include <linux/notifier.h>
28 #include <linux/device.h>
29 #include <linux/dca.h>
30 #include <linux/slab.h>
32 #define DCA_VERSION "1.12.1"
34 MODULE_VERSION(DCA_VERSION
);
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Intel Corporation");
38 static DEFINE_SPINLOCK(dca_lock
);
40 static LIST_HEAD(dca_domains
);
42 static BLOCKING_NOTIFIER_HEAD(dca_provider_chain
);
44 static int dca_providers_blocked
;
46 static struct pci_bus
*dca_pci_rc_from_dev(struct device
*dev
)
48 struct pci_dev
*pdev
= to_pci_dev(dev
);
49 struct pci_bus
*bus
= pdev
->bus
;
57 static struct dca_domain
*dca_allocate_domain(struct pci_bus
*rc
)
59 struct dca_domain
*domain
;
61 domain
= kzalloc(sizeof(*domain
), GFP_NOWAIT
);
65 INIT_LIST_HEAD(&domain
->dca_providers
);
71 static void dca_free_domain(struct dca_domain
*domain
)
73 list_del(&domain
->node
);
77 static int dca_provider_ioat_ver_3_0(struct device
*dev
)
79 struct pci_dev
*pdev
= to_pci_dev(dev
);
81 return ((pdev
->vendor
== PCI_VENDOR_ID_INTEL
) &&
82 ((pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG0
) ||
83 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG1
) ||
84 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG2
) ||
85 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG3
) ||
86 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG4
) ||
87 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG5
) ||
88 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG6
) ||
89 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG7
)));
92 static void unregister_dca_providers(void)
94 struct dca_provider
*dca
, *_dca
;
95 struct list_head unregistered_providers
;
96 struct dca_domain
*domain
;
99 blocking_notifier_call_chain(&dca_provider_chain
,
100 DCA_PROVIDER_REMOVE
, NULL
);
102 INIT_LIST_HEAD(&unregistered_providers
);
104 spin_lock_irqsave(&dca_lock
, flags
);
106 if (list_empty(&dca_domains
)) {
107 spin_unlock_irqrestore(&dca_lock
, flags
);
111 /* at this point only one domain in the list is expected */
112 domain
= list_first_entry(&dca_domains
, struct dca_domain
, node
);
114 list_for_each_entry_safe(dca
, _dca
, &domain
->dca_providers
, node
) {
115 list_del(&dca
->node
);
116 list_add(&dca
->node
, &unregistered_providers
);
119 dca_free_domain(domain
);
121 spin_unlock_irqrestore(&dca_lock
, flags
);
123 list_for_each_entry_safe(dca
, _dca
, &unregistered_providers
, node
) {
124 dca_sysfs_remove_provider(dca
);
125 list_del(&dca
->node
);
129 static struct dca_domain
*dca_find_domain(struct pci_bus
*rc
)
131 struct dca_domain
*domain
;
133 list_for_each_entry(domain
, &dca_domains
, node
)
134 if (domain
->pci_rc
== rc
)
140 static struct dca_domain
*dca_get_domain(struct device
*dev
)
143 struct dca_domain
*domain
;
145 rc
= dca_pci_rc_from_dev(dev
);
146 domain
= dca_find_domain(rc
);
149 if (dca_provider_ioat_ver_3_0(dev
) && !list_empty(&dca_domains
)) {
150 dca_providers_blocked
= 1;
152 domain
= dca_allocate_domain(rc
);
154 list_add(&domain
->node
, &dca_domains
);
161 static struct dca_provider
*dca_find_provider_by_dev(struct device
*dev
)
163 struct dca_provider
*dca
;
165 struct dca_domain
*domain
;
168 rc
= dca_pci_rc_from_dev(dev
);
169 domain
= dca_find_domain(rc
);
173 if (!list_empty(&dca_domains
))
174 domain
= list_first_entry(&dca_domains
,
181 list_for_each_entry(dca
, &domain
->dca_providers
, node
)
182 if ((!dev
) || (dca
->ops
->dev_managed(dca
, dev
)))
189 * dca_add_requester - add a dca client to the list
190 * @dev - the device that wants dca service
192 int dca_add_requester(struct device
*dev
)
194 struct dca_provider
*dca
;
195 int err
, slot
= -ENODEV
;
197 struct pci_bus
*pci_rc
;
198 struct dca_domain
*domain
;
203 spin_lock_irqsave(&dca_lock
, flags
);
205 /* check if the requester has not been added already */
206 dca
= dca_find_provider_by_dev(dev
);
208 spin_unlock_irqrestore(&dca_lock
, flags
);
212 pci_rc
= dca_pci_rc_from_dev(dev
);
213 domain
= dca_find_domain(pci_rc
);
215 spin_unlock_irqrestore(&dca_lock
, flags
);
219 list_for_each_entry(dca
, &domain
->dca_providers
, node
) {
220 slot
= dca
->ops
->add_requester(dca
, dev
);
225 spin_unlock_irqrestore(&dca_lock
, flags
);
230 err
= dca_sysfs_add_req(dca
, dev
, slot
);
232 spin_lock_irqsave(&dca_lock
, flags
);
233 if (dca
== dca_find_provider_by_dev(dev
))
234 dca
->ops
->remove_requester(dca
, dev
);
235 spin_unlock_irqrestore(&dca_lock
, flags
);
241 EXPORT_SYMBOL_GPL(dca_add_requester
);
244 * dca_remove_requester - remove a dca client from the list
245 * @dev - the device that wants dca service
247 int dca_remove_requester(struct device
*dev
)
249 struct dca_provider
*dca
;
256 spin_lock_irqsave(&dca_lock
, flags
);
257 dca
= dca_find_provider_by_dev(dev
);
259 spin_unlock_irqrestore(&dca_lock
, flags
);
262 slot
= dca
->ops
->remove_requester(dca
, dev
);
263 spin_unlock_irqrestore(&dca_lock
, flags
);
268 dca_sysfs_remove_req(dca
, slot
);
272 EXPORT_SYMBOL_GPL(dca_remove_requester
);
275 * dca_common_get_tag - return the dca tag (serves both new and old api)
276 * @dev - the device that wants dca service
277 * @cpu - the cpuid as returned by get_cpu()
279 u8
dca_common_get_tag(struct device
*dev
, int cpu
)
281 struct dca_provider
*dca
;
285 spin_lock_irqsave(&dca_lock
, flags
);
287 dca
= dca_find_provider_by_dev(dev
);
289 spin_unlock_irqrestore(&dca_lock
, flags
);
292 tag
= dca
->ops
->get_tag(dca
, dev
, cpu
);
294 spin_unlock_irqrestore(&dca_lock
, flags
);
299 * dca3_get_tag - return the dca tag to the requester device
300 * for the given cpu (new api)
301 * @dev - the device that wants dca service
302 * @cpu - the cpuid as returned by get_cpu()
304 u8
dca3_get_tag(struct device
*dev
, int cpu
)
309 return dca_common_get_tag(dev
, cpu
);
311 EXPORT_SYMBOL_GPL(dca3_get_tag
);
314 * dca_get_tag - return the dca tag for the given cpu (old api)
315 * @cpu - the cpuid as returned by get_cpu()
317 u8
dca_get_tag(int cpu
)
319 struct device
*dev
= NULL
;
321 return dca_common_get_tag(dev
, cpu
);
323 EXPORT_SYMBOL_GPL(dca_get_tag
);
326 * alloc_dca_provider - get data struct for describing a dca provider
327 * @ops - pointer to struct of dca operation function pointers
328 * @priv_size - size of extra mem to be added for provider's needs
330 struct dca_provider
*alloc_dca_provider(struct dca_ops
*ops
, int priv_size
)
332 struct dca_provider
*dca
;
335 alloc_size
= (sizeof(*dca
) + priv_size
);
336 dca
= kzalloc(alloc_size
, GFP_KERNEL
);
343 EXPORT_SYMBOL_GPL(alloc_dca_provider
);
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
357 * register_dca_provider - register a dca provider
358 * @dca - struct created by alloc_dca_provider()
359 * @dev - device providing dca services
361 int register_dca_provider(struct dca_provider
*dca
, struct device
*dev
)
365 struct dca_domain
*domain
;
367 spin_lock_irqsave(&dca_lock
, flags
);
368 if (dca_providers_blocked
) {
369 spin_unlock_irqrestore(&dca_lock
, flags
);
372 spin_unlock_irqrestore(&dca_lock
, flags
);
374 err
= dca_sysfs_add_provider(dca
, dev
);
378 spin_lock_irqsave(&dca_lock
, flags
);
379 domain
= dca_get_domain(dev
);
381 if (dca_providers_blocked
) {
382 spin_unlock_irqrestore(&dca_lock
, flags
);
383 dca_sysfs_remove_provider(dca
);
384 unregister_dca_providers();
386 spin_unlock_irqrestore(&dca_lock
, flags
);
390 list_add(&dca
->node
, &domain
->dca_providers
);
391 spin_unlock_irqrestore(&dca_lock
, flags
);
393 blocking_notifier_call_chain(&dca_provider_chain
,
394 DCA_PROVIDER_ADD
, NULL
);
397 EXPORT_SYMBOL_GPL(register_dca_provider
);
400 * unregister_dca_provider - remove a dca provider
401 * @dca - struct created by alloc_dca_provider()
403 void unregister_dca_provider(struct dca_provider
*dca
, struct device
*dev
)
406 struct pci_bus
*pci_rc
;
407 struct dca_domain
*domain
;
409 blocking_notifier_call_chain(&dca_provider_chain
,
410 DCA_PROVIDER_REMOVE
, NULL
);
412 spin_lock_irqsave(&dca_lock
, flags
);
414 list_del(&dca
->node
);
416 pci_rc
= dca_pci_rc_from_dev(dev
);
417 domain
= dca_find_domain(pci_rc
);
418 if (list_empty(&domain
->dca_providers
))
419 dca_free_domain(domain
);
421 spin_unlock_irqrestore(&dca_lock
, flags
);
423 dca_sysfs_remove_provider(dca
);
425 EXPORT_SYMBOL_GPL(unregister_dca_provider
);
428 * dca_register_notify - register a client's notifier callback
430 void dca_register_notify(struct notifier_block
*nb
)
432 blocking_notifier_chain_register(&dca_provider_chain
, nb
);
434 EXPORT_SYMBOL_GPL(dca_register_notify
);
437 * dca_unregister_notify - remove a client's notifier callback
439 void dca_unregister_notify(struct notifier_block
*nb
)
441 blocking_notifier_chain_unregister(&dca_provider_chain
, nb
);
443 EXPORT_SYMBOL_GPL(dca_unregister_notify
);
445 static int __init
dca_init(void)
447 pr_info("dca service started, version %s\n", DCA_VERSION
);
448 return dca_sysfs_init();
451 static void __exit
dca_exit(void)
456 arch_initcall(dca_init
);
457 module_exit(dca_exit
);