/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
26 #include <linux/kernel.h>
27 #include <linux/notifier.h>
28 #include <linux/device.h>
29 #include <linux/dca.h>
30 #include <linux/slab.h>
31 #include <linux/module.h>
33 #define DCA_VERSION "1.12.1"
35 MODULE_VERSION(DCA_VERSION
);
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Intel Corporation");
/* Protects dca_domains and each domain's provider list. */
static DEFINE_SPINLOCK(dca_lock);

/* One dca_domain per PCI root complex that has at least one provider. */
static LIST_HEAD(dca_domains);

/* Clients subscribe here to learn of provider add/remove events. */
static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Set when an ioatdma v3.0 device appears alongside existing domains;
 * from then on all provider registration is refused. */
static int dca_providers_blocked;
47 static struct pci_bus
*dca_pci_rc_from_dev(struct device
*dev
)
49 struct pci_dev
*pdev
= to_pci_dev(dev
);
50 struct pci_bus
*bus
= pdev
->bus
;
58 static struct dca_domain
*dca_allocate_domain(struct pci_bus
*rc
)
60 struct dca_domain
*domain
;
62 domain
= kzalloc(sizeof(*domain
), GFP_NOWAIT
);
66 INIT_LIST_HEAD(&domain
->dca_providers
);
72 static void dca_free_domain(struct dca_domain
*domain
)
74 list_del(&domain
->node
);
78 static int dca_provider_ioat_ver_3_0(struct device
*dev
)
80 struct pci_dev
*pdev
= to_pci_dev(dev
);
82 return ((pdev
->vendor
== PCI_VENDOR_ID_INTEL
) &&
83 ((pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG0
) ||
84 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG1
) ||
85 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG2
) ||
86 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG3
) ||
87 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG4
) ||
88 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG5
) ||
89 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG6
) ||
90 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG7
)));
93 static void unregister_dca_providers(void)
95 struct dca_provider
*dca
, *_dca
;
96 struct list_head unregistered_providers
;
97 struct dca_domain
*domain
;
100 blocking_notifier_call_chain(&dca_provider_chain
,
101 DCA_PROVIDER_REMOVE
, NULL
);
103 INIT_LIST_HEAD(&unregistered_providers
);
105 spin_lock_irqsave(&dca_lock
, flags
);
107 if (list_empty(&dca_domains
)) {
108 spin_unlock_irqrestore(&dca_lock
, flags
);
112 /* at this point only one domain in the list is expected */
113 domain
= list_first_entry(&dca_domains
, struct dca_domain
, node
);
115 list_for_each_entry_safe(dca
, _dca
, &domain
->dca_providers
, node
)
116 list_move(&dca
->node
, &unregistered_providers
);
118 dca_free_domain(domain
);
120 spin_unlock_irqrestore(&dca_lock
, flags
);
122 list_for_each_entry_safe(dca
, _dca
, &unregistered_providers
, node
) {
123 dca_sysfs_remove_provider(dca
);
124 list_del(&dca
->node
);
128 static struct dca_domain
*dca_find_domain(struct pci_bus
*rc
)
130 struct dca_domain
*domain
;
132 list_for_each_entry(domain
, &dca_domains
, node
)
133 if (domain
->pci_rc
== rc
)
139 static struct dca_domain
*dca_get_domain(struct device
*dev
)
142 struct dca_domain
*domain
;
144 rc
= dca_pci_rc_from_dev(dev
);
145 domain
= dca_find_domain(rc
);
148 if (dca_provider_ioat_ver_3_0(dev
) && !list_empty(&dca_domains
)) {
149 dca_providers_blocked
= 1;
151 domain
= dca_allocate_domain(rc
);
153 list_add(&domain
->node
, &dca_domains
);
160 static struct dca_provider
*dca_find_provider_by_dev(struct device
*dev
)
162 struct dca_provider
*dca
;
164 struct dca_domain
*domain
;
167 rc
= dca_pci_rc_from_dev(dev
);
168 domain
= dca_find_domain(rc
);
172 if (!list_empty(&dca_domains
))
173 domain
= list_first_entry(&dca_domains
,
180 list_for_each_entry(dca
, &domain
->dca_providers
, node
)
181 if ((!dev
) || (dca
->ops
->dev_managed(dca
, dev
)))
188 * dca_add_requester - add a dca client to the list
189 * @dev - the device that wants dca service
191 int dca_add_requester(struct device
*dev
)
193 struct dca_provider
*dca
;
194 int err
, slot
= -ENODEV
;
196 struct pci_bus
*pci_rc
;
197 struct dca_domain
*domain
;
202 spin_lock_irqsave(&dca_lock
, flags
);
204 /* check if the requester has not been added already */
205 dca
= dca_find_provider_by_dev(dev
);
207 spin_unlock_irqrestore(&dca_lock
, flags
);
211 pci_rc
= dca_pci_rc_from_dev(dev
);
212 domain
= dca_find_domain(pci_rc
);
214 spin_unlock_irqrestore(&dca_lock
, flags
);
218 list_for_each_entry(dca
, &domain
->dca_providers
, node
) {
219 slot
= dca
->ops
->add_requester(dca
, dev
);
224 spin_unlock_irqrestore(&dca_lock
, flags
);
229 err
= dca_sysfs_add_req(dca
, dev
, slot
);
231 spin_lock_irqsave(&dca_lock
, flags
);
232 if (dca
== dca_find_provider_by_dev(dev
))
233 dca
->ops
->remove_requester(dca
, dev
);
234 spin_unlock_irqrestore(&dca_lock
, flags
);
240 EXPORT_SYMBOL_GPL(dca_add_requester
);
243 * dca_remove_requester - remove a dca client from the list
244 * @dev - the device that wants dca service
246 int dca_remove_requester(struct device
*dev
)
248 struct dca_provider
*dca
;
255 spin_lock_irqsave(&dca_lock
, flags
);
256 dca
= dca_find_provider_by_dev(dev
);
258 spin_unlock_irqrestore(&dca_lock
, flags
);
261 slot
= dca
->ops
->remove_requester(dca
, dev
);
262 spin_unlock_irqrestore(&dca_lock
, flags
);
267 dca_sysfs_remove_req(dca
, slot
);
271 EXPORT_SYMBOL_GPL(dca_remove_requester
);
274 * dca_common_get_tag - return the dca tag (serves both new and old api)
275 * @dev - the device that wants dca service
276 * @cpu - the cpuid as returned by get_cpu()
278 u8
dca_common_get_tag(struct device
*dev
, int cpu
)
280 struct dca_provider
*dca
;
284 spin_lock_irqsave(&dca_lock
, flags
);
286 dca
= dca_find_provider_by_dev(dev
);
288 spin_unlock_irqrestore(&dca_lock
, flags
);
291 tag
= dca
->ops
->get_tag(dca
, dev
, cpu
);
293 spin_unlock_irqrestore(&dca_lock
, flags
);
298 * dca3_get_tag - return the dca tag to the requester device
299 * for the given cpu (new api)
300 * @dev - the device that wants dca service
301 * @cpu - the cpuid as returned by get_cpu()
303 u8
dca3_get_tag(struct device
*dev
, int cpu
)
308 return dca_common_get_tag(dev
, cpu
);
310 EXPORT_SYMBOL_GPL(dca3_get_tag
);
313 * dca_get_tag - return the dca tag for the given cpu (old api)
314 * @cpu - the cpuid as returned by get_cpu()
316 u8
dca_get_tag(int cpu
)
318 struct device
*dev
= NULL
;
320 return dca_common_get_tag(dev
, cpu
);
322 EXPORT_SYMBOL_GPL(dca_get_tag
);
325 * alloc_dca_provider - get data struct for describing a dca provider
326 * @ops - pointer to struct of dca operation function pointers
327 * @priv_size - size of extra mem to be added for provider's needs
329 struct dca_provider
*alloc_dca_provider(struct dca_ops
*ops
, int priv_size
)
331 struct dca_provider
*dca
;
334 alloc_size
= (sizeof(*dca
) + priv_size
);
335 dca
= kzalloc(alloc_size
, GFP_KERNEL
);
342 EXPORT_SYMBOL_GPL(alloc_dca_provider
);
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
356 * register_dca_provider - register a dca provider
357 * @dca - struct created by alloc_dca_provider()
358 * @dev - device providing dca services
360 int register_dca_provider(struct dca_provider
*dca
, struct device
*dev
)
364 struct dca_domain
*domain
;
366 spin_lock_irqsave(&dca_lock
, flags
);
367 if (dca_providers_blocked
) {
368 spin_unlock_irqrestore(&dca_lock
, flags
);
371 spin_unlock_irqrestore(&dca_lock
, flags
);
373 err
= dca_sysfs_add_provider(dca
, dev
);
377 spin_lock_irqsave(&dca_lock
, flags
);
378 domain
= dca_get_domain(dev
);
380 if (dca_providers_blocked
) {
381 spin_unlock_irqrestore(&dca_lock
, flags
);
382 dca_sysfs_remove_provider(dca
);
383 unregister_dca_providers();
385 spin_unlock_irqrestore(&dca_lock
, flags
);
389 list_add(&dca
->node
, &domain
->dca_providers
);
390 spin_unlock_irqrestore(&dca_lock
, flags
);
392 blocking_notifier_call_chain(&dca_provider_chain
,
393 DCA_PROVIDER_ADD
, NULL
);
396 EXPORT_SYMBOL_GPL(register_dca_provider
);
399 * unregister_dca_provider - remove a dca provider
400 * @dca - struct created by alloc_dca_provider()
402 void unregister_dca_provider(struct dca_provider
*dca
, struct device
*dev
)
405 struct pci_bus
*pci_rc
;
406 struct dca_domain
*domain
;
408 blocking_notifier_call_chain(&dca_provider_chain
,
409 DCA_PROVIDER_REMOVE
, NULL
);
411 spin_lock_irqsave(&dca_lock
, flags
);
413 list_del(&dca
->node
);
415 pci_rc
= dca_pci_rc_from_dev(dev
);
416 domain
= dca_find_domain(pci_rc
);
417 if (list_empty(&domain
->dca_providers
))
418 dca_free_domain(domain
);
420 spin_unlock_irqrestore(&dca_lock
, flags
);
422 dca_sysfs_remove_provider(dca
);
424 EXPORT_SYMBOL_GPL(unregister_dca_provider
);
427 * dca_register_notify - register a client's notifier callback
429 void dca_register_notify(struct notifier_block
*nb
)
431 blocking_notifier_chain_register(&dca_provider_chain
, nb
);
433 EXPORT_SYMBOL_GPL(dca_register_notify
);
436 * dca_unregister_notify - remove a client's notifier callback
438 void dca_unregister_notify(struct notifier_block
*nb
)
440 blocking_notifier_chain_unregister(&dca_provider_chain
, nb
);
442 EXPORT_SYMBOL_GPL(dca_unregister_notify
);
444 static int __init
dca_init(void)
446 pr_info("dca service started, version %s\n", DCA_VERSION
);
447 return dca_sysfs_init();
450 static void __exit
dca_exit(void)
455 arch_initcall(dca_init
);
456 module_exit(dca_exit
);