// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>
#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	/* walk up to the PCI root complex bus */
	while (bus->parent)
		bus = bus->parent;

	return bus;
}
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}
/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* ask each provider in this domain until one accepts the requester */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
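
/*
 * Illustrative sketch (not part of this driver): a hypothetical DCA client
 * driver would typically call dca_add_requester() once at probe time and
 * dca_remove_requester() on teardown. The "my_client_probe" and
 * "my_client_remove" names below are assumptions for illustration only.
 *
 *	static int my_client_probe(struct pci_dev *pdev)
 *	{
 *		int err = dca_add_requester(&pdev->dev);
 *
 *		if (err && err != -ENODEV)
 *			dev_warn(&pdev->dev, "DCA registration failed: %d\n", err);
 *		return 0;
 *	}
 *
 *	static void my_client_remove(struct pci_dev *pdev)
 *	{
 *		dca_remove_requester(&pdev->dev);
 *	}
 */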
/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
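
/*
 * Illustrative sketch (not part of this driver): a client that has already
 * registered with dca_add_requester() would typically fetch the tag for the
 * CPU it is currently running on and program it into its device. The
 * "write_rx_dca_ctrl" helper and "ring" variable are assumptions standing
 * in for device-specific register programming.
 *
 *	{
 *		int cpu = get_cpu();
 *		u8 tag = dca3_get_tag(&pdev->dev, cpu);
 *
 *		write_rx_dca_ctrl(ring, tag);	// hypothetical helper
 *		put_cpu();
 *	}
 */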
/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
					int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);
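
/*
 * Illustrative sketch (not part of this driver): a hypothetical provider
 * driver pairs alloc_dca_provider() with register_dca_provider() and tears
 * down in the reverse order. "my_dca_ops" and "struct my_priv" are
 * assumptions; a real provider supplies its own dca_ops implementation.
 *
 *	struct dca_provider *dca;
 *	int err;
 *
 *	dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_priv));
 *	if (!dca)
 *		return -ENOMEM;
 *
 *	err = register_dca_provider(dca, &pdev->dev);
 *	if (err) {
 *		free_dca_provider(dca);
 *		return err;
 *	}
 *
 *	... and on teardown:
 *
 *	unregister_dca_provider(dca, &pdev->dev);
 *	free_dca_provider(dca);
 */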
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device that was providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block to call on provider add/remove events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block previously registered with dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
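
/*
 * Illustrative sketch (not part of this driver): a client that wants to
 * re-request DCA service as providers come and go can hook the notifier
 * chain. The callback, notifier_block, and "my_dev" names below are
 * assumptions for illustration only.
 *
 *	static int my_dca_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		if (event == DCA_PROVIDER_ADD)
 *			dca_add_requester(my_dev);
 *		else if (event == DCA_PROVIDER_REMOVE)
 *			dca_remove_requester(my_dev);
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_dca_nb = {
 *		.notifier_call = my_dca_notify,
 *	};
 *
 *	dca_register_notify(&my_dca_nb);
 *	...
 *	dca_unregister_notify(&my_dca_nb);
 */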
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);

	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);