// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"
MODULE_VERSION(DCA_VERSION);
MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;
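/*
 * dca_lock protects dca_domains, each domain's list of providers and
 * dca_providers_blocked.  Providers are grouped into dca_domains by the
 * PCI root complex they sit behind; clients learn about provider arrival
 * and removal through dca_provider_chain.
 */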
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	/* walk up to the PCI root complex */
	while (bus->parent)
		bus = bus->parent;

	return bus;
}
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}
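/*
 * The check above matches the IOAT ver 3.0 engines (TBG0-TBG7).
 * dca_get_domain() uses it to set dca_providers_blocked when such a
 * provider registers while a domain for a different root complex already
 * exists; register_dca_provider() then rejects new providers and tears
 * down the existing ones via unregister_dca_providers().
 */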
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* remove the sysfs entries outside the spinlock */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}
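/*
 * dca_find_provider_by_dev() also backs the legacy dca_get_tag() path:
 * a NULL dev means "the first provider in the first domain", while a
 * non-NULL dev is matched against each provider's ->dev_managed()
 * callback.
 */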
/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* take the first provider in the domain that accepts the requester */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		/* undo the registration if the sysfs entry cannot be created */
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
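/*
 * A minimal usage sketch of the requester API (hypothetical driver code,
 * not part of this file; "my_dev" is an assumption used only for
 * illustration):
 *
 *	// probe path: opt in to DCA for this device
 *	err = dca_add_requester(&my_dev->dev);
 *
 *	// hot path: fetch the tag for the cpu that will consume the data
 *	cpu = get_cpu();
 *	tag = dca3_get_tag(&my_dev->dev, cpu);
 *	put_cpu();
 *	// the driver then writes "tag" into its device-specific DCA
 *	// control registers
 *
 *	// remove path
 *	dca_remove_requester(&my_dev->dev);
 */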
/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
					int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	/* the provider's private area lives right after the struct */
	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		/* allocate the new domain with the lock dropped */
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
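/*
 * A minimal provider-side sketch (hypothetical code, not part of this
 * file; "my_dca_ops", "my_priv" and "pdev" are assumptions used only for
 * illustration).  The ops callbacks are the ones dispatched above:
 * add_requester, remove_requester, get_tag and dev_managed.
 *
 *	struct dca_provider *dca;
 *	int err;
 *
 *	dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_priv));
 *	if (!dca)
 *		return -ENOMEM;
 *	err = register_dca_provider(dca, &pdev->dev);
 *	if (err) {
 *		free_dca_provider(dca);
 *		return err;
 *	}
 *	// ... and on teardown:
 *	unregister_dca_provider(dca, &pdev->dev);
 *	free_dca_provider(dca);
 */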
/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	/* drop the domain once its last provider is gone */
	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
/**
 * dca_register_notify - register a client's notifier callback
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
/**
 * dca_unregister_notify - remove a client's notifier callback
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
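/*
 * Clients that need to react to providers coming and going can hook
 * dca_provider_chain through the two helpers above; the chain is invoked
 * with DCA_PROVIDER_ADD and DCA_PROVIDER_REMOVE from
 * register_dca_provider(), unregister_dca_provider() and
 * unregister_dca_providers().
 */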
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);

	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);