// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/xarray.h>

#include "internals.h"
/**
 * struct msi_device_data - MSI per device data
 * @properties:		MSI properties which are interesting to drivers
 * @mutex:		Mutex protecting the MSI descriptor store
 * @__domains:		Internal data for per device MSI domains
 * @__iter_idx:		Index to search the next entry for iterators
 */
struct msi_device_data {
	unsigned long			properties;
	struct mutex			mutex;
	struct msi_dev_domain		__domains[MSI_MAX_DEVICE_IRQDOMAINS];
	unsigned long			__iter_idx;
};
/**
 * struct msi_ctrl - MSI internal management control structure
 * @domid:	ID of the domain on which management operations should be done
 * @first:	First (hardware) slot index to operate on
 * @last:	Last (hardware) slot index to operate on
 * @nirqs:	The number of Linux interrupts to allocate. Can be larger
 *		than the range due to PCI/multi-MSI.
 */
struct msi_ctrl {
	unsigned int			domid;
	unsigned int			first;
	unsigned int			last;
	unsigned int			nirqs;
};
/* Invalid Xarray index which is outside of any searchable range */
#define MSI_XA_MAX_INDEX	(ULONG_MAX - 1)
/* The maximum domain size */
#define MSI_XA_DOMAIN_SIZE	(MSI_MAX_INDEX + 1)
static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl);
static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid);
static inline int msi_sysfs_create_group(struct device *dev);
/**
 * msi_alloc_desc - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array size of @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
				       const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

	if (!desc)
		return NULL;

	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup_array(affinity, nvec, sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}
	return desc;
}
static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}
static int msi_insert_desc(struct device *dev, struct msi_desc *desc,
			   unsigned int domid, unsigned int index)
{
	struct msi_device_data *md = dev->msi.data;
	struct xarray *xa = &md->__domains[domid].store;
	unsigned int hwsize;
	int ret;

	hwsize = msi_domain_get_hwsize(dev, domid);

	if (index == MSI_ANY_INDEX) {
		struct xa_limit limit = { .min = 0, .max = hwsize - 1 };
		unsigned int index;

		/* Let the xarray allocate a free index within the limit */
		ret = xa_alloc(xa, &index, desc, limit, GFP_KERNEL);
		if (ret)
			goto fail;

		desc->msi_index = index;
	} else {
		if (index >= hwsize) {
			ret = -ERANGE;
			goto fail;
		}

		desc->msi_index = index;
		ret = xa_insert(xa, index, desc, GFP_KERNEL);
		if (ret)
			goto fail;
	}
	return 0;

fail:
	msi_free_desc(desc);
	return ret;
}
/**
 * msi_domain_insert_msi_desc - Allocate and initialize a MSI descriptor and
 *				insert it at @init_desc->msi_index
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @domid:	The id of the interrupt domain to which the descriptor is added
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;

	return msi_insert_desc(dev, desc, domid, init_desc->msi_index);
}
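
/*
 * A minimal usage sketch (hypothetical caller, modeled on the PCI/MSI
 * setup path): the caller fills a template descriptor on the stack and
 * lets the core allocate and store the real one. The descriptor mutex
 * must be held around the call.
 *
 *	struct msi_desc desc = {
 *		.nvec_used	= 1,
 *		.msi_index	= 0,
 *	};
 *	int ret;
 *
 *	msi_lock_descs(dev);
 *	ret = msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, &desc);
 *	msi_unlock_descs(dev);
 */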
static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}
static bool msi_ctrl_valid(struct device *dev, struct msi_ctrl *ctrl)
{
	unsigned int hwsize;

	if (WARN_ON_ONCE(ctrl->domid >= MSI_MAX_DEVICE_IRQDOMAINS ||
			 (dev->msi.domain &&
			  !dev->msi.data->__domains[ctrl->domid].domain)))
		return false;

	hwsize = msi_domain_get_hwsize(dev, ctrl->domid);
	if (WARN_ON_ONCE(ctrl->first > ctrl->last ||
			 ctrl->first >= hwsize ||
			 ctrl->last >= hwsize))
		return false;
	return true;
}
static void msi_domain_free_descs(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_desc *desc;
	struct xarray *xa;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (!msi_ctrl_valid(dev, ctrl))
		return;

	xa = &dev->msi.data->__domains[ctrl->domid].store;
	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
		xa_erase(xa, idx);

		/* Leak the descriptor when it is still referenced */
		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
			continue;
		msi_free_desc(desc);
	}
}
/**
 * msi_domain_free_msi_descs_range - Free a range of MSI descriptors of a device in an irqdomain
 * @dev:	Device for which to free the descriptors
 * @domid:	Id of the domain to operate on
 * @first:	Index to start freeing from (inclusive)
 * @last:	Last index to be freed (inclusive)
 */
void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
				     unsigned int first, unsigned int last)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= first,
		.last	= last,
	};

	msi_domain_free_descs(dev, &ctrl);
}
/**
 * msi_domain_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @ctrl:	Allocation control struct
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_domain_add_simple_msi_descs(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_desc *desc;
	unsigned long idx;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (!msi_ctrl_valid(dev, ctrl))
		return -EINVAL;

	for (idx = ctrl->first; idx <= ctrl->last; idx++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail_mem;
		ret = msi_insert_desc(dev, desc, ctrl->domid, idx);
		if (ret)
			goto fail;
	}
	return 0;

fail_mem:
	ret = -ENOMEM;
fail:
	msi_domain_free_descs(dev, ctrl);
	return ret;
}
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;
	int i;

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
		msi_remove_device_irq_domain(dev, i);
		WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
		xa_destroy(&md->__domains[i].store);
	}
	dev->msi.data = NULL;
}
/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret, i;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
		xa_init_flags(&md->__domains[i].store, XA_FLAGS_ALLOC);

	/*
	 * If @dev::msi::domain is set and is a global MSI domain, copy the
	 * pointer into the domain array so all code can operate on domain
	 * ids. The NULL pointer check is required to keep the legacy
	 * architecture specific PCI/MSI support working.
	 */
	if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
		md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;

	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}
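
/*
 * A minimal usage sketch (hypothetical bus probe path): call this once
 * before any MSI domain or descriptor operation. Repeated calls are
 * harmless and the devres hook releases everything on device removal.
 *
 *	ret = msi_setup_device_data(dev);
 *	if (ret)
 *		return ret;
 */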
/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);
/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
				      enum msi_desc_filter filter)
{
	struct xarray *xa = &md->__domains[domid].store;
	struct msi_desc *desc;

	xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	md->__iter_idx = MSI_XA_MAX_INDEX;
	return NULL;
}
/**
 * msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
 * @dev:	Device to operate on
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	lockdep_assert_held(&md->mutex);

	md->__iter_idx = 0;
	return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_domain_first_desc);
/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of __msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	lockdep_assert_held(&md->mutex);

	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
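
/*
 * A minimal iteration sketch using the msi_for_each_desc() wrapper from
 * <linux/msi.h>, which is built on msi_first_desc()/msi_next_desc(). The
 * descriptor mutex must be held across the whole walk:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		pr_debug("index %u -> virq %u\n", desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 */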
/**
 * msi_domain_get_virq - Lookup the Linux interrupt number for a MSI index on a interrupt domain
 * @dev:	Device to operate on
 * @domid:	Domain ID of the interrupt domain associated to the device
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi = false;
	struct xarray *xa;

	if (!dev->msi.data)
		return 0;

	if (WARN_ON_ONCE(index > MSI_MAX_INDEX || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return 0;

	/* This check is only valid for the PCI default MSI domain */
	if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
		pcimsi = to_pci_dev(dev)->msi_enabled;

	msi_lock_descs(dev);
	xa = &dev->msi.data->__domains[domid].store;
	desc = xa_load(xa, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}

	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_domain_get_virq);
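
/*
 * A minimal usage sketch (hypothetical driver, my_handler/my_data are
 * placeholders): translate MSI index 0 of the default domain into a Linux
 * interrupt number and hook a handler to it. For PCI devices,
 * pci_irq_vector() is the usual frontend for this lookup.
 *
 *	unsigned int virq = msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	ret = request_irq(virq, my_handler, 0, "my-device", my_data);
 */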
#ifdef CONFIG_SYSFS
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};

static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}
static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}
static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}
#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN)
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS || CONFIG_PCI_XEN */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */
static struct irq_domain *msi_get_device_domain(struct device *dev, unsigned int domid)
{
	struct irq_domain *domain;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (WARN_ON_ONCE(domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	domain = dev->msi.data->__domains[domid].domain;
	if (!domain)
		return NULL;

	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
		return NULL;

	return domain;
}
static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid)
{
	struct msi_domain_info *info;
	struct irq_domain *domain;

	domain = msi_get_device_domain(dev, domid);
	if (domain) {
		info = domain->host_data;
		return info->hwsize;
	}
	/* No domain, default to MSI_XA_DOMAIN_SIZE */
	return MSI_XA_DOMAIN_SIZE;
}
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}
static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				for (i--; i > 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}
static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * This will catch allocations through the regular irqdomain path except
	 * for MSI domains which really support this, e.g. MBIGEN.
	 */
	if (!info->ops->msi_translate)
		return -ENOTSUPP;
	return info->ops->msi_translate(domain, fwspec, hwirq, type);
}
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
	.translate	= msi_domain_translate,
};
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
		chip->irq_set_affinity = msi_domain_set_affinity;
}
static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  unsigned int flags,
						  struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->hwsize > MSI_XA_DOMAIN_SIZE)
		return NULL;

	/*
	 * Hardware size 0 is valid for backwards compatibility and for
	 * domains which are not backed by a hardware table. Grant the
	 * maximum index space.
	 */
	if (!info->hwsize)
		info->hwsize = MSI_XA_DOMAIN_SIZE;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain) {
		irq_domain_update_bus_token(domain, info->bus_token);
		if (info->flags & MSI_FLAG_PARENT_PM_DEV)
			domain->pm_dev = parent->pm_dev;
	}

	return domain;
}
/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	return __msi_create_irq_domain(fwnode, info, 0, parent);
}
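
/*
 * A minimal setup sketch (hypothetical interrupt controller driver; the
 * my_msi_* names are placeholders): a global MSI domain built from a
 * static info structure. With the USE_DEF_* flags set, the default
 * domain and chip ops fill in anything the driver leaves NULL.
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "MY-MSI",
 *		.irq_mask		= my_msi_mask,
 *		.irq_unmask		= my_msi_unmask,
 *		.irq_write_msi_msg	= my_msi_write_msg,
 *	};
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(fwnode, &my_msi_info, parent);
 */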
/**
 * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down
 *				  in the domain hierarchy
 * @dev:		The device for which the domain should be created
 * @domain:		The domain in the hierarchy this op is being called on
 * @msi_parent_domain:	The IRQ_DOMAIN_FLAG_MSI_PARENT domain for the child to
 *			be created
 * @msi_child_info:	The MSI domain info of the IRQ_DOMAIN_FLAG_MSI_DEVICE
 *			domain to be created
 *
 * Return: true on success, false otherwise
 *
 * This is the most complex problem of per device MSI domains and the
 * underlying interrupt domain hierarchy:
 *
 * The device domain to be initialized requests the broadest feature set
 * possible and the underlying domain hierarchy puts restrictions on it.
 *
 * That's trivial for a simple parent->child relationship, but it gets
 * interesting with an intermediate domain: root->parent->child. The
 * intermediate 'parent' can expand the capabilities which the 'root'
 * domain is providing. So that creates a classic hen and egg problem:
 * Which entity is doing the restrictions/expansions?
 *
 * One solution is to let the root domain handle the initialization that's
 * why there is the @domain and the @msi_parent_domain pointer.
 */
bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *msi_parent_domain,
				  struct msi_domain_info *msi_child_info)
{
	struct irq_domain *parent = domain->parent;

	if (WARN_ON_ONCE(!parent || !parent->msi_parent_ops ||
			 !parent->msi_parent_ops->init_dev_msi_info))
		return false;

	return parent->msi_parent_ops->init_dev_msi_info(dev, parent, msi_parent_domain,
							 msi_child_info);
}
/**
 * msi_create_device_irq_domain - Create a device MSI interrupt domain
 * @dev:		Pointer to the device
 * @domid:		Domain id
 * @template:		MSI domain info bundle used as template
 * @hwsize:		Maximum number of MSI table entries (0 if unknown or unlimited)
 * @domain_data:	Optional pointer to domain specific data which is set in
 *			msi_domain_info::data
 * @chip_data:		Optional pointer to chip specific data which is set in
 *			msi_domain_info::chip_data
 *
 * Return: True on success, false otherwise
 *
 * There is no firmware node required for this interface because the per
 * device domains are software constructs which are actually closer to the
 * hardware reality than any firmware can describe them.
 *
 * The domain name and the irq chip name for a MSI device domain are
 * composed by: "$(PREFIX)$(CHIPNAME)-$(DEVNAME)"
 *
 * $PREFIX:   Optional prefix provided by the underlying MSI parent domain
 *	      via msi_parent_ops::prefix. If that pointer is NULL the prefix
 *	      is empty.
 * $CHIPNAME: The name of the irq_chip in @template
 * $DEVNAME:  The name of the device
 *
 * This results in understandable chip names and hardware interrupt numbers
 * in e.g. /proc/interrupts
 *
 * PCI-MSI-0000:00:1c.0     0-edge  Parent domain has no prefix
 * IR-PCI-MSI-0000:00:1c.4  0-edge  Same with interrupt remapping prefix 'IR-'
 *
 * IR-PCI-MSIX-0000:3d:00.0 0-edge  Hardware interrupt numbers reflect
 * IR-PCI-MSIX-0000:3d:00.0 1-edge  the real MSI-X index on that device
 * IR-PCI-MSIX-0000:3d:00.0 2-edge
 *
 * On IMS domains the hardware interrupt number is either a table entry
 * index or a purely software managed index but it is guaranteed to be
 * unique.
 *
 * The domain pointer is stored in @dev::msi::data::__irqdomains[]. All
 * subsequent operations on the domain depend on the domain id.
 *
 * The domain is automatically freed when the device is removed via devres
 * in the context of @dev::msi::data freeing, but it can also be
 * independently removed via @msi_remove_device_irq_domain().
 */
bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
				  const struct msi_domain_template *template,
				  unsigned int hwsize, void *domain_data,
				  void *chip_data)
{
	struct irq_domain *domain, *parent = dev->msi.domain;
	struct fwnode_handle *fwnode, *fwnalloced = NULL;
	struct msi_domain_template *bundle;
	const struct msi_parent_ops *pops;

	if (!irq_domain_is_msi_parent(parent))
		return false;

	if (domid >= MSI_MAX_DEVICE_IRQDOMAINS)
		return false;

	bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return false;

	bundle->info.hwsize = hwsize;
	bundle->info.chip = &bundle->chip;
	bundle->info.ops = &bundle->ops;
	bundle->info.data = domain_data;
	bundle->info.chip_data = chip_data;

	pops = parent->msi_parent_ops;
	snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s",
		 pops->prefix ? : "", bundle->chip.name, dev_name(dev));
	bundle->chip.name = bundle->name;

	/*
	 * Using the device firmware node is required for wire to MSI
	 * device domains so that the existing firmware results in a domain
	 * match.
	 * All other device domains like PCI/MSI use the named firmware
	 * node as they are not guaranteed to have a fwnode. They are never
	 * looked up and always handled in the context of the device.
	 */
	if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE)
		fwnode = dev->fwnode;
	else
		fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name);

	if (!fwnode)
		goto free_bundle;

	if (msi_setup_device_data(dev))
		goto free_fwnode;

	msi_lock_descs(dev);

	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid)))
		goto fail;

	if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info))
		goto fail;

	domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent);
	if (!domain)
		goto fail;

	domain->dev = dev;
	dev->msi.data->__domains[domid].domain = domain;
	msi_unlock_descs(dev);
	return true;

fail:
	msi_unlock_descs(dev);
free_fwnode:
	irq_domain_free_fwnode(fwnalloced);
free_bundle:
	kfree(bundle);
	return false;
}
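
/*
 * A minimal setup sketch (hypothetical IMS-style driver; the my_ims_*
 * names are placeholders): the template is copied into a per device
 * bundle, so it can live in rodata.
 *
 *	static const struct msi_domain_template my_template = {
 *		.chip = {
 *			.name			= "MY-IMS",
 *			.irq_mask		= my_ims_mask,
 *			.irq_unmask		= my_ims_unmask,
 *			.irq_write_msi_msg	= my_ims_write_msg,
 *		},
 *		.info = {
 *			.flags	= MSI_FLAG_USE_DEF_DOM_OPS |
 *				  MSI_FLAG_USE_DEF_CHIP_OPS,
 *		},
 *	};
 *
 *	if (!msi_create_device_irq_domain(dev, MSI_SECONDARY_DOMAIN,
 *					  &my_template, 256, NULL, NULL))
 *		return -ENODEV;
 */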
/**
 * msi_remove_device_irq_domain - Free a device MSI interrupt domain
 * @dev:	Pointer to the device
 * @domid:	Domain id
 */
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid)
{
	struct fwnode_handle *fwnode = NULL;
	struct msi_domain_info *info;
	struct irq_domain *domain;

	msi_lock_descs(dev);

	domain = msi_get_device_domain(dev, domid);

	if (!domain || !irq_domain_is_msi_device(domain))
		goto unlock;

	dev->msi.data->__domains[domid].domain = NULL;
	info = domain->host_data;
	if (irq_domain_is_msi_device(domain))
		fwnode = domain->fwnode;
	irq_domain_remove(domain);
	irq_domain_free_fwnode(fwnode);
	kfree(container_of(info, struct msi_domain_template, info));

unlock:
	msi_unlock_descs(dev);
}
/**
 * msi_match_device_irq_domain - Match a device irq domain against a bus token
 * @dev:	Pointer to the device
 * @domid:	Domain id
 * @bus_token:	Bus token to match against the domain bus token
 *
 * Return: True if device domain exists and bus tokens match.
 */
bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
				 enum irq_domain_bus_token bus_token)
{
	struct msi_domain_info *info;
	struct irq_domain *domain;
	bool ret = false;

	msi_lock_descs(dev);
	domain = msi_get_device_domain(dev, domid);
	if (domain && irq_domain_is_msi_device(domain)) {
		info = domain->host_data;
		ret = info->bus_token == bus_token;
	}
	msi_unlock_descs(dev);
	return ret;
}
static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
				   int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	return ops->msi_prepare(domain, dev, nvec, arg);
}
/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}
static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}
#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);

		/*
		 * If the interrupt is managed but no CPU is available to
		 * service it, shut it down until better times. Note that
		 * we only do this on the !RESERVE path as x86 (the only
		 * architecture using this flag) deals with this in a
		 * different way by using a catch-all vector.
		 */
		if ((vflags & VIRQ_ACTIVATE) &&
		    irqd_affinity_is_managed(irqd) &&
		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
					cpu_online_mask)) {
			irqd_set_managed_shutdown(irqd);
			return 0;
		}
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}
static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain,
				   struct msi_ctrl *ctrl)
{
	struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	unsigned int vflags = 0, allocated = 0;
	msi_alloc_info_t arg = { };
	struct msi_desc *desc;
	unsigned long idx;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev))
		vflags |= VIRQ_CAN_RESERVE;

	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
		if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
			continue;

		/* This should return -ECONFUSED... */
		if (WARN_ON_ONCE(allocated >= ctrl->nirqs))
			return -EINVAL;

		if (ops->prepare_desc)
			ops->prepare_desc(domain, &arg, desc);

		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}
static int msi_domain_alloc_simple_msi_descs(struct device *dev,
					     struct msi_domain_info *info,
					     struct msi_ctrl *ctrl)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_domain_add_simple_msi_descs(dev, ctrl);
}
static int __msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_domain_info *info;
	struct msi_domain_ops *ops;
	struct irq_domain *domain;
	int ret;

	if (!msi_ctrl_valid(dev, ctrl))
		return -EINVAL;

	domain = msi_get_device_domain(dev, ctrl->domid);
	if (!domain)
		return -ENODEV;

	info = domain->host_data;
	ops = info->ops;

	ret = msi_domain_alloc_simple_msi_descs(dev, info, ctrl);
	if (ret)
		return ret;

	if (ops->domain_alloc_irqs)
		return ops->domain_alloc_irqs(domain, dev, ctrl->nirqs);

	return __msi_domain_alloc_irqs(dev, domain, ctrl);
}
static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
{
	int ret = __msi_domain_alloc_locked(dev, ctrl);

	if (ret)
		msi_domain_free_locked(dev, ctrl);
	return ret;
}
/**
 * msi_domain_alloc_irqs_range_locked - Allocate interrupts from a MSI interrupt domain
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to allocate (inclusive)
 * @last:	Last index to allocate (inclusive)
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own descriptor
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= first,
		.last	= last,
		.nirqs	= last + 1 - first,
	};

	return msi_domain_alloc_locked(dev, &ctrl);
}
/**
 * msi_domain_alloc_irqs_range - Allocate interrupts from a MSI interrupt domain
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to allocate (inclusive)
 * @last:	Last index to allocate (inclusive)
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last);
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs_range);
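
/*
 * A minimal usage sketch (hypothetical platform driver): allocate the
 * first four interrupts of the default device domain and free them again
 * on teardown.
 *
 *	ret = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, 3);
 *	if (ret)
 *		return ret;
 *	...
 *	msi_domain_free_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, 3);
 */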
/**
 * msi_domain_alloc_irqs_all_locked - Allocate all interrupts from a MSI interrupt domain
 *
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @nirqs:	The number of interrupts to allocate
 *
 * This function scans all MSI descriptors of the MSI domain and allocates interrupts
 * for all unassigned ones. That function is to be used for MSI domain usage where
 * the descriptor allocation is handled at the call site, e.g. PCI/MSI[X].
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= 0,
		.last	= msi_domain_get_hwsize(dev, domid) - 1,
		.nirqs	= nirqs,
	};

	return msi_domain_alloc_locked(dev, &ctrl);
}
static struct msi_map __msi_domain_alloc_irq_at(struct device *dev, unsigned int domid,
						unsigned int index,
						const struct irq_affinity_desc *affdesc,
						union msi_instance_cookie *icookie)
{
	struct msi_ctrl ctrl = { .domid	= domid, .nirqs = 1, };
	struct irq_domain *domain;
	struct msi_map map = { };
	struct msi_desc *desc;
	int ret;

	domain = msi_get_device_domain(dev, domid);
	if (!domain) {
		map.index = -ENODEV;
		return map;
	}

	desc = msi_alloc_desc(dev, 1, affdesc);
	if (!desc) {
		map.index = -ENOMEM;
		return map;
	}

	if (icookie)
		desc->data.icookie = *icookie;

	ret = msi_insert_desc(dev, desc, domid, index);
	if (ret) {
		map.index = ret;
		return map;
	}

	ctrl.first = ctrl.last = desc->msi_index;

	ret = __msi_domain_alloc_irqs(dev, domain, &ctrl);
	if (ret) {
		map.index = ret;
		msi_domain_free_locked(dev, &ctrl);
	} else {
		map.index = desc->msi_index;
		map.virq = desc->irq;
	}
	return map;
}
/**
 * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at
 *			     a given index - or at the next free index
 *
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @index:	Index for allocation. If @index == %MSI_ANY_INDEX the allocation
 *		uses the next free index.
 * @affdesc:	Optional pointer to an interrupt affinity descriptor structure
 * @icookie:	Optional pointer to a domain specific per instance cookie. If
 *		non-NULL the content of the cookie is stored in msi_desc::data.
 *		Must be NULL for MSI-X allocations
 *
 * This requires a MSI interrupt domain which lets the core code manage the
 * MSI descriptors.
 *
 * Return: struct msi_map
 *
 *	On success msi_map::index contains the allocated index number and
 *	msi_map::virq the corresponding Linux interrupt number
 *
 *	On failure msi_map::index contains the error code and msi_map::virq
 *	is %0.
 */
struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
				       const struct irq_affinity_desc *affdesc,
				       union msi_instance_cookie *icookie)
{
	struct msi_map map;

	msi_lock_descs(dev);
	map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie);
	msi_unlock_descs(dev);
	return map;
}
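
/*
 * A minimal usage sketch (hypothetical dynamic allocation after MSI-X has
 * been enabled; my_handler/my_data are placeholders): let the core pick
 * the next free index and check the returned map.
 *
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, MSI_ANY_INDEX,
 *				      NULL, NULL);
 *	if (map.index < 0)
 *		return map.index;
 *	ret = request_irq(map.virq, my_handler, 0, "my-dyn-msix", my_data);
 */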
/**
 * msi_device_domain_alloc_wired - Allocate a "wired" interrupt on @domain
 * @domain:	The domain to allocate on
 * @hwirq:	The hardware interrupt number to allocate for
 * @type:	The interrupt type
 *
 * This weirdness supports wire to MSI controllers like MBIGEN.
 *
 * @hwirq is the hardware interrupt number which is handed in from
 * irq_create_fwspec_mapping(). As the wire to MSI domain is sparse, but
 * sized in firmware, the hardware interrupt number cannot be used as MSI
 * index. For the underlying irq chip the MSI index is irrelevant and
 * all it needs is the hardware interrupt number.
 *
 * To handle this the MSI index is allocated with MSI_ANY_INDEX and the
 * hardware interrupt number is stored along with the type information in
 * msi_desc::cookie so the underlying interrupt chip and domain code can
 * retrieve it.
 *
 * Return: The Linux interrupt number (> 0) or an error code
 */
int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
				  unsigned int type)
{
	unsigned int domid = MSI_DEFAULT_DOMAIN;
	union msi_instance_cookie icookie = { };
	struct device *dev = domain->dev;
	struct msi_map map = { };

	if (WARN_ON_ONCE(!dev || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
		return -EINVAL;

	icookie.value = ((u64)type << 32) | hwirq;

	msi_lock_descs(dev);
	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain))
		map.index = -EINVAL;
	else
		map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
	msi_unlock_descs(dev);

	return map.index >= 0 ? map.virq : map.index;
}
static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain,
				   struct msi_ctrl *ctrl)
{
	struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	unsigned long idx;
	int i;

	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
		/* Only handle MSI entries which have an interrupt associated */
		if (!msi_desc_match(desc, MSI_DESC_ASSOCIATED))
			continue;

		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		desc->irq = 0;
	}
}
static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_domain_info *info;
	struct msi_domain_ops *ops;
	struct irq_domain *domain;

	if (!msi_ctrl_valid(dev, ctrl))
		return;

	domain = msi_get_device_domain(dev, ctrl->domid);
	if (!domain)
		return;

	info = domain->host_data;
	ops = info->ops;

	if (ops->domain_free_irqs)
		ops->domain_free_irqs(domain, dev);
	else
		__msi_domain_free_irqs(dev, domain, ctrl);

	if (ops->msi_post_free)
		ops->msi_post_free(domain, dev);

	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_domain_free_descs(dev, ctrl);
}
/**
 * msi_domain_free_irqs_range_locked - Free a range of interrupts from a MSI interrupt domain
 *				       associated to @dev with msi_lock held
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to free (inclusive)
 * @last:	Last index to free (inclusive)
 */
void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= first,
		.last	= last,
	};
	msi_domain_free_locked(dev, &ctrl);
}
/**
 * msi_domain_free_irqs_range - Free a range of interrupts from a MSI interrupt domain
 *				associated to @dev
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to free (inclusive)
 * @last:	Last index to free (inclusive)
 */
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_range_locked(dev, domid, first, last);
	msi_unlock_descs(dev);
}
EXPORT_SYMBOL_GPL(msi_domain_free_irqs_range);
/**
 * msi_domain_free_irqs_all_locked - Free all interrupts from a MSI interrupt domain
 *				     associated to a device
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	The id of the domain to operate on
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid)
{
	msi_domain_free_irqs_range_locked(dev, domid, 0,
					  msi_domain_get_hwsize(dev, domid) - 1);
}
/**
 * msi_domain_free_irqs_all - Free all interrupts from a MSI interrupt domain
 *			      associated to a device
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	The id of the domain to operate on
 */
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_all_locked(dev, domid);
	msi_unlock_descs(dev);
}
/**
 * msi_device_domain_free_wired - Free a wired interrupt in @domain
 * @domain:	The domain to free the interrupt on
 * @virq:	The Linux interrupt number to free
 *
 * This is the counterpart of msi_device_domain_alloc_wired() for the
 * weird wired to MSI converting domains.
 */
void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
{
	struct msi_desc *desc = irq_get_msi_desc(virq);
	struct device *dev = domain->dev;

	if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
		return;

	msi_lock_descs(dev);
	if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) {
		msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index,
						  desc->msi_index);
	}
	msi_unlock_descs(dev);
}
/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}
/**
 * msi_device_has_isolated_msi - True if the device has isolated MSI
 * @dev:	The device to check
 *
 * Isolated MSI means that HW modeled by an irq_domain on the path from the
 * initiating device to the CPU will validate that the MSI message specifies an
 * interrupt number that the device is authorized to trigger. This must block
 * devices from triggering interrupts they are not authorized to trigger.
 * Currently authorization means the MSI vector is one assigned to the device.
 *
 * This is interesting for securing VFIO use cases where a rogue MSI (eg created
 * by abusing a normal PCI MemWr DMA) must not allow the VFIO userspace to
 * impact outside its security domain, eg userspace triggering interrupts on
 * kernel drivers, a VM triggering interrupts on the hypervisor, or a VM
 * triggering interrupts on another VM.
 */
bool msi_device_has_isolated_msi(struct device *dev)
{
	struct irq_domain *domain = dev_get_msi_domain(dev);

	for (; domain; domain = domain->parent)
		if (domain->flags & IRQ_DOMAIN_FLAG_ISOLATED_MSI)
			return true;
	return arch_is_isolated_msi();
}
EXPORT_SYMBOL_GPL(msi_device_has_isolated_msi);
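
/*
 * A minimal usage sketch (modeled on the VFIO use case described above;
 * allow_unsafe_interrupts is a hypothetical module parameter): refuse to
 * hand a device to userspace when MSI isolation is absent, unless an
 * explicit unsafe override was requested.
 *
 *	if (!msi_device_has_isolated_msi(dev) && !allow_unsafe_interrupts)
 *		return -EPERM;
 */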