// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#include "internals.h"
/**
 * alloc_msi_entry - Allocate and initialize an msi_entry
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
			nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}
void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}
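
/*
 * Illustrative sketch (not part of the original file): how a caller would
 * pair alloc_msi_entry() with free_msi_entry(). The vector count and the
 * NULL affinity array are arbitrary values chosen for the example.
 */
static int __maybe_unused msi_entry_example(struct device *dev)
{
	struct msi_desc *desc;

	/* Allocate a descriptor covering two vectors, no affinity hints */
	desc = alloc_msi_entry(dev, 2, NULL);
	if (!desc)
		return -ENOMEM;

	/* A real user would now link desc into the device's MSI list */
	free_msi_entry(desc);
	return 0;
}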
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
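
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * to inspect the address/data pair last written for one of its MSI
 * interrupts can read back the cached message like this.
 */
static void __maybe_unused msi_msg_dump_example(struct device *dev,
						unsigned int irq)
{
	struct msi_msg msg;

	get_cached_msi_msg(irq, &msg);
	dev_dbg(dev, "MSI msg: addr_hi %08x addr_lo %08x data %08x\n",
		msg.address_hi, msg.address_lo, msg.data);
}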
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated with the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce the setting (disables online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
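
/*
 * Illustrative sketch (not part of the original file): an irq_chip for a
 * hierarchical MSI domain can plug msi_domain_set_affinity() in directly as
 * its affinity callback. All names and the callback selection here are
 * hypothetical; a real chip also needs an irq_write_msi_msg() callback.
 */
static struct irq_chip example_msi_chip __maybe_unused = {
	.name			= "example-msi",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
};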
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}
static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/* Unwind all descriptors initialized so far */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}
static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}
static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}
/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}
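
/*
 * Illustrative sketch (not part of the original file): a typical MSI
 * controller driver builds an msi_domain_info and stacks the MSI domain on
 * top of its parent domain. The flags, the example_msi_chip above and the
 * function name are hypothetical placeholders.
 */
static __maybe_unused struct irq_domain *
example_create_msi_domain(struct fwnode_handle *fwnode,
			  struct irq_domain *parent)
{
	static struct msi_domain_info example_msi_domain_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
		.chip	= &example_msi_chip,
	};

	return msi_create_irq_domain(fwnode, &example_msi_domain_info, parent);
}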
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}
/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSI-X supports
	 * masking and MSI does so when the maskbit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg;
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random MSI message.
		 */
		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
			continue;

		irq_data = irq_domain_get_irq_data(domain, desc->irq);
		if (!can_reserve) {
			irqd_clr_can_reserve(irq_data);
			if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
				irqd_set_msi_nomask_quirk(irq_data);
		}
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_entry(desc, dev) {
			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	for_each_msi_entry(desc, dev) {
		struct irq_data *irqd;

		if (desc->irq == virq)
			break;

		irqd = irq_domain_get_irq_data(domain, desc->irq);
		if (irqd_is_activated(irqd))
			irq_domain_deactivate_irq(irqd);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}
/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	return ops->domain_alloc_irqs(domain, dev, nvec);
}
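
/*
 * Illustrative sketch (not part of the original file): how a bus layer on
 * top of an MSI domain allocates its vectors and releases them again. The
 * function name is hypothetical; the MSI descriptors for @dev must have
 * been set up before the allocation call.
 */
static int __maybe_unused example_enable_msi(struct irq_domain *domain,
					     struct device *dev, int nvec)
{
	int ret;

	ret = msi_domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		return ret;

	/* ... request_irq() on the resulting desc->irq numbers, then: */
	msi_domain_free_irqs(domain, dev);
	return 0;
}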
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated with this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}
/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated with @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	ops->domain_free_irqs(domain, dev);
}
/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */