/*
 * linux/kernel/irq/msi.c
 *
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signalled Interrupt for
 * PCI compatible and non PCI compatible devices.
 */
12 #include <linux/types.h>
13 #include <linux/device.h>
14 #include <linux/irq.h>
15 #include <linux/irqdomain.h>
16 #include <linux/msi.h>
17 #include <linux/slab.h>
20 * alloc_msi_entry - Allocate an initialize msi_entry
21 * @dev: Pointer to the device for which this is allocated
22 * @nvec: The number of vectors used in this entry
23 * @affinity: Optional pointer to an affinity mask array size of @nvec
25 * If @affinity is not NULL then a an affinity array[@nvec] is allocated
26 * and the affinity masks from @affinity are copied.
29 alloc_msi_entry(struct device
*dev
, int nvec
, const struct cpumask
*affinity
)
31 struct msi_desc
*desc
;
33 desc
= kzalloc(sizeof(*desc
), GFP_KERNEL
);
37 INIT_LIST_HEAD(&desc
->list
);
39 desc
->nvec_used
= nvec
;
41 desc
->affinity
= kmemdup(affinity
,
42 nvec
* sizeof(*desc
->affinity
), GFP_KERNEL
);
43 if (!desc
->affinity
) {
52 void free_msi_entry(struct msi_desc
*entry
)
54 kfree(entry
->affinity
);
58 void __get_cached_msi_msg(struct msi_desc
*entry
, struct msi_msg
*msg
)
/**
 * get_cached_msi_msg - Copy the cached MSI message of an interrupt
 * @irq:	The interrupt number to look up the descriptor for
 * @msg:	Pointer to the storage which receives the copy
 *
 * Looks up the msi_desc attached to @irq and hands off to
 * __get_cached_msi_msg().
 */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
71 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
72 static inline void irq_chip_write_msi_msg(struct irq_data
*data
,
75 data
->chip
->irq_write_msi_msg(data
, msg
);
79 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
80 * @irq_data: The irq data associated to the interrupt
81 * @mask: The affinity mask to set
82 * @force: Flag to enforce setting (disable online checks)
84 * Intended to be used by MSI interrupt controllers which are
85 * implemented with hierarchical domains.
87 int msi_domain_set_affinity(struct irq_data
*irq_data
,
88 const struct cpumask
*mask
, bool force
)
90 struct irq_data
*parent
= irq_data
->parent_data
;
94 ret
= parent
->chip
->irq_set_affinity(parent
, mask
, force
);
95 if (ret
>= 0 && ret
!= IRQ_SET_MASK_OK_DONE
) {
96 BUG_ON(irq_chip_compose_msi_msg(irq_data
, &msg
));
97 irq_chip_write_msi_msg(irq_data
, &msg
);
103 static void msi_domain_activate(struct irq_domain
*domain
,
104 struct irq_data
*irq_data
)
108 BUG_ON(irq_chip_compose_msi_msg(irq_data
, &msg
));
109 irq_chip_write_msi_msg(irq_data
, &msg
);
112 static void msi_domain_deactivate(struct irq_domain
*domain
,
113 struct irq_data
*irq_data
)
117 memset(&msg
, 0, sizeof(msg
));
118 irq_chip_write_msi_msg(irq_data
, &msg
);
121 static int msi_domain_alloc(struct irq_domain
*domain
, unsigned int virq
,
122 unsigned int nr_irqs
, void *arg
)
124 struct msi_domain_info
*info
= domain
->host_data
;
125 struct msi_domain_ops
*ops
= info
->ops
;
126 irq_hw_number_t hwirq
= ops
->get_hwirq(info
, arg
);
129 if (irq_find_mapping(domain
, hwirq
) > 0)
132 if (domain
->parent
) {
133 ret
= irq_domain_alloc_irqs_parent(domain
, virq
, nr_irqs
, arg
);
138 for (i
= 0; i
< nr_irqs
; i
++) {
139 ret
= ops
->msi_init(domain
, info
, virq
+ i
, hwirq
+ i
, arg
);
142 for (i
--; i
> 0; i
--)
143 ops
->msi_free(domain
, info
, virq
+ i
);
145 irq_domain_free_irqs_top(domain
, virq
, nr_irqs
);
153 static void msi_domain_free(struct irq_domain
*domain
, unsigned int virq
,
154 unsigned int nr_irqs
)
156 struct msi_domain_info
*info
= domain
->host_data
;
159 if (info
->ops
->msi_free
) {
160 for (i
= 0; i
< nr_irqs
; i
++)
161 info
->ops
->msi_free(domain
, info
, virq
+ i
);
163 irq_domain_free_irqs_top(domain
, virq
, nr_irqs
);
166 static const struct irq_domain_ops msi_domain_ops
= {
167 .alloc
= msi_domain_alloc
,
168 .free
= msi_domain_free
,
169 .activate
= msi_domain_activate
,
170 .deactivate
= msi_domain_deactivate
,
173 #ifdef GENERIC_MSI_DOMAIN_OPS
174 static irq_hw_number_t
msi_domain_ops_get_hwirq(struct msi_domain_info
*info
,
175 msi_alloc_info_t
*arg
)
180 static int msi_domain_ops_prepare(struct irq_domain
*domain
, struct device
*dev
,
181 int nvec
, msi_alloc_info_t
*arg
)
183 memset(arg
, 0, sizeof(*arg
));
187 static void msi_domain_ops_set_desc(msi_alloc_info_t
*arg
,
188 struct msi_desc
*desc
)
193 #define msi_domain_ops_get_hwirq NULL
194 #define msi_domain_ops_prepare NULL
195 #define msi_domain_ops_set_desc NULL
196 #endif /* !GENERIC_MSI_DOMAIN_OPS */
198 static int msi_domain_ops_init(struct irq_domain
*domain
,
199 struct msi_domain_info
*info
,
200 unsigned int virq
, irq_hw_number_t hwirq
,
201 msi_alloc_info_t
*arg
)
203 irq_domain_set_hwirq_and_chip(domain
, virq
, hwirq
, info
->chip
,
205 if (info
->handler
&& info
->handler_name
) {
206 __irq_set_handler(virq
, info
->handler
, 0, info
->handler_name
);
207 if (info
->handler_data
)
208 irq_set_handler_data(virq
, info
->handler_data
);
/* Default msi_check op: no restrictions, always succeed. */
static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
220 static struct msi_domain_ops msi_domain_ops_default
= {
221 .get_hwirq
= msi_domain_ops_get_hwirq
,
222 .msi_init
= msi_domain_ops_init
,
223 .msi_check
= msi_domain_ops_check
,
224 .msi_prepare
= msi_domain_ops_prepare
,
225 .set_desc
= msi_domain_ops_set_desc
,
228 static void msi_domain_update_dom_ops(struct msi_domain_info
*info
)
230 struct msi_domain_ops
*ops
= info
->ops
;
233 info
->ops
= &msi_domain_ops_default
;
237 if (ops
->get_hwirq
== NULL
)
238 ops
->get_hwirq
= msi_domain_ops_default
.get_hwirq
;
239 if (ops
->msi_init
== NULL
)
240 ops
->msi_init
= msi_domain_ops_default
.msi_init
;
241 if (ops
->msi_check
== NULL
)
242 ops
->msi_check
= msi_domain_ops_default
.msi_check
;
243 if (ops
->msi_prepare
== NULL
)
244 ops
->msi_prepare
= msi_domain_ops_default
.msi_prepare
;
245 if (ops
->set_desc
== NULL
)
246 ops
->set_desc
= msi_domain_ops_default
.set_desc
;
249 static void msi_domain_update_chip_ops(struct msi_domain_info
*info
)
251 struct irq_chip
*chip
= info
->chip
;
253 BUG_ON(!chip
|| !chip
->irq_mask
|| !chip
->irq_unmask
);
254 if (!chip
->irq_set_affinity
)
255 chip
->irq_set_affinity
= msi_domain_set_affinity
;
259 * msi_create_irq_domain - Create a MSI interrupt domain
260 * @fwnode: Optional fwnode of the interrupt controller
261 * @info: MSI domain info
262 * @parent: Parent irq domain
264 struct irq_domain
*msi_create_irq_domain(struct fwnode_handle
*fwnode
,
265 struct msi_domain_info
*info
,
266 struct irq_domain
*parent
)
268 struct irq_domain
*domain
;
270 if (info
->flags
& MSI_FLAG_USE_DEF_DOM_OPS
)
271 msi_domain_update_dom_ops(info
);
272 if (info
->flags
& MSI_FLAG_USE_DEF_CHIP_OPS
)
273 msi_domain_update_chip_ops(info
);
275 domain
= irq_domain_create_hierarchy(parent
, IRQ_DOMAIN_FLAG_MSI
, 0,
276 fwnode
, &msi_domain_ops
, info
);
278 if (domain
&& !domain
->name
&& info
->chip
)
279 domain
->name
= info
->chip
->name
;
284 int msi_domain_prepare_irqs(struct irq_domain
*domain
, struct device
*dev
,
285 int nvec
, msi_alloc_info_t
*arg
)
287 struct msi_domain_info
*info
= domain
->host_data
;
288 struct msi_domain_ops
*ops
= info
->ops
;
291 ret
= ops
->msi_check(domain
, info
, dev
);
293 ret
= ops
->msi_prepare(domain
, dev
, nvec
, arg
);
298 int msi_domain_populate_irqs(struct irq_domain
*domain
, struct device
*dev
,
299 int virq
, int nvec
, msi_alloc_info_t
*arg
)
301 struct msi_domain_info
*info
= domain
->host_data
;
302 struct msi_domain_ops
*ops
= info
->ops
;
303 struct msi_desc
*desc
;
306 for_each_msi_entry(desc
, dev
) {
307 /* Don't even try the multi-MSI brain damage. */
308 if (WARN_ON(!desc
->irq
|| desc
->nvec_used
!= 1)) {
313 if (!(desc
->irq
>= virq
&& desc
->irq
< (virq
+ nvec
)))
316 ops
->set_desc(arg
, desc
);
317 /* Assumes the domain mutex is held! */
318 ret
= irq_domain_alloc_irqs_hierarchy(domain
, desc
->irq
, 1,
323 irq_set_msi_desc_off(desc
->irq
, 0, desc
);
327 /* Mop up the damage */
328 for_each_msi_entry(desc
, dev
) {
329 if (!(desc
->irq
>= virq
&& desc
->irq
< (virq
+ nvec
)))
332 irq_domain_free_irqs_common(domain
, desc
->irq
, 1);
340 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
341 * @domain: The domain to allocate from
342 * @dev: Pointer to device struct of the device for which the interrupts
344 * @nvec: The number of interrupts to allocate
346 * Returns 0 on success or an error code.
348 int msi_domain_alloc_irqs(struct irq_domain
*domain
, struct device
*dev
,
351 struct msi_domain_info
*info
= domain
->host_data
;
352 struct msi_domain_ops
*ops
= info
->ops
;
353 msi_alloc_info_t arg
;
354 struct msi_desc
*desc
;
357 ret
= msi_domain_prepare_irqs(domain
, dev
, nvec
, &arg
);
361 for_each_msi_entry(desc
, dev
) {
362 ops
->set_desc(&arg
, desc
);
364 virq
= __irq_domain_alloc_irqs(domain
, -1, desc
->nvec_used
,
365 dev_to_node(dev
), &arg
, false,
369 if (ops
->handle_error
)
370 ret
= ops
->handle_error(domain
, desc
, ret
);
372 ops
->msi_finish(&arg
, ret
);
376 for (i
= 0; i
< desc
->nvec_used
; i
++)
377 irq_set_msi_desc_off(virq
, i
, desc
);
381 ops
->msi_finish(&arg
, 0);
383 for_each_msi_entry(desc
, dev
) {
385 if (desc
->nvec_used
== 1)
386 dev_dbg(dev
, "irq %d for MSI\n", virq
);
388 dev_dbg(dev
, "irq [%d-%d] for MSI\n",
389 virq
, virq
+ desc
->nvec_used
- 1);
391 * This flag is set by the PCI layer as we need to activate
392 * the MSI entries before the PCI layer enables MSI in the
393 * card. Otherwise the card latches a random msi message.
395 if (info
->flags
& MSI_FLAG_ACTIVATE_EARLY
) {
396 struct irq_data
*irq_data
;
398 irq_data
= irq_domain_get_irq_data(domain
, desc
->irq
);
399 irq_domain_activate_irq(irq_data
);
407 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev
408 * @domain: The domain to managing the interrupts
409 * @dev: Pointer to device struct of the device for which the interrupts
412 void msi_domain_free_irqs(struct irq_domain
*domain
, struct device
*dev
)
414 struct msi_desc
*desc
;
416 for_each_msi_entry(desc
, dev
) {
418 * We might have failed to allocate an MSI early
419 * enough that there is no IRQ associated to this
420 * entry. If that's the case, don't do anything.
423 irq_domain_free_irqs(desc
->irq
, desc
->nvec_used
);
430 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
431 * @domain: The interrupt domain to retrieve data from
433 * Returns the pointer to the msi_domain_info stored in
434 * @domain->host_data.
436 struct msi_domain_info
*msi_get_domain_info(struct irq_domain
*domain
)
438 return (struct msi_domain_info
*)domain
->host_data
;
441 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */