/*
 * Purpose: PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;
static int last_alloc_vector = 0;
static int nr_released_vectors = 0;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices = 0;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1 };
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR, 0 };
#endif

static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
	memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
}

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache",
			NR_IRQS * sizeof(struct msi_desc),
			0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;
		u32 mask_bits;

		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}

#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msg_address address;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;

		if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
			return;

		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address.lo_address.value);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
			MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address.lo_address.value);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address.lo_address.value = readl(entry->mask_base + offset);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
			MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
		writel(address.lo_address.value, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}

#ifdef CONFIG_IRQBALANCE
static inline void move_msi(int vector)
{
	if (!cpus_empty(pending_irq_balance_cpumask[vector])) {
		set_msi_affinity(vector, pending_irq_balance_cpumask[vector]);
		cpus_clear(pending_irq_balance_cpumask[vector]);
	}
}
#endif /* CONFIG_IRQBALANCE */
#endif /* CONFIG_SMP */

static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}

static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

static void release_msi(unsigned int vector);
static void shutdown_msi_irq(unsigned int vector)
{
	release_msi(vector);
}

#define shutdown_msi_irq_wo_maskbit	shutdown_msi_irq
static void enable_msi_irq_wo_maskbit(unsigned int vector) {}
static void disable_msi_irq_wo_maskbit(unsigned int vector) {}
static void ack_msi_irq_wo_maskbit(unsigned int vector) {}
static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_msi(vector);
	ack_APIC_irq();
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);
	unmask_MSI_irq(vector);

	return 0;	/* never anything pending */
}

#define shutdown_msi_irq_w_maskbit	shutdown_msi_irq
#define enable_msi_irq_w_maskbit	unmask_MSI_irq
#define disable_msi_irq_w_maskbit	mask_MSI_irq
#define ack_msi_irq_w_maskbit		mask_MSI_irq

static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_msi(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq_w_maskbit,
	.enable		= enable_msi_irq_w_maskbit,
	.disable	= disable_msi_irq_w_maskbit,
	.ack		= ack_msi_irq_w_maskbit,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_irq_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq_w_maskbit,
	.enable		= enable_msi_irq_w_maskbit,
	.disable	= disable_msi_irq_w_maskbit,
	.ack		= ack_msi_irq_w_maskbit,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_irq_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq_wo_maskbit,
	.enable		= enable_msi_irq_wo_maskbit,
	.disable	= disable_msi_irq_wo_maskbit,
	.ack		= ack_msi_irq_wo_maskbit,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_irq_affinity
};

static void msi_data_init(struct msg_data *msi_data,
			  unsigned int vector)
{
	memset(msi_data, 0, sizeof(struct msg_data));
	msi_data->vector = (u8)vector;
	msi_data->delivery_mode = MSI_DELIVERY_MODE;
	msi_data->level = MSI_LEVEL_MODE;
	msi_data->trigger = MSI_TRIGGER_MODE;
}

static void msi_address_init(struct msg_address *msi_address)
{
	unsigned int dest_id;

	memset(msi_address, 0, sizeof(struct msg_address));
	msi_address->hi_address = (u32)0;
	dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
	msi_address->lo_address.u.dest_mode = MSI_DEST_MODE;
	msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
	msi_address->lo_address.u.dest_id = dest_id;
	msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT);
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);

static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock ensures that each successfully allocated MSI vector is
	 * unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI has no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}

static int get_new_vector(void)
{
	int vector;

	if ((vector = assign_msi_vector()) > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	if ((status = msi_cache_init()) < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}
	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}

static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = (struct msi_desc*) kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(struct msi_desc));
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void irq_handler_init(int cap_id, int pos, int mask)
{
	spin_lock(&irq_desc[pos].lock);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].handler = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].handler = &msi_irq_w_maskbit_type;
	}
	spin_unlock(&irq_desc[pos].lock);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		u16 cmd;

		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

static void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI_enable bit to disable MSI */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		u16 cmd;

		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* A pre-assigned MSI vector for this device already
		   exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}

void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI vector, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates the successful
 * setup of entry zero with the new MSI vector, non-zero otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	struct msg_address address;
	struct msg_data data;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	if (!(entry = alloc_msi_entry()))
		return -ENOMEM;

	if ((vector = get_msi_vector(dev)) < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	msi_address_init(&address);
	msi_data_init(&data, vector);
	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
	pci_write_config_dword(dev, msi_lower_address_reg(pos),
			address.lo_address.value);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address.hi_address);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), *((u32 *)&data));
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), *((u32 *)&data));
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;

		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * Setup the MSI-X capability structure of the device function with a
 * single MSI-X vector. A return of zero indicates the successful setup of
 * the requested MSI-X entries with allocated vectors, non-zero otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	struct msg_address address;
	struct msg_data data;
	int vector, pos, i, j, nr_entries, temp = 0;
	u32 phys_addr, table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	pci_read_config_dword(dev, msix_table_offset_reg(pos),
		&table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	phys_addr = pci_resource_start(dev, bir);
	phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		if ((vector = get_msi_vector(dev)) < 0)
			break;

		j = entries[i].entry;
		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		msi_address_init(&address);
		msi_data_init(&data, vector);
		entry->msi_attrib.current_cpu =
			((address.lo_address.u.dest_id >>
			MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
		writel(address.lo_address.value,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address.hi_address,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(*(u32 *)&data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a
 * single MSI vector when its software driver requests MSI mode to be
 * enabled on its hardware device function. A return of zero indicates
 * the successful setup of entry zero with the new MSI vector,
 * non-zero otherwise.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status = -EINVAL;
	u16 control;

	if (!pci_msi_enable || !dev)
		return status;

	temp = dev->irq;

	if ((status = msi_init()) < 0)
		return status;

	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSI_FLAGS_ENABLE)
		return 0;	/* Already in MSI mode */

	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* An MSI vector was previously assigned to this device */
		unsigned long flags;

		spin_lock_irqsave(&msi_lock, flags);
		if (!vector_irq[dev->irq]) {
			msi_desc[dev->irq]->msi_attrib.state = 0;
			vector_irq[dev->irq] = -1;
			nr_released_vectors--;
			spin_unlock_irqrestore(&msi_lock, flags);
			enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
			return 0;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		dev->irq = temp;
	}
	/* Check whether driver already requested MSI-X vectors */
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
			printk(KERN_INFO "PCI: %s: Can't enable MSI. "
			       "Device already has MSI-X vectors assigned\n",
			       pci_name(dev));
			dev->irq = temp;
			return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}

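/*
 * Illustrative sketch (not part of the original file): how a driver might
 * switch a device function to MSI mode through this interface. The names
 * my_driver_setup_msi, my_isr, my_dev and my_priv are hypothetical; the
 * calls pci_enable_msi(), request_irq() and pci_disable_msi() are the
 * exported APIs used in the order this file expects. Kept under #if 0 so it
 * does not affect the build.
 */
#if 0
/* hypothetical interrupt handler provided elsewhere by the driver */
static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs);

static int my_driver_setup_msi(struct pci_dev *my_dev, void *my_priv)
{
	int err;

	/* On success, my_dev->irq now carries the new MSI vector */
	err = pci_enable_msi(my_dev);
	if (err)
		return err;	/* caller may fall back to INTx */

	err = request_irq(my_dev->irq, my_isr, 0, "my_driver", my_priv);
	if (err)
		pci_disable_msi(my_dev);	/* undo on failure */
	return err;
}
#endif
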
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		vector_irq[dev->irq] = 0;	/* free it */
		nr_released_vectors++;
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
					PCI_CAP_ID_MSI);
	}
}

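/*
 * Illustrative sketch (not part of the original file): the teardown order
 * that pci_disable_msi() expects. The vector must be released with
 * free_irq() first; calling pci_disable_msi() while the handler is still
 * registered triggers the warning and BUG_ON() above. my_dev and my_priv
 * are the hypothetical objects from the enable sketch.
 */
#if 0
static void my_driver_teardown_msi(struct pci_dev *my_dev, void *my_priv)
{
	free_irq(my_dev->irq, my_priv);	/* drop the handler on the MSI vector */
	pci_disable_msi(my_dev);	/* restore dev->irq to the INTx vector */
}
#endif
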
static void release_msi(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == vector) {
			/*
			 * Detect last MSI-X vector to be released.
			 * Release the MSI-X memory-mapped table.
			 */
			int pos, nr_entries;
			u32 phys_addr, table_offset;
			u16 control;
			u8 bir;

			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			nr_entries = multi_msix_capable(control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			phys_addr = pci_resource_start(dev, bir);
			phys_addr += (u32)(table_offset &
				~PCI_MSIX_FLAGS_BIRMASK);
			iounmap(base);
		}
	}

	return 0;
}

static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
	int vector = head, tail = 0;
	int i, j = 0, nr_entries = 0;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	while (head != tail) {
		nr_entries++;
		tail = msi_desc[vector]->link.tail;
		if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
			j = vector;
		vector = tail;
	}
	if (*nvec > nr_entries) {
		spin_unlock_irqrestore(&msi_lock, flags);
		*nvec = nr_entries;
		return -EINVAL;
	}
	vector = ((j > 0) ? j : head);
	for (i = 0; i < *nvec; i++) {
		j = msi_desc[vector]->msi_attrib.entry_nr;
		msi_desc[vector]->msi_attrib.state = 0;	/* Mark it not active */
		vector_irq[vector] = -1;		/* Mark it busy */
		nr_released_vectors--;
		entries[i].vector = vector;
		if (j != (entries + i)->entry) {
			base = msi_desc[vector]->mask_base;
			msi_desc[vector]->msi_attrib.entry_nr =
				(entries + i)->entry;
			writel(readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
			writel(readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
			writel((readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
				base + (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET);
		}
		vector = msi_desc[vector]->link.tail;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested vectors upon its software driver call to request
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of the MSI-X capability structure
 * with newly allocated MSI-X vectors. A return of < 0 indicates a failure.
 * A return of > 0 indicates that the driver requested more vectors than
 * are available; the driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable || !dev || !entries)
		return -EINVAL;

	if ((status = msi_init()) < 0)
		return status;

	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		return -EINVAL;		/* Already in MSI-X mode */

	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		/* MSI-X vectors were previously assigned to this device */
		nr_entries = nvec;
		/* Reroute MSI-X table */
		if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
			/* #requested > #previous-assigned */
			dev->irq = temp;
			return nr_entries;
		}
		dev->irq = temp;
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
		return 0;
	}
	/* Check whether driver already requested an MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vector resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default, to avoid any single MSI-X driver taking all available
	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}

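/*
 * Illustrative sketch (not part of the original file): requesting a block
 * of MSI-X vectors. The entry indices and the "retry with fewer vectors"
 * contract follow the kernel-doc above; MY_NVEC, my_driver_setup_msix and
 * my_dev are hypothetical driver-side names. Kept under #if 0 so it does
 * not affect the build.
 */
#if 0
#define MY_NVEC 4

static int my_driver_setup_msix(struct pci_dev *my_dev)
{
	struct msix_entry my_entries[MY_NVEC];
	int i, err;

	for (i = 0; i < MY_NVEC; i++)
		my_entries[i].entry = i;	/* which table entries we want */

	err = pci_enable_msix(my_dev, my_entries, MY_NVEC);
	if (err > 0)
		return -ENOSPC;	/* fewer than MY_NVEC vectors available;
				   a real driver could retry with 'err' */
	if (err < 0)
		return err;

	/* on success, my_entries[i].vector is what gets passed to request_irq() */
	return 0;
}
#endif
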
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		while (head != tail) {
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0;	/* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			dev->irq = temp;
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		} else {
			dev->irq = temp;
			disable_msi_mode(dev,
				pci_find_capability(dev, PCI_CAP_ID_MSIX),
				PCI_CAP_ID_MSIX);
		}
	}
}

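/*
 * Illustrative sketch (not part of the original file): the matching MSI-X
 * teardown. Every vector handed out in my_entries[] must be released with
 * free_irq() before pci_disable_msix() is called, otherwise the warning and
 * BUG_ON() above fire. my_entries, MY_NVEC and my_priv are the hypothetical
 * names from the enable sketch.
 */
#if 0
static void my_driver_teardown_msix(struct pci_dev *my_dev,
	struct msix_entry *my_entries, void *my_priv)
{
	int i;

	for (i = 0; i < MY_NVEC; i++)
		free_irq(my_entries[i].vector, my_priv);
	pci_disable_msix(my_dev);	/* restore dev->irq to its INTx vector */
}
#endif
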
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during a hotplug remove, when the device function is
 * hot-removed. All previously assigned MSI/MSI-X vectors, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Force to release the MSI-X memory-mapped table */
			u32 phys_addr, table_offset;
			u16 control;
			u8 bir;

			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			phys_addr = pci_resource_start(dev, bir);
			phys_addr += (u32)(table_offset &
				~PCI_MSIX_FLAGS_BIRMASK);
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);