/*
 * File:        msi.c
 * Purpose:     PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

static void msi_set_enable(struct pci_dev *dev, int enable)
{
        int pos;
        u16 control;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos) {
                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
                control &= ~PCI_MSI_FLAGS_ENABLE;
                if (enable)
                        control |= PCI_MSI_FLAGS_ENABLE;
                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
        }
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
        int pos;
        u16 control;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos) {
                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
                control &= ~PCI_MSIX_FLAGS_ENABLE;
                if (enable)
                        control |= PCI_MSIX_FLAGS_ENABLE;
                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
        }
}

static void msix_flush_writes(unsigned int irq)
{
        struct msi_desc *entry;

        entry = get_irq_msi(irq);
        BUG_ON(!entry || !entry->dev);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
                /* nothing to do */
                break;
        case PCI_CAP_ID_MSIX:
        {
                int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
                readl(entry->mask_base + offset);
                break;
        }
        default:
                BUG();
                break;
        }
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
        struct msi_desc *entry;

        entry = get_irq_msi(irq);
        BUG_ON(!entry || !entry->dev);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
                if (entry->msi_attrib.maskbit) {
                        int pos;
                        u32 mask_bits;

                        pos = (long)entry->mask_base;
                        pci_read_config_dword(entry->dev, pos, &mask_bits);
                        mask_bits &= ~(1);
                        mask_bits |= flag;
                        pci_write_config_dword(entry->dev, pos, mask_bits);
                } else {
                        msi_set_enable(entry->dev, !flag);
                }
                break;
        case PCI_CAP_ID_MSIX:
        {
                int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
                writel(flag, entry->mask_base + offset);
                readl(entry->mask_base + offset);
                break;
        }
        default:
                BUG();
                break;
        }
        entry->msi_attrib.masked = !!flag;
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
        struct msi_desc *entry = get_irq_msi(irq);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
                struct pci_dev *dev = entry->dev;
                int pos = entry->msi_attrib.pos;
                u16 data;

                pci_read_config_dword(dev, msi_lower_address_reg(pos),
                                        &msg->address_lo);
                if (entry->msi_attrib.is_64) {
                        pci_read_config_dword(dev, msi_upper_address_reg(pos),
                                                &msg->address_hi);
                        pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
                } else {
                        msg->address_hi = 0;
                        pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
                }
                msg->data = data;
                break;
        }
        case PCI_CAP_ID_MSIX:
        {
                void __iomem *base;
                base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
                msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
                break;
        }
        default:
                BUG();
        }
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
        struct msi_desc *entry = get_irq_msi(irq);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
                struct pci_dev *dev = entry->dev;
                int pos = entry->msi_attrib.pos;

                pci_write_config_dword(dev, msi_lower_address_reg(pos),
                                        msg->address_lo);
                if (entry->msi_attrib.is_64) {
                        pci_write_config_dword(dev, msi_upper_address_reg(pos),
                                                msg->address_hi);
                        pci_write_config_word(dev, msi_data_reg(pos, 1),
                                                msg->data);
                } else {
                        pci_write_config_word(dev, msi_data_reg(pos, 0),
                                                msg->data);
                }
                break;
        }
        case PCI_CAP_ID_MSIX:
        {
                void __iomem *base;
                base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

                writel(msg->address_lo,
                        base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
                writel(msg->address_hi,
                        base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
                writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
                break;
        }
        default:
                BUG();
        }
        entry->msg = *msg;
}

void mask_msi_irq(unsigned int irq)
{
        msi_set_mask_bit(irq, 1);
        msix_flush_writes(irq);
}

void unmask_msi_irq(unsigned int irq)
{
        msi_set_mask_bit(irq, 0);
        msix_flush_writes(irq);
}

static int msi_free_irqs(struct pci_dev* dev);

static struct msi_desc* alloc_msi_entry(void)
{
        struct msi_desc *entry;

        entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
        if (!entry)
                return NULL;

        INIT_LIST_HEAD(&entry->list);
        entry->irq = 0;
        entry->dev = NULL;

        return entry;
}

#ifdef CONFIG_PM
static void __pci_restore_msi_state(struct pci_dev *dev)
{
        int pos;
        u16 control;
        struct msi_desc *entry;

        if (!dev->msi_enabled)
                return;

        entry = get_irq_msi(dev->irq);
        pos = entry->msi_attrib.pos;

        pci_intx(dev, 0);               /* disable intx */
        msi_set_enable(dev, 0);
        write_msi_msg(dev->irq, &entry->msg);
        if (entry->msi_attrib.maskbit)
                msi_set_mask_bit(dev->irq, entry->msi_attrib.masked);

        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
        control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
        if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
                control |= PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
        int pos;
        struct msi_desc *entry;
        u16 control;

        if (!dev->msix_enabled)
                return;

        /* route the table */
        pci_intx(dev, 0);               /* disable intx */
        msix_set_enable(dev, 0);

        list_for_each_entry(entry, &dev->msi_list, list) {
                write_msi_msg(entry->irq, &entry->msg);
                msi_set_mask_bit(entry->irq, entry->msi_attrib.masked);
        }

        BUG_ON(list_empty(&dev->msi_list));
        entry = list_entry(dev->msi_list.next, struct msi_desc, list);
        pos = entry->msi_attrib.pos;
        pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
        control &= ~PCI_MSIX_FLAGS_MASKALL;
        control |= PCI_MSIX_FLAGS_ENABLE;
        pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
        __pci_restore_msi_state(dev);
        __pci_restore_msix_state(dev);
}
#endif  /* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates successful setup of
 * entry zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
        struct msi_desc *entry;
        int pos, ret;
        u16 control;

        msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        /* MSI Entry Initialization */
        entry = alloc_msi_entry();
        if (!entry)
                return -ENOMEM;

        entry->msi_attrib.type = PCI_CAP_ID_MSI;
        entry->msi_attrib.is_64 = is_64bit_address(control);
        entry->msi_attrib.entry_nr = 0;
        entry->msi_attrib.maskbit = is_mask_bit_support(control);
        entry->msi_attrib.masked = 1;
        entry->msi_attrib.default_irq = dev->irq;       /* Save IOAPIC IRQ */
        entry->msi_attrib.pos = pos;
        if (is_mask_bit_support(control)) {
                entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
                                is_64bit_address(control));
        }
        entry->dev = dev;
        if (entry->msi_attrib.maskbit) {
                unsigned int maskbits, temp;
                /* All MSIs are unmasked by default, Mask them all */
                pci_read_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        &maskbits);
                temp = (1 << multi_msi_capable(control));
                temp = ((temp - 1) & ~temp);
                maskbits |= temp;
                pci_write_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        maskbits);
        }
        list_add_tail(&entry->list, &dev->msi_list);

        /* Configure MSI capability structure */
        ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
        if (ret) {
                msi_free_irqs(dev);
                return ret;
        }

        /* Set MSI enabled bits */
        pci_intx(dev, 0);               /* disable intx */
        msi_set_enable(dev, 1);
        dev->msi_enabled = 1;

        dev->irq = entry->irq;
        return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with a
 * single MSI-X irq. A return of zero indicates successful setup of the
 * requested MSI-X entries with allocated irqs; a non-zero return indicates
 * failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
                                struct msix_entry *entries, int nvec)
{
        struct msi_desc *entry;
        int pos, i, j, nr_entries, ret;
        unsigned long phys_addr;
        u32 table_offset;
        u16 control;
        u8 bir;
        void __iomem *base;

        msix_set_enable(dev, 0); /* Ensure msix is disabled as I set it up */

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        /* Request & Map MSI-X table region */
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        nr_entries = multi_msix_capable(control);

        pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
        bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
        table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
        phys_addr = pci_resource_start(dev, bir) + table_offset;
        base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
        if (base == NULL)
                return -ENOMEM;

        /* MSI-X Table Initialization */
        for (i = 0; i < nvec; i++) {
                entry = alloc_msi_entry();
                if (!entry)
                        break;

                j = entries[i].entry;
                entry->msi_attrib.type = PCI_CAP_ID_MSIX;
                entry->msi_attrib.is_64 = 1;
                entry->msi_attrib.entry_nr = j;
                entry->msi_attrib.maskbit = 1;
                entry->msi_attrib.masked = 1;
                entry->msi_attrib.default_irq = dev->irq;
                entry->msi_attrib.pos = pos;
                entry->dev = dev;
                entry->mask_base = base;

                list_add_tail(&entry->list, &dev->msi_list);
        }

        ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret) {
                int avail = 0;
                list_for_each_entry(entry, &dev->msi_list, list) {
                        if (entry->irq != 0) {
                                avail++;
                        }
                }

                msi_free_irqs(dev);

                /* If we had some success report the number of irqs
                 * we succeeded in setting up.
                 */
                if (avail == 0)
                        avail = ret;
                return avail;
        }

        i = 0;
        list_for_each_entry(entry, &dev->msi_list, list) {
                entries[i].vector = entry->irq;
                set_irq_msi(entry->irq, entry);
                i++;
        }
        /* Set MSI-X enabled bits */
        pci_intx(dev, 0);               /* disable intx */
        msix_set_enable(dev, 1);
        dev->msix_enabled = 1;

        return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested ?
 * @type: are we checking for MSI or MSI-X ?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
        struct pci_bus *bus;
        int ret;

        /* MSI must be globally enabled and supported by the device */
        if (!pci_msi_enable || !dev || dev->no_msi)
                return -EINVAL;

        /*
         * You can't ask to have 0 or less MSIs configured.
         *  a) it's stupid ..
         *  b) the list manipulation code assumes nvec >= 1.
         */
        if (nvec < 1)
                return -ERANGE;

        /* Any bridge which does NOT route MSI transactions from its
         * secondary bus to its primary bus must set the NO_MSI flag on
         * the secondary pci_bus.
         * We expect only arch-specific PCI host bus controller drivers
         * or quirks for specific PCI bridges to be setting NO_MSI.
         */
        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                        return -EINVAL;

        ret = arch_msi_check_device(dev, nvec, type);
        if (ret)
                return ret;

        if (!pci_find_capability(dev, type))
                return -EINVAL;

        return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with
 * a single MSI irq when its software driver requests MSI mode to be
 * enabled on its hardware device function. A return of zero indicates
 * successful setup of entry zero with the new MSI irq; a non-zero return
 * indicates failure. (An illustrative usage sketch follows this function.)
 **/
int pci_enable_msi(struct pci_dev* dev)
{
        int status;

        status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
        if (status)
                return status;

        WARN_ON(!!dev->msi_enabled);

        /* Check whether driver already requested MSI-X irqs */
        if (dev->msix_enabled) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI. "
                        "Device already has MSI-X enabled\n",
                        pci_name(dev));
                return -EINVAL;
        }
        status = msi_capability_init(dev);
        return status;
}
EXPORT_SYMBOL(pci_enable_msi);

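/*
 * Editorial usage sketch, not part of the original msi.c: how a driver
 * might pair pci_enable_msi()/pci_disable_msi() with request_irq()/
 * free_irq().  The function names, the "example" label and the handler
 * argument are assumptions made purely for illustration.
 */
#if 0   /* illustrative example only, never compiled */
static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
                             void *drvdata)
{
        int ret;

        /* Try MSI first; if it fails, dev->irq still holds the INTx irq. */
        ret = pci_enable_msi(pdev);
        if (ret)
                dev_info(&pdev->dev, "MSI not available, using INTx\n");

        ret = request_irq(pdev->irq, handler, 0, "example", drvdata);
        if (ret && pdev->msi_enabled)
                pci_disable_msi(pdev);
        return ret;
}

static void example_teardown_irq(struct pci_dev *pdev, void *drvdata)
{
        free_irq(pdev->irq, drvdata);
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);  /* restores dev->irq to the INTx irq */
}
#endif
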
void pci_disable_msi(struct pci_dev* dev)
{
        struct msi_desc *entry;
        int default_irq;

        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;

        msi_set_enable(dev, 0);
        pci_intx(dev, 1);               /* enable intx */
        dev->msi_enabled = 0;

        BUG_ON(list_empty(&dev->msi_list));
        entry = list_entry(dev->msi_list.next, struct msi_desc, list);
        if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
                return;
        }

        default_irq = entry->msi_attrib.default_irq;
        msi_free_irqs(dev);

        /* Restore dev->irq to its default pin-assertion irq */
        dev->irq = default_irq;
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev* dev)
{
        struct msi_desc *entry, *tmp;

        list_for_each_entry(entry, &dev->msi_list, list) {
                if (entry->irq)
                        BUG_ON(irq_has_action(entry->irq));
        }

        arch_teardown_msi_irqs(dev);

        list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
                if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
                        writel(1, entry->mask_base + entry->msi_attrib.entry_nr
                                  * PCI_MSIX_ENTRY_SIZE
                                  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

                        if (list_is_last(&entry->list, &dev->msi_list))
                                iounmap(entry->mask_base);
                }
                list_del(&entry->list);
                kfree(entry);
        }

        return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested irqs when its software driver requests MSI-X mode
 * to be enabled on its hardware device function. A return of zero
 * indicates the successful configuration of the MSI-X capability structure
 * with newly allocated MSI-X irqs. A return of < 0 indicates a failure,
 * while a return of > 0 indicates that the driver requested more irqs than
 * are available. The driver should use the returned value to re-send
 * its request. (An illustrative usage sketch follows this function.)
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
        int status, pos, nr_entries;
        int i, j;
        u16 control;

        if (!entries)
                return -EINVAL;

        status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
        if (status)
                return status;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        nr_entries = multi_msix_capable(control);
        if (nvec > nr_entries)
                return -EINVAL;

        /* Check for any invalid entries */
        for (i = 0; i < nvec; i++) {
                if (entries[i].entry >= nr_entries)
                        return -EINVAL;         /* invalid entry */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
                                return -EINVAL; /* duplicate entry */
                }
        }
        WARN_ON(!!dev->msix_enabled);

        /* Check whether driver already requested an MSI irq */
        if (dev->msi_enabled) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
                       "Device already has an MSI irq assigned\n",
                       pci_name(dev));
                return -EINVAL;
        }
        status = msix_capability_init(dev, entries, nvec);
        return status;
}
EXPORT_SYMBOL(pci_enable_msix);

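/*
 * Editorial usage sketch, not part of the original msi.c: shows the retry
 * convention documented above -- a positive return from pci_enable_msix()
 * is the number of vectors the caller could retry with.  The function name
 * and the caller-provided entries array are assumptions made purely for
 * illustration.
 */
#if 0   /* illustrative example only, never compiled */
static int example_enable_msix(struct pci_dev *pdev,
                               struct msix_entry *entries, int nvec)
{
        int i, ret;

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;

        ret = pci_enable_msix(pdev, entries, nvec);
        while (ret > 0) {
                /* Fewer vectors are available than requested: retry. */
                nvec = ret;
                ret = pci_enable_msix(pdev, entries, nvec);
        }
        if (ret < 0)
                return ret;     /* MSI-X could not be enabled at all */

        /* entries[0..nvec-1].vector now hold the allocated irq numbers. */
        return nvec;
}
#endif
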
static void msix_free_all_irqs(struct pci_dev *dev)
{
        msi_free_irqs(dev);
}

void pci_disable_msix(struct pci_dev* dev)
{
        if (!pci_msi_enable || !dev || !dev->msix_enabled)
                return;

        msix_set_enable(dev, 0);
        pci_intx(dev, 1);               /* enable intx */
        dev->msix_enabled = 0;

        msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal, when the device function is hot-removed.
 * All MSI/MSI-X irqs previously assigned to this device function, if any,
 * are reclaimed to the unused state, from which they may be reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
        if (!pci_msi_enable || !dev)
                return;

        if (dev->msi_enabled)
                msi_free_irqs(dev);

        if (dev->msix_enabled)
                msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
        pci_msi_enable = 0;
}

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
        INIT_LIST_HEAD(&dev->msi_list);
}

/* Arch hooks */

int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
        return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
        return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        struct msi_desc *entry;
        int ret;

        list_for_each_entry(entry, &dev->msi_list, list) {
                ret = arch_setup_msi_irq(dev, entry);
                if (ret)
                        return ret;
        }

        return 0;
}

void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
        return;
}

void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
        struct msi_desc *entry;

        list_for_each_entry(entry, &dev->msi_list, list) {
                if (entry->irq != 0)
                        arch_teardown_msi_irq(entry->irq);
        }
}