/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
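/* Illustrative usage sketch (not part of the driver itself): callers batch
 * per-page IOMMU mappings with interrupts disabled, mirroring what
 * pci_4v_alloc_consistent() and pci_4v_map_single() do below:
 *
 *	local_irq_save(flags);
 *	pci_iommu_batch_start(pdev, prot, entry);
 *	for (n = 0; n < npages; n++)
 *		if (pci_iommu_batch_add(base_paddr + (n * IO_PAGE_SIZE)) < 0L)
 *			goto iommu_map_fail;
 *	if (pci_iommu_batch_end() < 0L)
 *		goto iommu_map_fail;
 *	local_irq_restore(flags);
 */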
/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}
/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}
/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
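/* Usage note (illustrative): the arena is a simple first-fit bitmap allocator
 * over IOTSB entries.  Every caller in this file serializes pci_arena_alloc()
 * and pci_arena_free() under iommu->lock, e.g.:
 *
 *	spin_lock_irqsave(&iommu->lock, flags);
 *	entry = pci_arena_alloc(&iommu->arena, npages);
 *	spin_unlock_irqrestore(&iommu->lock, flags);
 */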
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1;
}
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
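/* Consumer sketch (illustrative, not part of this file): device drivers reach
 * the operations above through the generic PCI DMA API, which the sparc64 PCI
 * code dispatches through this ops table on sun4v systems, e.g.:
 *
 *	void *buf = pci_alloc_consistent(pdev, len, &dma_handle);
 *	dma_addr_t addr = pci_map_single(pdev, ptr, len, PCI_DMA_TODEVICE);
 *	...
 *	pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);
 *	pci_free_consistent(pdev, len, buf, dma_handle);
 */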
/* SUN4V PCI configuration space accessors. */

struct pdev_entry {
	struct pdev_entry	*next;
	u32			devhandle;
	unsigned int		bus;
	unsigned int		device;
	unsigned int		func;
};

#define PDEV_HTAB_SIZE	16
#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	unsigned int val;

	val = (devhandle ^ (devhandle >> 4));
	val ^= bus;
	val ^= device;
	val ^= func;

	return val & PDEV_HTAB_MASK;
}
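/* pdev_htab memoizes which (devhandle, bus, device, function) tuples
 * correspond to devices actually present in the OBP device tree, so that
 * config space accesses to non-existent devices can be rejected cheaply in
 * pci_sun4v_out_of_range() instead of trapping into the hypervisor.
 */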
static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device,
			 unsigned int func)
{
	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
	struct pdev_entry **slot;

	if (!p)
		return -ENOMEM;

	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	p->next = *slot;
	*slot = p;

	p->devhandle = devhandle;
	p->bus = bus;
	p->device = device;
	p->func = func;

	return 0;
}
/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = toplevel_node->child;

	while (toplevel_node != NULL) {
		struct linux_prom_pci_registers *regs;
		struct property *prop;
		int ret;

		ret = obp_find(toplevel_node, bus, devfn);
		if (ret != 0)
			return ret;

		prop = of_find_property(toplevel_node, "reg", NULL);
		if (!prop)
			goto next_sibling;

		regs = prop->value;
		if (((regs->phys_hi >> 16) & 0xff) == bus &&
		    ((regs->phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = toplevel_node->sibling;
	}

	return toplevel_node != NULL;
}
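/* Example (illustrative): the OBP "reg" property encodes the address in
 * phys_hi with the bus number in bits 23:16 and devfn in bits 15:8, so a
 * device at bus 2, devfn 0x08 (slot 1, function 0) matches when
 * ((regs->phys_hi >> 16) & 0xff) == 2 and ((regs->phys_hi >> 8) & 0xff) == 0x08.
 */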
static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p;

	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	while (p) {
		if (p->devhandle == devhandle &&
		    p->bus == bus &&
		    p->device == device &&
		    p->func == func)
			break;

		p = p->next;
	}

	return p;
}
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	struct property *prop;
	struct device_node *dp;

	if ((dp = p->pbm_A.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_A.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_A);
	}
	if ((dp = p->pbm_B.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_B.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
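/* Note (illustrative): the type field extracted from child_phys_hi follows
 * the standard OpenFirmware PCI "ranges" space code (bits 25:24 of phys.hi):
 * 1 selects I/O space and 2 selects 32-bit memory space, which is why only
 * those two cases populate io_space and mem_space above.
 */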
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int tsbsize;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;

		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 256;
			break;

		default:
			prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
			prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}
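/* Worked example (illustrative): a 2GB virtual DMA window (vdma[1] ==
 * 0x80000000) selects tsbsize = 256 * 8K = 2MB of IOTSB, i.e.
 * 2MB / sizeof(iopte_t) = 256K entries; at one IO_PAGE (8KB) per entry that
 * covers exactly the 2GB window, and dma_addr_mask becomes
 * vdma[0] | 0x7fffffff.
 */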
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	struct property *prop;
	unsigned int *busrange;

	prop = of_find_property(pbm->prom_node, "bus-range", NULL);

	busrange = prop->value;

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTX only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 * 	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
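/* Decode sketch (illustrative): the requester ID packs bus/device/function
 * using the masks above, e.g.:
 *
 *	bus    = (ep->req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
 *	device = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
 *	func   = (ep->req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
 */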
/* For now this just runs as a pre-handler for the real interrupt handler.
 * So we just walk through the queue and ACK all the entries, update the
 * head pointer, and return.
 *
 * In the longer term it would be nice to do something more integrated
 * wherein we can pass in some of this MSI info to the drivers.  This
 * would be most useful for PCIe fabric error messages, although we could
 * invoke those directly from the loop here in order to pass the info around.
 */
static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
{
	struct pci_pbm_info *pbm = data1;
	struct pci_sun4v_msiq_entry *base, *ep;
	unsigned long msiqid, orig_head, head, type, err;

	msiqid = (unsigned long) data2;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
	if (unlikely(err))
		goto hv_error_get;

	if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
		goto bad_offset;

	head /= sizeof(struct pci_sun4v_msiq_entry);
	orig_head = head;
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				   (pbm->msiq_ent_count *
				    sizeof(struct pci_sun4v_msiq_entry))));
	ep = &base[head];
	while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
		type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
		if (unlikely(type != MSIQ_TYPE_MSI32 &&
			     type != MSIQ_TYPE_MSI64))
			goto bad_type;

		pci_sun4v_msi_setstate(pbm->devhandle,
				       ep->msi_data /* msi_num */,
				       HV_MSISTATE_IDLE);

		/* Clear the entry.  */
		ep->version_type &= ~MSIQ_TYPE_MASK;

		/* Go to next entry in ring.  */
		head++;
		if (head >= pbm->msiq_ent_count)
			head = 0;
		ep = &base[head];
	}

	if (likely(head != orig_head)) {
		/* ACK entries by updating head pointer.  */
		head *= sizeof(struct pci_sun4v_msiq_entry);
		err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
		if (unlikely(err))
			goto hv_error_set;
	}
	return;

hv_error_set:
	printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
	goto hv_error_cont;

hv_error_get:
	printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);

hv_error_cont:
	printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
	       pbm->devhandle, msiqid, head);
	return;

bad_offset:
	printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
	       head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
	return;

bad_type:
	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
	return;
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}
static int msi_queue_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	u32 *val;
	int len;

	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->prom_node,
				      "msi-eq-to-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->prom_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_queue_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
		       "addr64[0x%lx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
		       pbm->name,
		       pbm->msi_queues);
	}

	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}
static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
				   struct pci_dev *pdev,
				   struct msi_desc *entry)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	unsigned long devino, msiqid;
	struct msi_msg msg;
	int msi_num, err;

	*virt_irq_p = 0;

	msi_num = alloc_msi(pbm);
	if (msi_num < 0)
		return msi_num;

	devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
				 pbm->msiq_first_devino,
				 (pbm->msiq_first_devino +
				  pbm->msiq_num));
	err = -ENOMEM;
	if (!devino)
		goto out_err;

	set_irq_msi(*virt_irq_p, entry);

	msiqid = ((devino - pbm->msiq_first_devino) +
		  pbm->msiq_first);

	err = -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		goto out_err;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		goto out_err;

	if (pci_sun4v_msi_setmsiq(pbm->devhandle,
				  msi_num, msiqid,
				  (entry->msi_attrib.is_64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		goto out_err;

	if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
		goto out_err;

	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
		goto out_err;

	pcp->msi_num = msi_num;

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi_num;
	write_msi_msg(*virt_irq_p, &msg);

	irq_install_pre_handler(*virt_irq_p,
				pci_sun4v_msi_prehandler,
				pbm, (void *) msiqid);

	return 0;

out_err:
	free_msi(pbm, msi_num);
	sun4v_destroy_msi(*virt_irq_p);
	*virt_irq_p = 0;
	return err;
}
static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
				       struct pci_dev *pdev)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	unsigned long msiqid, err;
	unsigned int msi_num;

	msi_num = pcp->msi_num;
	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
	if (err) {
		printk(KERN_ERR "%s: getmsiq gives error %lu\n",
		       pbm->name, err);
		return;
	}

	pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
	pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);

	free_msi(pbm, msi_num);

	/* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
	 * allocation.
	 */
	sun4v_destroy_msi(virt_irq);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	struct property *prop;
	int len, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = dp;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	prop = of_find_property(dp, "ranges", &len);
	pbm->pbm_ranges = prop->value;
	pbm->num_pbm_ranges =
		(len / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	prop = of_find_property(dp, "interrupt-map", &len);
	pbm->pbm_intmap = prop->value;
	pbm->num_pbm_intmap =
		(len / sizeof(struct linux_prom_pci_intmap));

	prop = of_find_property(dp, "interrupt-map-mask", NULL);
	pbm->pbm_intmask = prop->value;

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);

	pdev_htab_populate(pbm);
}
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
#ifdef CONFIG_PCI_MSI
	p->setup_msi_irq = pci_sun4v_setup_msi_irq;
	p->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
#endif
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}