/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
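/* IOMMU mappings are established through the hypervisor in batches:
 * up to PGLIST_NENTS physical page addresses are accumulated per CPU
 * and then handed to pci_sun4v_iommu_map() in a single call.
 */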
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}
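/* Hand the accumulated page list to the hypervisor.  The map call can
 * establish fewer entries than requested, so keep calling until the
 * whole batch is mapped or an error status comes back.
 */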
/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}
/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}
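/* Simple bitmap allocator for IOTSB entries.  Callers hold
 * iommu->lock while allocating.
 */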
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
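/* Return a run of IOTSB entries to the arena bitmap.  Also called
 * with iommu->lock held.
 */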
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
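/* Consistent (coherent) DMA allocations: grab pages, reserve a run of
 * IOTSB entries, then batch-map the pages with both read and write
 * attributes.
 */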
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
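/* Undo pci_4v_alloc_consistent(): release the IOTSB entries, demap
 * them through the hypervisor, and free the backing pages.
 */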
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
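/* Map a single buffer for streaming DMA.  Write permission is only
 * granted when the transfer is not PCI_DMA_TODEVICE.
 */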
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}
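/* Tear down a streaming mapping: free the arena entries and demap the
 * pages via pci_sun4v_iommu_demap().
 */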
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
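/* Walk a prepared scatterlist and emit one IOMMU mapping per IO page,
 * coalescing physically contiguous entries that share a page.
 */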
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1;
}
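/* Map a scatterlist.  Single-entry lists take the pci_4v_map_single()
 * fast path; longer lists are coalesced by prepare_sg() and then
 * mapped with fill_sg().
 */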
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
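/* Unmap a scatterlist by computing the overall bus address range it
 * covers and demapping it in one pass.
 */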
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
/* SUN4V PCI configuration space accessors. */

struct pdev_entry {
	struct pdev_entry	*next;
	u32			devhandle;
	unsigned int		bus;
	unsigned int		device;
	unsigned int		func;
};

#define PDEV_HTAB_SIZE	16
#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
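/* Devices discovered in the OBP tree are hashed by
 * (devhandle, bus, device, func) so that config space accesses can be
 * limited to devices that actually exist; see
 * pci_sun4v_out_of_range() below.
 */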
static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	unsigned int val;

	val = (devhandle ^ (devhandle >> 4));
	val ^= bus;
	val ^= device;
	val ^= func;

	return val & PDEV_HTAB_MASK;
}
static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
	struct pdev_entry **slot;

	if (!p)
		return -ENOMEM;

	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	p->next = *slot;
	*slot = p;

	p->devhandle = devhandle;
	p->bus = bus;
	p->device = device;
	p->func = func;

	return 0;
}
/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = prom_getchild(toplevel_node);

	while (toplevel_node != 0) {
		int ret = obp_find(pregs, toplevel_node, bus, devfn);

		if (ret != 0)
			return ret;

		ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
				       sizeof(*pregs) * PROMREG_MAX);
		if (ret == 0 || ret == -1)
			goto next_sibling;

		if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
		    ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = prom_getsibling(toplevel_node);
	}

	return toplevel_node;
}
static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	struct linux_prom_pci_registers pr[PROMREG_MAX];
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pr, pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p;

	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	while (p) {
		if (p->devhandle == devhandle &&
		    p->bus == bus &&
		    p->device == device &&
		    p->func == func)
			break;

		p = p->next;
	}

	return p;
}
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
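/* Scan one PBM's bus and run the generic sparc64 PCI fixup passes
 * over the devices that were found.
 */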
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);

	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;

	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;

	return sun4v_build_irq(devhandle, devino);
}
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
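/* Count IOTSB entries that already carry valid translations
 * (presumably established by the firmware) and mark them in the arena
 * so they are never handed out.
 */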
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			cnt++;
			__set_bit(i, arena->map);
		}
	}

	return cnt;
}
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 8;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 16;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 32;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);

	printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
	       pbm->name, num_tsb_entries, sz);
}
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);

	pdev_htab_populate(pbm);
}
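/* Entry point: the devhandle comes from the upper bits of the "reg"
 * property.  The two PBMs of one controller differ only in bit 6
 * (0x40) of the devhandle, which is how an existing partner
 * controller is located before a new pci_controller_info is
 * allocated.
 */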
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}