/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device the mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
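
/* The batch machinery below coalesces IOMMU TSB updates so that a single
 * hypervisor call can map many pages at once.  A minimal sketch of the
 * intended calling sequence (illustrative only, not a verbatim excerpt
 * from any one caller):
 *
 *	local_irq_save(flags);		 (batch state is per-CPU)
 *	iommu_batch_start(dev, prot, entry);
 *	for each physical page:
 *		iommu_batch_add(paddr);	 (flushes when PGLIST_NENTS fills)
 *	iommu_batch_end();		 (flushes whatever remains)
 *	local_irq_restore(flags);
 */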

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot,
				     unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
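
/* Note on the flush loop above: the pci_sun4v_iommu_map() hypervisor
 * call may map fewer TSB entries than requested, returning the count it
 * actually installed (or a negative status).  That is why the flush
 * advances entry/pglist by 'num' and retries until npages reaches zero
 * instead of assuming one call suffices.
 */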

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* first_page was converted to a physical address above;
	 * recover the virtual address before handing it to free_pages().
	 */
	first_page = (unsigned long) ret;

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
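
/* A note on locking in dma_4v_alloc_coherent() above: the IOMMU batch
 * state lives in per-CPU data, so the batch start/add/end sequence runs
 * under local_irq_save() rather than under iommu->lock; only the range
 * allocator itself takes the spinlock.  On the failure path interrupts
 * are still disabled, which is why a plain spin_lock() is paired with
 * spin_unlock_irqrestore() there.
 */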

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
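
/* The demap loop in dma_4v_free_coherent() mirrors the map side:
 * pci_sun4v_iommu_demap() returns how many entries it actually
 * invalidated, so the code keeps calling until the whole range has been
 * torn down before the pages go back to the page allocator.
 */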

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}
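
/* Mapping a single buffer composes the DMA address from two pieces: the
 * allocated TSB slot gives the IO-page-aligned base
 * (page_table_map_base + (entry << IO_PAGE_SHIFT)), and the low bits of
 * the CPU address (oaddr & ~IO_PAGE_MASK) carry the offset within the
 * page.  Write permission is granted only when the direction is not
 * DMA_TO_DEVICE.
 */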

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		do {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		} while (npages != 0);

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}
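
/* Both sync operations can be no-ops here because sparc64 PCI DMA is
 * cache-coherent with respect to the CPU, so there is nothing to flush
 * or invalidate when ownership of a buffer passes back to the
 * processor.
 */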

const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;

	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
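
/* probe_existing_entries() deals with IOMMU translations that the
 * firmware (OBP) installed before Linux took over.  Entries whose
 * target page belongs to memory Linux considers available are simply
 * demapped; anything else is preserved by marking the slot busy in the
 * arena bitmap so the allocator never hands it out.
 */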

static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}
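
/* The arena map sizing above is a plain bitmap allocation: one bit per
 * TSB entry, (num_tsb_entries + 7) / 8 bytes, rounded up to an 8-byte
 * multiple so the bitmap helpers can operate on whole longwords.
 */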

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTx sources only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
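
/* The req_id field identifies the requester as a standard PCI
 * bus/device/function triple.  For illustration only (nothing in this
 * file needs the decoded form), extracting the pieces would look like:
 *
 *	bus  = (ep->req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
 *	dev  = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
 *	func = (ep->req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
 */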

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}
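
/* The head value handed around by these routines is a byte offset into
 * the queue, exactly as the hypervisor reports it, not an entry index.
 * A dispatch loop built on these ops would roughly do: get_head(), then
 * dequeue_msi() repeatedly while it returns 1 (each call advancing and
 * wrapping *head), then set_head() to tell the hypervisor how far
 * consumption got.  The generic sparc64 MSI layer drives this sequence.
 */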

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}
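
/* After configuring each queue, pci_sun4v_msiq_alloc() immediately
 * reads the configuration back with pci_sun4v_msiq_info() and compares
 * it with what was programmed.  A mismatch means the hypervisor
 * accepted the call but did not store the expected base/entry-count
 * pair, which is treated as a fatal setup error rather than silently
 * running with a bogus queue.
 */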

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
				      struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}
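
/* Each pci_controller_info carries two bus modules, and the hypervisor
 * device handles of a pair differ only in bit 6: the (devhandle & 0x40)
 * test above picks pbm_B for the odd half, and the (devhandle ^ 0x40)
 * probe in sun4v_pci_init() below finds an already-registered sibling
 * so both halves share one controller structure.
 */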

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	if (!prop) {
		prom_printf("SUN4V_PCI: Could not find config registers\n");
		prom_halt();
	}
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}