/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;
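
/* Illustrative sketch (not part of the driver): the per-cpu batch above is
 * always driven through the same three-step sequence by the callers below,
 * with interrupts disabled so the per-cpu state cannot be preempted.  A
 * hypothetical caller mapping 'npages' physical pages starting at 'pa'
 * would look roughly like this:
 *
 *	local_irq_save(flags);
 *	iommu_batch_start(dev, HV_PCI_MAP_ATTR_READ, entry);
 *	for (n = 0; n < npages; n++)
 *		if (iommu_batch_add(pa + (n * PAGE_SIZE)) < 0L)
 *			goto fail;
 *	if (iommu_batch_end() < 0L)
 *		goto fail;
 *	local_irq_restore(flags);
 *
 * iommu_batch_add() flushes automatically once PGLIST_NENTS pages have
 * accumulated, so callers never overflow the page list.
 */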
/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot,
				     unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
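
/* Worked example (illustrative): pci_sun4v_iommu_map() may map fewer pages
 * than requested, which is why iommu_batch_flush() loops.  With npages = 512
 * and a hypervisor that happens to map at most 128 entries per call, the
 * loop makes four calls, advancing 'entry' and 'pglist' by 128 each time
 * until npages reaches zero.  Only a negative return is treated as failure.
 */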
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}
/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}
/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
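
/* Usage sketch (hypothetical, not part of this file): a driver consuming
 * this path never calls dma_4v_alloc_coherent() directly; it goes through
 * the generic DMA API, which dispatches here once the probe routine below
 * installs sun4v_dma_ops.  The 'pdev' device and buffer size are made up
 * for illustration.
 */
#if 0
	dma_addr_t dma_handle;
	void *buf = dma_alloc_coherent(&pdev->dev, 8192, &dma_handle,
				       GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... hand dma_handle to the device, use buf from the CPU ... */
	dma_free_coherent(&pdev->dev, 8192, buf, dma_handle);
#endif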
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
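
/* Usage sketch (hypothetical): the streaming equivalent of the coherent
 * example above.  dma_map_page()/dma_unmap_page() resolve to the
 * dma_4v_map_page()/dma_4v_unmap_page() pair via sun4v_dma_ops; 'pdev',
 * 'page' and 'len' are made up for illustration.
 */
#if 0
	dma_addr_t busa = dma_map_page(&pdev->dev, page, 0, len,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, busa))
		return -ENOMEM;
	/* ... device DMA in progress ... */
	dma_unmap_page(&pdev->dev, busa, len, DMA_TO_DEVICE);
#endif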
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
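
/* Usage sketch (hypothetical): a scatterlist mapped through dma_4v_map_sg().
 * Note that the returned count may be smaller than the entry count passed
 * in, because adjacent entries that landed contiguously in DMA space are
 * merged above.  'pdev' and the buffer setup are made up for illustration.
 */
#if 0
	struct scatterlist sgl[4];
	int mapped;

	sg_init_table(sgl, 4);
	/* ... sg_set_page()/sg_set_buf() each entry ... */
	mapped = dma_map_sg(&pdev->dev, sgl, 4, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;
	/* ... program the device with 'mapped' segments ... */
	dma_unmap_sg(&pdev->dev, sgl, 4, DMA_FROM_DEVICE);
#endif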
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};
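
/* These ops are installed globally (dma_ops = &sun4v_dma_ops) in
 * pci_sun4v_probe() below, once the PCI hypervisor API group has been
 * negotiated, so every generic dma_*() call on this platform lands in
 * the handlers above.
 */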
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTX only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
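
/* Decode sketch (illustrative, not used by this driver): the MSIQ_REQID_*
 * masks above pack a PCI requester ID the usual bus/dev/fn way.  For a
 * hypothetical req_id of 0x1a50:
 *
 *	bus  = (0x1a50 & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;        = 0x1a
 *	dev  = (0x1a50 & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;  = 0x0a
 *	func = (0x1a50 & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;      = 0x0
 */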
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}
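
/* Worked example (illustrative): 'head' is a byte offset into the queue.
 * With msiq_ent_count = 128 and a 64-byte pci_sun4v_msiq_entry, the queue
 * is 8192 bytes; after the entry at offset 8128 is consumed, the
 * wraparound check above resets *head to 0.
 */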
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}
static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}
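
/* Worked example (illustrative, values made up): with msiq_num = 36 queues
 * of msiq_ent_count = 128 entries (64 bytes each), alloc_size is
 * 36 * 8192 = 294912 bytes, so get_order() asks for an order-6 block
 * (64 pages of 8K = 512KB) of physically contiguous memory, and each
 * queue base passed to the hypervisor is 'pages + i * q_size'.
 */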
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}
static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
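
	/* Worked example (illustrative): the devhandle lives in bits 59:32
	 * of the first "reg" physical address.  For a hypothetical
	 * regs->phys_addr of 0x0000040000000000, (phys_addr >> 32) is
	 * 0x400, and masking with 0x0fffffff leaves devhandle = 0x400.
	 */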
	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}
static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);