/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

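/* Per-cpu scratch state used to batch IOMMU mappings into as few
 * pci_sun4v_iommu_map() hypervisor calls as possible.  Callers follow
 * the pattern (with interrupts disabled):
 *
 *	iommu_batch_start(dev, prot, entry);
 *	... iommu_batch_add(paddr) for each IO page ...
 *	iommu_batch_end();
 */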
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot,
				     unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

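/* Push the accumulated page list to the hypervisor.  A single
 * pci_sun4v_iommu_map() call may map fewer entries than requested,
 * so the loop below advances and retries with the remainder until
 * everything is mapped or the call returns an error.
 */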
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

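/* Allocate a physically contiguous buffer and map it in the IOMMU.
 * The CPU pages come from alloc_pages_node() on the device's NUMA
 * node; the DMA address is built from the allocated TSB entry range.
 */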
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

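/* Undo dma_4v_alloc_coherent(): release the TSB range, demap the
 * entries through the hypervisor, and free the CPU pages.
 */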
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

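/* Map a single page for streaming DMA.  Read access is always
 * granted; write access only when the transfer is not DMA_TO_DEVICE.
 */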
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

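/* Tear down a streaming mapping made by dma_4v_map_page().  The
 * demap hypervisor call is looped because it may complete only part
 * of the requested range per invocation.
 */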
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

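/* Map a scatterlist.  Adjacent segments are merged when the
 * allocated DMA addresses are contiguous and neither the device's
 * max segment size nor its segment boundary would be violated.
 */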
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

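/* Unmap a scatterlist, stopping at the first segment with a zero
 * dma_length (the terminator written by dma_4v_map_sg() above).
 */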
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
};

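/* This table backs the generic DMA API on sun4v; pci_sun4v_probe()
 * below installs it as the global sparc64 dma_ops.  A driver mapping
 * a buffer, e.g.:
 *
 *	dma_addr_t ba = dma_map_page(&pdev->dev, pg, 0, len,
 *				     DMA_TO_DEVICE);
 *
 * ends up in dma_4v_map_page() via these ops.
 */
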
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

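/* Scan the TSB for mappings left behind by OBP.  Entries pointing at
 * memory the kernel owns are demapped; entries covering firmware-held
 * memory are preserved and marked busy in the arena map.
 */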
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

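/* Size the IOMMU from the "virtual-dma" OBP property (base, size),
 * allocate the arena bitmap, and import any entries OBP already
 * mapped.
 */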
static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
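/* Layout of one record in a sun4v MSI event queue (MSIQ).  The
 * hypervisor writes these records as interrupts arrive; the field
 * layout follows the record format described in the sun4v Hypervisor
 * API for PCI MSI event queues.
 */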
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTx sources only */
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 * 	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

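/* Allocate the MSI queues as one physically contiguous block, hand
 * each queue's base address to the hypervisor, then read the
 * configuration back to verify it stuck.
 */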
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

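/* One-time setup of a PCI Bus Module: PCI ops, bus resources, the
 * IOMMU, MSI support, and finally the bus scan.
 */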
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

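/* Probe entry point.  The first invocation negotiates the PCI
 * hypervisor API group and installs sun4v_dma_ops; every invocation
 * then sets up one PBM from the "reg" property's device handle.
 */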
static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);