/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

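/* Push the accumulated list of physical page addresses into the IOTSB
 * with a pci_sun4v_iommu_map() hypervisor call.  The call may map
 * fewer pages than requested, so keep re-issuing it, advancing the
 * entry index and page list, until the batch is drained.
 */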
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

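/* Begin batching at a new IOTSB index.  If the new index simply
 * extends the current run of entries nothing needs to be done,
 * otherwise any pending entries are flushed first.
 */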
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

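/* Allocate a physically contiguous buffer, reserve a matching range of
 * IOTSB entries, and batch-map every page with both read and write
 * attributes.  On success the CPU virtual address is returned and
 * *dma_addrp holds the bus address the device should use.
 */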
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

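/* Undo dma_4v_alloc_coherent(): release the IOTSB range, demap the
 * entries through the hypervisor, and free the backing pages.
 */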
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

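/* Map a single page span for streaming DMA.  Write permission is
 * withheld for DMA_TO_DEVICE transfers, and the sub-page offset bits
 * of the original address are carried over into the returned bus
 * address.
 */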
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

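/* Tear down a streaming mapping: give the IOTSB range back to the
 * allocator, then demap the entries, looping because the hypervisor
 * may demap fewer pages than requested per call.
 */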
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

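/* Map a scatterlist, coalescing entries whose DMA addresses come out
 * contiguous into a single segment, subject to the device's maximum
 * segment size and segment boundary constraints.
 */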
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

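/* Unmap each segment of a previously mapped scatterlist, stopping at
 * the first entry with a zero dma_length.
 */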
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

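/* Scan the IOTSB for mappings installed by the OBP firmware.  Entries
 * whose target address lies outside the physically available list are
 * preserved and marked busy in the arena map; the others are demapped.
 */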
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

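/* Set up the software IOMMU state from the "virtual-dma" OBP property
 * (base and size of the DVMA window), defaulting to a 2GB window at
 * 0x80000000 when the property is absent.
 */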
static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

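/* Pull one MSI entry off an event queue and acknowledge it back to
 * the IDLE state, advancing the caller's byte-offset head pointer and
 * wrapping it at the end of the queue.  Returns 1 if an entry was
 * consumed, 0 if the current slot is empty.
 */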
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

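/* Allocate one contiguous block holding every MSI event queue of this
 * PBM, register each queue's physical base with the hypervisor, and
 * read the configuration back to verify that it stuck.
 */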
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

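/* Fill in one PBM (PCI Bus Module) descriptor: determine its address
 * spaces and OBP properties, initialize the IOMMU and MSI support,
 * then scan the bus and link the PBM into the global list.
 */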
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct of_device *op, u32 devhandle)
{
	struct device_node *dp = op->node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

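/* Probe one "SUNW,sun4v-pci" device node.  The first invocation
 * negotiates the PCI hypervisor API group, installs sun4v_dma_ops,
 * and allocates the per-cpu IOMMU batch page lists; every invocation
 * derives the device handle from the "reg" property and brings up a
 * PBM with its own IOMMU.
 */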
static int __devinit pci_sun4v_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct of_platform_driver pci_sun4v_driver = {
	.name		= DRIVER_NAME,
	.match_table	= pci_sun4v_match,
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);