/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>

#include "powernv.h"
#include "pci.h"
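
/*
 * pe_err/pe_warn/pe_info below wrap printk() so that every message is
 * prefixed with "pci <dev>: [PE# nnn]", where <dev> is the device name
 * for device-type PEs or the domain/bus number for bus-type PEs.
 */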
#define define_pe_printk_level(func, kern_level)			\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
{									\
	struct va_format vaf;						\
	va_list args;							\
	char pfix[32];							\
	int r;								\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	if (pe->pdev)							\
		strlcpy(pfix, dev_name(&pe->pdev->dev),			\
			sizeof(pfix));					\
	else								\
		sprintf(pfix, "%04x:%02x ",				\
			pci_domain_nr(pe->pbus),			\
			pe->pbus->number);				\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",			\
		   pfix, pe->pe_number, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
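
/*
 * PE numbers are handed out from a plain bitmap.  The
 * find_next_zero_bit()/test_and_set_bit() retry loop below makes the
 * allocation safe against concurrent callers without taking a lock.
 */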
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}
static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}
/* Currently this is only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect it to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
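
/*
 * A PE's RID is the bus/devfn pair packed as (bus << 8) | devfn.  As
 * an example of the bus-number compare below: a bus-type PE spanning
 * buses 0x20..0x27 gives count = 8, hence bcomp = OpalPciBus5Bits,
 * i.e. only the top 5 bits of the bus number are compared and the
 * whole 8-bus range resolves to this single PE.
 */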
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate buses %d unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in the PELT. We need to add the PE into the
	 * corresponding PELT-V as well; otherwise, an error originating
	 * from the PE might contribute to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld adding self to PELTV\n", rc);
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;
	/* Setup one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}
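
/*
 * Keep the PHB's DMA PE list sorted by descending DMA weight, so that
 * the heaviest DMA users come first when pnv_ioda_setup_dma() hands
 * out the 32-bit TCE segments.
 */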
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pci_dev_get(dev);
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}
/*
 * There are 2 types of PCI-bus-sensitive PEs: one comprised of a
 * single PCI bus, and another that contains the primary PCI bus and
 * its subordinate PCI devices and buses. The second type of PE is
 * normally originated by a PCIe-to-PCI bridge or a PLX switch
 * downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}
/*
 * Configure the PEs so that the downstream PCI buses and devices
 * get their associated PE#. Unfortunately, we haven't figured out
 * a way to identify PLX bridges yet, so we simply put the PCI bus
 * and the subordinates behind the root port into a PE here. This
 * rule is expected to change as soon as we can detect PLX bridges
 * correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned. Do nothing for the
	 * case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, &pe->tce32_table);
		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}
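
/*
 * After TCEs are built or freed, the PHB's TCE cache has to be told
 * about it by writing to the "TCE kill" register, which the setup
 * code maps at tbl->it_index. The invalidation variants below differ
 * only in how the affected range is encoded into the values written.
 */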
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc = 128 << 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	unsigned long start, end, inc;
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ul << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << 12);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << 12);
	inc = (0x1ul << 12);
	mb();

	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}
}
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 u64 *startp, u64 *endp)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
}
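
/*
 * Sizing note: a 256MB DMA segment with 4K TCE pages needs
 * 0x10000000 / 0x1000 = 65536 TCEs of 8 bytes each, so the
 * TCE32_TABLE_SIZE below works out to 512KB of table per segment.
 */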
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or.  Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
			       TCE_PCI_SWINV_PAIR;
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
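
/*
 * On IODA2 (PHB3) there is no per-256MB segmenting: the PE gets a
 * single TVE covering the entire 32-bit DMA space below m32_pci_base,
 * which is why no (base, segs) pair is passed in here.
 */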
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all possible 32-bits space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or.  Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
	iommu_init_table(tbl, phb->hose->node);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}
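
/*
 * Worked example of the hand-out below (illustrative numbers): with
 * 16 TCE segments, 3 DMA-capable PEs and a total weight tw = 30,
 * residual = 16 - 3 = 13, so a PE of weight 15 receives
 * segs = 1 + (15 * 13 + 30/2) / 30 = 8 of the 16 segments.
 */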
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For the IODA2-compliant PHB3, we needn't care about the
		 * weight. All the available 32-bit DMA space will be
		 * assigned to the specific PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}
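
/*
 * On PHB3 (IODA2), an MSI EOI must additionally notify the PHB via
 * OPAL before the usual XICS EOI, hence the custom irq_chip that
 * pnv_pci_ioda_msi_setup() installs below.
 */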
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	uint64_t addr64;
	uint32_t addr32, data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (pdn && pdn->force_32bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
	} else {
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = addr32;
	}
	msg->data = data;

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3: the first
	 * time through, copy the existing chip and override its EOI
	 * handler with the OPAL-aware pnv_ioda2_msi_eoi().
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}

		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
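
/*
 * "ibm,opal-msi-ranges" (or the legacy "msi-ranges") is a pair of
 * cells: the base hardware IRQ number followed by the number of MSIs
 * available on this PHB.
 */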
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
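
/*
 * The PHB's M32 and I/O spaces are divided into total_pe equally
 * sized segments. A window is assigned to a PE by pointing each
 * segment it covers at that PE, one opal_pci_map_pe_mmio_window()
 * call per segment.
 */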
/*
 * This function is supposed to be called on a per-PE basis, walking
 * from top to bottom, so that the I/O or MMIO segments assigned to a
 * parent PE can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus based PEs for now. PCI
	 * device based PEs, for example SR-IOV sensitive VFs, should
	 * be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end   = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			/* WARNING: Assumes M32 is mem region 0 in PHB. We need to
			 * harden that algorithm when we start supporting M64
			 */
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end   = res->end -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}
static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}
static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}
static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();

	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_probe_mode_set(EEH_PROBE_MODE_DEV);
	eeh_addr_cache_build();
	eeh_init();
#endif
}
/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. For that case, we
 * needn't enlarge the alignment, so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet. For example, resource reassignment
	 * during PCI probe time. We just skip the check if the
	 * PEs aren't ready.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
		       OPAL_ASSERT_RESET);
}
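
/*
 * Probe-time construction of one PHB: read the OPAL PHB id from the
 * device tree, allocate the pci_controller plus the IODA auxiliary
 * arrays (PE allocation bitmap, M32/IO segment maps, PE array), and
 * hook the powernv callbacks into ppc_md.
 */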
void __init pnv_pci_init_ioda_phb(struct device_node *np,
				  u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;
	const u64 *prop64;
	const u32 *prop32;
	int len;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (!phb) {
		pr_err("  Out of memory !\n");
		return;
	}

	/* Allocate PCI controller */
	memset(phb, 0, sizeof(struct pnv_phb));
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = prop32[0];
		hose->last_busno = prop32[1];
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");

	/* Initialize more IODA stuff */
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (!prop32)
		phb->ioda.total_pe = 1;
	else
		phb->ioda.total_pe = *prop32;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already chopped the top 64K off the M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	iomap_off = size;
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	if (phb->type == PNV_PHB_IODA1)
		phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

	/* Clear the unusable M64 resources */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
#ifdef CONFIG_EEH
	phb->eeh_ops = &ioda_eeh_ops;
#endif

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup shutdown function for kexec */
	phb->shutdown = pnv_pci_ioda_shutdown;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. The PCI core
	 * is expected to apply the correct I/O and MMIO alignment
	 * to the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * On IODA1 map everything to PE#0; on IODA2 we assume the IODA
	 * reset has cleared the RTT, which has the same effect.
	 */
	if (ioda_type == PNV_PHB_IODA1)
		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
}
void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}