arch/powerpc/platforms/pseries/iommu.c
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

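/*
 * A TCE (Translation Control Entry) is one entry in the I/O translation
 * table that the PHB or hypervisor uses to translate PCI DMA addresses
 * into real memory addresses. Each iommu_table below describes one such
 * window of TCEs; the table_group and group-link objects tie tables to
 * IOMMU groups for the generic powerpc IOMMU layer (and for VFIO when
 * CONFIG_IOMMU_API is enabled).
 */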
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group_link *tgl = NULL;

	table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
			node);
	if (!table_group)
		goto fail_exit;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	if (!tbl)
		goto fail_exit;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			node);
	if (!tgl)
		goto fail_exit;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);
	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[0] = tbl;

	return table_group;

fail_exit:
	kfree(tgl);
	kfree(table_group);
	kfree(tbl);

	return NULL;
}

static void iommu_pseries_free_group(struct iommu_table_group *table_group,
		const char *node_name)
{
	struct iommu_table *tbl;
#ifdef CONFIG_IOMMU_API
	struct iommu_table_group_link *tgl;
#endif

	if (!table_group)
		return;

	tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
	tgl = list_first_entry_or_null(&tbl->it_group_list,
			struct iommu_table_group_link, next);

	WARN_ON_ONCE(!tgl);
	if (tgl) {
		list_del_rcu(&tgl->next);
		kfree(tgl);
	}
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif
	iommu_tce_table_put(tbl);

	kfree(table_group);
}

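/*
 * Bare-metal (non-LPAR) build path: here the TCE table lives in kernel
 * memory at tbl->it_base, so entries are written directly rather than
 * through hypervisor calls.
 */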
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			      long npages, unsigned long uaddr,
			      enum dma_data_direction direction,
			      unsigned long attrs)
{
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep, *tces;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}

static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

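/*
 * LPAR build path: the TCE table is owned by the hypervisor, so every
 * entry goes through the H_PUT_TCE hcall (plpar_tce_put()), one 4K TCE
 * at a time. "tcenum << 12" converts a TCE index into the byte offset
 * within the window that the hcall expects.
 */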
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
				long npages, unsigned long uaddr,
				enum dma_data_direction direction,
				unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			dump_stack();
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

static DEFINE_PER_CPU(__be64 *, tce_page);

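/*
 * Multi-TCE build path: when firmware advertises FW_FEATURE_MULTITCE,
 * TCEs are staged in the per-cpu page above and handed to the
 * hypervisor with H_PUT_TCE_INDIRECT (plpar_tce_put_indirect()), up to
 * 4096/TCE_ENTRY_SIZE entries per call, which is far cheaper than one
 * hcall per TCE.
 */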
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		dump_stack();
	}
	return ret;
}

static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			dump_stack();
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
		return tce_free_pSeriesLP(tbl, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		dump_stack();
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		dump_stack();
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"

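/*
 * The *range_multi helpers below operate on a pfn range against a
 * dynamic window described by a dynamic_dma_window_prop: the clear
 * variant zeroes TCEs with H_STUFF_TCE (plpar_tce_stuff(), up to 512
 * entries per call), the set variant populates them.
 */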
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
					     dma_offset,
					     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

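/*
 * As in tce_clearrange_multi_pSeriesLP() above: the start address is
 * first aligned down to a TCE-page boundary, and the byte count is
 * grown by the amount aligned away and rounded up, so num_tce covers
 * every TCE page the pfn range touches.
 */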
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

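/*
 * Note: on the bare-metal side the TCE table location and size are
 * apparently taken from the "linux,tce-base"/"linux,tce-size"
 * properties of the PHB node, with offsets and sizes then converted
 * from bytes into units of 4K TCE entries.
 */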
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}

struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};

static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

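/*
 * IOMMU API exchange() callback (used by VFIO): read back the previous
 * TCE and install a new one. The table's large_pool lock makes the
 * get/put pair atomic, since the two hcalls are not atomic by
 * themselves.
 */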
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.exchange = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};

static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug(" no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
		 pdn, ppci->table_group);

	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
				ppci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(ppci->table_group,
				pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->table_group);
	}
}

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have set up
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);
		tbl->it_ops = &iommu_table_pseries_ops;
		iommu_init_table(tbl, phb->node);
		iommu_register_group(PCI_DN(dn)->table_group,
				pci_domain_nr(phb->bus), 0);
		set_iommu_table_base(&dev->dev, tbl);
		iommu_add_device(&dev->dev);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */

	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn)) {
		set_iommu_table_base(&dev->dev,
				PCI_DN(dn)->table_group->tables[0]);
		iommu_add_device(&dev->dev);
	} else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

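/*
 * Tear down a 64-bit direct window: clear every TCE in it, ask firmware
 * to delete the window via the ibm,remove-pe-dma-window RTAS call
 * (token ddw_avail[2]), and optionally drop the device-tree property
 * that recorded it.
 */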
static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	u32 ddw_avail[3];
	u64 liobn;
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);

	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (ret || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%pOF failed to clear tces in window.\n",
			   np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%pOF: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);
	else
		pr_debug("%pOF: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);

delprop:
	if (remove_prop)
		ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%pOF: failed to remove direct window property: %d\n",
			np, ret);
}

static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_query_response *query)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);
	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_create_response *create, int page_shift,
			int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
				cfg_addr, BUID_HI(buid), BUID_LO(buid),
				page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

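/*
 * Nodes for which DDW setup has failed once are remembered on
 * failed_ddw_pdn_list so that enable_ddw() does not retry them; see
 * the comment in enable_ddw() about racing with in-flight DMA.
 */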
struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	u32 ddw_avail[3];
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (fpdn->pdn == pdn)
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_failed;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_failed;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_failed;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_failed;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_failed;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
			create.addr_lo);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
		create.liobn, dn);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
			 dn, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
			pdn, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = be64_to_cpu(ddwprop->dma_base);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn, true);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_failed:

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug(" node is %pOF\n", dn);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",
		       pci_name(dev), dn);
		return;
	}
	pr_debug(" parent is %pOF\n", pdn);

	pci = PCI_DN(pdn);
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
				pci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(pci->table_group,
				pci_domain_nr(pci->phb->bus), 0);
		pr_debug(" created table: %p\n", pci->table_group);
	} else {
		pr_debug(" found DMA window, table: %p\n", pci->table_group);
	}

	set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
	iommu_add_device(&dev->dev);
}

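/*
 * dma_set_mask() hook: if the driver asks for full 64-bit DMA and a
 * dynamic direct window can be created, switch the device to the
 * direct DMA ops at the window's offset; otherwise fall back to
 * 32-bit translated DMA through the iommu ops.
 */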
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %pOF\n", dn);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->table_group;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is an ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}

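/*
 * Memory hotplug notifier: direct windows linearly map all of system
 * memory, so memory going online must be added to every active window
 * and removed again when it goes offline (or when onlining is
 * cancelled).
 */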
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->table_group)
			iommu_pseries_free_group(pci->table_group,
					np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);

machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);