/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/sched.h>	/* for show_stack */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ppc-pci.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
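
/*
 * Added commentary: on pSeries, device DMA addresses are translated
 * through a TCE (Translation Control Entry) table.  On bare-metal SMP
 * the kernel writes TCEs directly into the table at tbl->it_base; under
 * PowerVM (LPAR) every update goes through the H_PUT_TCE family of
 * hypervisor calls via the plpar_tce_*() wrappers.
 */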

static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format.  We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
		start += inc;
	}
}
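
/*
 * Added commentary: the non-LPAR build/free/get helpers below operate
 * directly on the table in memory, one 64-bit entry per 4K page, and
 * poke the software-invalidate register afterwards when the bridge
 * advertises TCE_PCI_SWINV_* support.
 */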
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	u64 proto_tce;
	u64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((u64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
	return 0;
}

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	u64 *tcep, *tces;

	tces = tcep = ((u64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	u64 *tcep;

	tcep = ((u64 *)tbl->it_base) + index;

	return *tcep;
}

static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
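
/*
 * Added commentary: the *_pSeriesLP variants run under PowerVM, where
 * the TCE table is owned by the hypervisor.  plpar_tce_put() issues one
 * H_PUT_TCE hcall per entry; the entry number is shifted left by 12 to
 * form the I/O bus address the hcall interface expects.
 */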
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
		rpn++;
	}
	return ret;
}
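
/*
 * Added commentary: per-CPU scratch page used to batch TCE updates.
 * plpar_tce_put_indirect() (H_PUT_TCE_INDIRECT) takes the physical
 * address of a page of TCEs and programs up to 512 entries
 * (4096/TCE_ENTRY_SIZE) in a single hcall.
 */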
static DEFINE_PER_CPU(u64 *, tce_page);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}
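
/*
 * Added commentary: frees write zero (invalid) TCEs.  The single-entry
 * path below issues one H_PUT_TCE per entry, while the multi variant
 * clears a whole range with one plpar_tce_stuff() (H_STUFF_TCE) call.
 */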
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
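
/*
 * Added commentary: a dynamic DMA window (DDW) is a second, 64-bit TCE
 * window negotiated with firmware through the RTAS tokens listed in
 * ibm,ddw-applicable.  When the window is big enough to map all of
 * memory linearly, a device can use direct DMA at a fixed offset
 * instead of per-mapping iommu translation (see enable_ddw() below).
 */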
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset, 0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
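
/*
 * Added commentary: mirror image of the clearrange helper above, but it
 * installs valid read/write TCEs for the range, batching them through
 * the per-CPU tce_page buffer and H_PUT_TCE_INDIRECT.
 */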
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__get_cpu_var(tce_page) = tcep;
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | next;
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
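
/*
 * Added commentary: the bare-metal setparms path below reads the TCE
 * table location from the firmware-provided linux,tce-base and
 * linux,tce-size properties and carves the 32-bit DMA space up among
 * the PHB's children, which is why dma_window_base_cur advances each
 * time a table is parameterized.
 */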
#ifdef CONFIG_PCI
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sw_inval;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (sw_inval) {
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry.  The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}

static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	if (isa_dn_orig)
		of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
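
/*
 * Added commentary: under PowerVM the window geometry is not carved up
 * by Linux; bus setup walks up the device tree to the nearest
 * ibm,dma-window property and instantiates a table from whatever
 * window firmware assigned.
 */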
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug("  created table: %p\n", ppci->iommu_table);
	}
}

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);
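
/*
 * Added commentary: "disable_ddw" on the kernel command line turns the
 * feature off entirely.  For the removal path below, ddw_avail[] holds
 * the RTAS tokens for ibm,query-/create-/remove-pe-dma-window in that
 * order, so ddw_avail[2] is the remove token.
 */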
static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
{
	int ret;

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
}

static void remove_ddw(struct device_node *np)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;
	u64 liobn;
	int len, ret;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			   np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	__remove_ddw(np, ddw_avail, liobn);

delprop:
	ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			np->full_name, ret);
}

static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = direct64->dma_base;
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static void __restore_default_window(struct eeh_dev *edev,
				     u32 ddw_restore_token)
{
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
				BUID_HI(buid), BUID_LO(buid));
	} while (rtas_busy_delay(ret));
	pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
		ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
}

static int find_existing_ddw_windows(void)
{
	struct device_node *pdn;
	const struct dynamic_dma_window_prop *direct64;
	const u32 *ddw_extensions;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
		if (!direct64)
			continue;

		/*
		 * We need to ensure the IOMMU table is active when we
		 * return from the IOMMU setup so that the common code
		 * can clear the table or find the holes. To that end,
		 * first, remove any existing DDW configuration.
		 */
		remove_ddw(pdn);

		/*
		 * Second, if we are running on a new enough level of
		 * firmware where the restore API is present, use it to
		 * restore the 32-bit window, which was removed in
		 * create_ddw.
		 * If the API is not present, then create_ddw couldn't
		 * have removed the 32-bit window in the first place, so
		 * removing the DDW configuration should be sufficient.
		 */
		ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
						 NULL);
		if (ddw_extensions && ddw_extensions[0] > 0)
			__restore_default_window(of_node_to_eeh_dev(pdn),
						 ddw_extensions[1]);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);

	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
				BUID_HI(buid), BUID_LO(buid), page_shift,
				window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

static void restore_default_window(struct pci_dev *dev,
				   u32 ddw_restore_token)
{
	__restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
}

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	const u32 *uninitialized_var(ddw_avail);
	const u32 *uninitialized_var(ddw_extensions);
	u32 ddw_restore_token = 0;
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	const void *dma_window = NULL;
	unsigned long liobn, offset, size;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))
		goto out_unlock;

	/*
	 * the extensions property is only required to exist in certain
	 * levels of firmware and later
	 * the ibm,ddw-extensions property is a list with the first
	 * element containing the number of extensions and each
	 * subsequent entry is a value corresponding to that extension
	 */
	ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
	if (ddw_extensions) {
		/*
		 * each new defined extension length should be added to
		 * the top of the switch so the "earlier" entries also
		 * get picked up
		 */
		switch (ddw_extensions[0]) {
		/* ibm,reset-pe-dma-windows */
		case 1:
			ddw_restore_token = ddw_extensions[1];
			break;
		}
	}

	/*
	 * Only remove the existing DMA window if we can restore back to
	 * the default state. Removing the existing window maximizes the
	 * resources available to firmware for dynamic window creation.
	 */
	if (ddw_restore_token) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
		__remove_ddw(pdn, ddw_avail, liobn);
	}

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_restore_window;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_restore_window;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_restore_window;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_restore_window;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_restore_window;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = of_read_number(&create.addr_hi, 2);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_restore_window:
	if (ddw_restore_token)
		restore_default_window(dev, ddw_restore_token);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), of_node_full_name(dn));
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base(&dev->dev, pci->iommu_table);
}
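
/*
 * Added commentary: dma_set_mask is where DDW kicks in - a 64-bit mask
 * request triggers enable_ddw(), and on success the device is switched
 * to dma_direct_ops at the window's fixed offset; otherwise it falls
 * back to 32-bit translated DMA through dma_iommu_ops.
 */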
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is a ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}

#else  /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */
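
/*
 * Added commentary: direct windows are expected to map all of RAM, so
 * memory hotplug must extend (MEM_GOING_ONLINE) or shrink
 * (offline/cancel) the TCE range of every registered window.
 */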
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		remove_ddw(np);
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free	 = tce_freemulti_pSeriesLP;
		} else {
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free	 = tce_free_pSeriesLP;
		}
		ppc_md.tce_get	 = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free	 = tce_free_pSeries;
		ppc_md.tce_get	 = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free	 = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);
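
/*
 * Added commentary: booting with "multitce=off" forces the
 * one-hcall-per-TCE build/free paths even when firmware advertises the
 * batched H_PUT_TCE_INDIRECT/H_STUFF_TCE interface.
 */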