/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/pSeries_reconfig.h>
#include <asm/firmware.h>
#include <asm/ppc-pci.h>
#include <asm/mmzone.h>

#include "plpar_wrappers.h"

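/*
 * Build TCE (translation control entry) mappings for the non-LPAR
 * (bare-metal) case: entries are written directly into the TCE table
 * at it_base, one entry per TCE page of the buffer at uaddr.
 */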
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ; /* Read allowed */

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((u64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}

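/* Invalidate a range of TCE table entries by storing zeros directly. */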
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	u64 *tcep;

	tcep = ((u64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

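/* Read a TCE table entry back directly (non-LPAR case). */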
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	u64 *tcep;

	tcep = ((u64 *)tbl->it_base) + index;

	return *tcep;
}

static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

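/*
 * LPAR variant: the hypervisor owns the TCE table, so each entry is
 * installed with a plpar_tce_put() hcall instead of a direct store.
 * On H_NOT_ENOUGH_RESOURCES, the entries mapped so far are backed out
 * and the error is returned to the caller.
 */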
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

static DEFINE_PER_CPU(u64 *, tce_page);

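/*
 * Multi-TCE variant: stage up to one page worth of TCEs in the per-cpu
 * tce_page buffer and hand them to the hypervisor in batches with
 * plpar_tce_put_indirect(), avoiding one hcall per entry.
 */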
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}

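/* Clear entries one at a time by putting a zero TCE value. */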
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
	}
}

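/* plpar_tce_stuff() clears npages consecutive entries in one hcall. */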
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"

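/*
 * Clear a range of TCEs in a dynamic window; start_pfn/num_pfn are in
 * units of kernel pages, which are converted to TCE pages below.
 */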
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset,
				     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

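/*
 * Linearly map a range of kernel pages into a dynamic window, batching
 * entries through the per-cpu tce_page buffer as in
 * tce_buildmulti_pSeriesLP().
 */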
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__get_cpu_var(tce_page) = tcep;
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | next;
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)virt_to_abs(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

#ifdef CONFIG_PCI
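/*
 * iommu_table_setparms
 *
 * Function: On pSeries bare-metal systems, fill in TCE table info from
 * the linux,tce-base/linux,tce-size properties of the PHB node and
 * carve this PHB's window out of the DMA space below 2GB.
 */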
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const void *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}

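/*
 * Bus setup, non-LPAR case: only the root bus gets a table; the PHB's
 * DMA window is sized here and divided among its direct children.
 */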
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	if (isa_dn_orig)
		of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const void *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		pr_debug("  created table: %p\n", ppci->iommu_table);
	}
}

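/*
 * Device setup, non-LPAR case: direct children of a root bus get their
 * own table; anything deeper inherits the nearest ancestor's table.
 */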
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

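/*
 * Tear down a direct window: clear its TCEs, ask firmware to remove
 * the window via the ibm,remove-pe-dma-window RTAS token (ddw_avail[2])
 * and delete the DIRECT64_PROPNAME property from the node.
 */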
static void remove_ddw(struct device_node *np)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;
	u64 liobn;
	int len, ret;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			   np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);

delprop:
	ret = prom_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			np->full_name, ret);
}

static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = direct64->dma_base;
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

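/*
 * Ask firmware (ibm,query-pe-dma-windows, ddw_avail[0]) how many
 * dynamic windows remain for this PE and which page sizes it supports.
 */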
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_query_response *query)
{
	struct device_node *dn;
	struct pci_dn *pcidn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pcidn = PCI_DN(dn);
	cfg_addr = pcidn->eeh_config_addr;
	if (pcidn->eeh_pe_config_addr)
		cfg_addr = pcidn->eeh_pe_config_addr;
	buid = pcidn->phb->buid;
	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);

	return ret;
}

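/*
 * Ask firmware (ibm,create-pe-dma-window, ddw_avail[1]) to create a
 * window of 2^window_shift bytes backed by 2^page_shift byte TCE
 * pages, retrying while RTAS reports busy.
 */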
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_create_response *create, int page_shift,
			int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pcidn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pcidn = PCI_DN(dn);
	cfg_addr = pcidn->eeh_config_addr;
	if (pcidn->eeh_pe_config_addr)
		cfg_addr = pcidn->eeh_pe_config_addr;
	buid = pcidn->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
				BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	const u32 *uninitialized_var(ddw_avail);
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))
		goto out_unlock;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_unlock;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_unlock;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_unlock;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr,
			query.largest_available_block,
			1ULL << page_shift);
		goto out_unlock;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_unlock;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_clear_window;
	}

	ret = prom_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_clear_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = of_read_number(&create.addr_hi, 2);
	goto out_unlock;

out_clear_window:
	remove_ddw(pdn);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const void *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), dn ? dn->full_name : "<null>");
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base(&dev->dev, pci->iommu_table);
}

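/*
 * dma_set_mask hook: for a full 64-bit mask, try to enable a direct
 * window and switch the device to dma_direct_ops; otherwise (or on
 * failure) fall back to 32-bit DMA through the IOMMU.
 */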
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const void *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

#else  /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#endif /* !CONFIG_PCI */

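/*
 * Memory hotplug notifier: keep every registered direct window mapped
 * in sync as memory comes online or goes offline.
 */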
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case PSERIES_RECONFIG_REMOVE:
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);

		/*
		 * Because the notifier runs after isolation of the
		 * slot, we are guaranteed any DMA window has already
		 * been revoked and the TCEs have been marked invalid,
		 * so we don't need a call to remove_ddw(np). However,
		 * if an additional notifier action is added before the
		 * isolate call, we should update this code for
		 * completeness with such a call.
		 */
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free	 = tce_freemulti_pSeriesLP;
		} else {
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free	 = tce_free_pSeriesLP;
		}
		ppc_md.tce_get   = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
	} else {
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free  = tce_free_pSeries;
		ppc_md.tce_get   = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free	 = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);