/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/pSeries_reconfig.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>

#include "plpar_wrappers.h"

static int tce_build_pSeries(struct iommu_table *tbl, long index,
                             long npages, unsigned long uaddr,
                             enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        u64 proto_tce;
        u64 *tcep;
        u64 rpn;

        proto_tce = TCE_PCI_READ; // Read allowed

        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;

        tcep = ((u64 *)tbl->it_base) + index;

        while (npages--) {
                /* can't move this out since we might cross LMB boundary */
                rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
                *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

                uaddr += TCE_PAGE_SIZE;
                tcep++;
        }
        return 0;
}

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
        u64 *tcep;

        tcep = ((u64 *)tbl->it_base) + index;

        while (npages--)
                *(tcep++) = 0;
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
        u64 *tcep;

        tcep = ((u64 *)tbl->it_base) + index;

        return *tcep;
}

static void tce_free_pSeriesLP(struct iommu_table *, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table *, long, long);

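/*
 * LPAR TCE management: the hypervisor owns the TCE table, so each entry
 * is installed with an H_PUT_TCE hcall (via plpar_tce_put), one page at
 * a time. On H_NOT_ENOUGH_RESOURCES, the entries created so far are
 * backed out again.
 */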
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                               long npages, unsigned long uaddr,
                               enum dma_data_direction direction,
                               struct dma_attrs *attrs)
{
        u64 rc = 0;
        u64 proto_tce, tce;
        u64 rpn;
        int ret = 0;
        long tcenum_start = tcenum, npages_start = npages;

        rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        proto_tce = TCE_PCI_READ;
        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;

        while (npages--) {
                tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
                rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

                if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
                        ret = (int)rc;
                        tce_free_pSeriesLP(tbl, tcenum_start,
                                           (npages_start - (npages + 1)));
                        break;
                }

                if (rc && printk_ratelimit()) {
                        printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
                        printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
                        printk("\ttcenum = 0x%llx\n", (u64)tcenum);
                        printk("\ttce val = 0x%llx\n", tce);
                        show_stack(current, (unsigned long *)__get_SP());
                }

                tcenum++;
                rpn++;
        }
        return ret;
}

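/* Per-CPU scratch page used to batch TCEs for the indirect hcall below */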
static DEFINE_PER_CPU(u64 *, tce_page) = NULL;

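/*
 * Build multiple TCEs with a single H_PUT_TCE_INDIRECT hcall per pageful
 * of entries (via plpar_tce_put_indirect), falling back to the
 * one-at-a-time path when only a single page is mapped or when the
 * scratch page cannot be allocated.
 */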
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                    long npages, unsigned long uaddr,
                                    enum dma_data_direction direction,
                                    struct dma_attrs *attrs)
{
        u64 rc = 0;
        u64 proto_tce;
        u64 *tcep;
        u64 rpn;
        long l, limit;
        long tcenum_start = tcenum, npages_start = npages;
        int ret = 0;

        if (npages == 1)
                return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                           direction, attrs);

        tcep = __get_cpu_var(tce_page);

        /* This is safe to do since interrupts are off when we're called
         * from iommu_alloc{,_sg}()
         */
        if (!tcep) {
                tcep = (u64 *)__get_free_page(GFP_ATOMIC);
                /* If allocation fails, fall back to the loop implementation */
                if (!tcep)
                        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                                   direction, attrs);
                __get_cpu_var(tce_page) = tcep;
        }

        rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        proto_tce = TCE_PCI_READ;
        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;

        /* We can map max one pageful of TCEs at a time */
        do {
                /*
                 * Set up the page with TCE data, looping through and setting
                 * the values.
                 */
                limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

                for (l = 0; l < limit; l++) {
                        tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
                        rpn++;
                }

                rc = plpar_tce_put_indirect((u64)tbl->it_index,
                                            (u64)tcenum << 12,
                                            (u64)virt_to_abs(tcep),
                                            limit);

                npages -= limit;
                tcenum += limit;
        } while (npages > 0 && !rc);

        if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
                ret = (int)rc;
                tce_freemulti_pSeriesLP(tbl, tcenum_start,
                                        (npages_start - (npages + limit)));
                return ret;
        }

        if (rc && printk_ratelimit()) {
                printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
                printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
                printk("\tnpages = 0x%llx\n", (u64)npages);
                printk("\ttce[0] val = 0x%llx\n", tcep[0]);
                show_stack(current, (unsigned long *)__get_SP());
        }
        return ret;
}

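/* Clear npages TCEs one at a time by writing a zero TCE via H_PUT_TCE */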
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
        u64 rc;

        while (npages--) {
                rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

                if (rc && printk_ratelimit()) {
                        printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
                        printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
                        printk("\ttcenum = 0x%llx\n", (u64)tcenum);
                        show_stack(current, (unsigned long *)__get_SP());
                }

                tcenum++;
        }
}

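/* Clear a whole range of TCEs with a single H_STUFF_TCE hcall */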
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
        u64 rc;

        rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

        if (rc && printk_ratelimit()) {
                printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
                printk("\trc = %lld\n", rc);
                printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
                printk("\tnpages = 0x%llx\n", (u64)npages);
                show_stack(current, (unsigned long *)__get_SP());
        }
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
        u64 rc;
        unsigned long tce_ret;

        rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

        if (rc && printk_ratelimit()) {
                printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
                printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
                printk("\ttcenum = 0x%llx\n", (u64)tcenum);
                show_stack(current, (unsigned long *)__get_SP());
        }

        return tce_ret;
}

#ifdef CONFIG_PCI
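/*
 * Fill in an iommu_table for a native (non-LPAR) PHB from its
 * linux,tce-base/linux,tce-size properties, carving the next chunk out
 * of the PHB's DMA window.
 */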
static void iommu_table_setparms(struct pci_controller *phb,
                                 struct device_node *dn,
                                 struct iommu_table *tbl)
{
        struct device_node *node;
        const unsigned long *basep;
        const u32 *sizep;

        node = phb->dn;

        basep = of_get_property(node, "linux,tce-base", NULL);
        sizep = of_get_property(node, "linux,tce-size", NULL);
        if (basep == NULL || sizep == NULL) {
                printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
                                "missing tce entries !\n", dn->full_name);
                return;
        }

        tbl->it_base = (unsigned long)__va(*basep);

        if (!is_kdump_kernel())
                memset((void *)tbl->it_base, 0, *sizep);

        tbl->it_busno = phb->bus->number;

        /* Units of tce entries */
        tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

        /* Test if we are going over 2GB of DMA space */
        if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
                udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
                panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
        }

        phb->dma_window_base_cur += phb->dma_window_size;

        /* Set the tce table size - measured in entries */
        tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

        tbl->it_index = 0;
        tbl->it_blocksize = 16;
        tbl->it_type = TCE_PCI;
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
                                      struct device_node *dn,
                                      struct iommu_table *tbl,
                                      const void *dma_window,
                                      int bussubno)
{
        unsigned long offset, size;

        tbl->it_busno = bussubno;
        of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

        tbl->it_base = 0;
        tbl->it_blocksize = 16;
        tbl->it_type = TCE_PCI;
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}

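/*
 * Bus setup for the non-LPAR case: size the per-slot DMA windows for a
 * PHB, reserving space when an ISA/IDE bridge sits under it.
 */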
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
        struct device_node *dn;
        struct iommu_table *tbl;
        struct device_node *isa_dn, *isa_dn_orig;
        struct device_node *tmp;
        struct pci_dn *pci;
        int children;

        dn = pci_bus_to_OF_node(bus);

        pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

        if (bus->self) {
                /* This is not a root bus, any setup will be done for the
                 * device-side of the bridge in iommu_dev_setup_pSeries().
                 */
                return;
        }
        pci = PCI_DN(dn);

        /* Check if the ISA bus on the system is under
         * this PHB.
         */
        isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

        while (isa_dn && isa_dn != dn)
                isa_dn = isa_dn->parent;

        if (isa_dn_orig)
                of_node_put(isa_dn_orig);

        /* Count number of direct PCI children of the PHB. */
        for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
                children++;

        pr_debug("Children: %d\n", children);

        /* Calculate amount of DMA window per slot. Each window must be
         * a power of two (due to pci_alloc_consistent requirements).
         *
         * Keep 256MB aside for PHBs with ISA.
         */

        if (!isa_dn) {
                /* No ISA/IDE - just set window size and return */
                pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

                while (pci->phb->dma_window_size * children > 0x80000000ul)
                        pci->phb->dma_window_size >>= 1;
                pr_debug("No ISA/IDE, window size is 0x%llx\n",
                         pci->phb->dma_window_size);
                pci->phb->dma_window_base_cur = 0;

                return;
        }

        /* If we have ISA, then we probably have an IDE
         * controller too. Allocate a 128MB table but
         * skip the first 128MB to avoid stepping on ISA
         * space.
         */
        pci->phb->dma_window_size = 0x8000000ul;
        pci->phb->dma_window_base_cur = 0x8000000ul;

        tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                           pci->phb->node);

        iommu_table_setparms(pci->phb, dn, tbl);
        pci->iommu_table = iommu_init_table(tbl, pci->phb->node);

        /* Divide the rest (1.75GB) among the children */
        pci->phb->dma_window_size = 0x80000000ul;
        while (pci->phb->dma_window_size * children > 0x70000000ul)
                pci->phb->dma_window_size >>= 1;

        pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

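/*
 * Bus setup for the LPAR case: locate the nearest ibm,dma-window
 * property above the bus and create (or reuse) the iommu table for it.
 */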
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
        struct iommu_table *tbl;
        struct device_node *dn, *pdn;
        struct pci_dn *ppci;
        const void *dma_window = NULL;

        dn = pci_bus_to_OF_node(bus);

        pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
                 dn->full_name);

        /* Find nearest ibm,dma-window, walking up the device tree */
        for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
                dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
                if (dma_window != NULL)
                        break;
        }

        if (dma_window == NULL) {
                pr_debug("  no ibm,dma-window property !\n");
                return;
        }

        ppci = PCI_DN(pdn);

        pr_debug("  parent is %s, iommu_table: 0x%p\n",
                 pdn->full_name, ppci->iommu_table);

        if (!ppci->iommu_table) {
                tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   ppci->phb->node);
                iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
                                          bus->number);
                ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
                pr_debug("  created table: %p\n", ppci->iommu_table);
        }

        if (pdn != dn)
                PCI_DN(dn)->iommu_table = ppci->iommu_table;
}

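/*
 * Device setup for the non-LPAR case: direct children of the PHB get
 * their own table, everything else inherits one from a parent.
 */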
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
        struct device_node *dn;
        struct iommu_table *tbl;

        pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

        dn = dev->dev.archdata.of_node;

        /* If we're the direct child of a root bus, then we need to allocate
         * an iommu table ourselves. The bus setup code should have setup
         * the window sizes already.
         */
        if (!dev->bus->self) {
                struct pci_controller *phb = PCI_DN(dn)->phb;

                pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
                tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   phb->node);
                iommu_table_setparms(phb, dn, tbl);
                PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
                dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
                return;
        }

        /* If this device is further down the bus tree, search upwards until
         * an already allocated iommu table is found and use that.
         */
        while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
                dn = dn->parent;

        if (dn && PCI_DN(dn))
                dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
        else
                printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
                       pci_name(dev));
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
        struct device_node *pdn, *dn;
        struct iommu_table *tbl;
        const void *dma_window = NULL;
        struct pci_dn *pci;

        pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

        /* dev setup for LPAR is a little tricky, since the device tree might
         * contain the dma-window properties per-device and not necessarily
         * for the bus. So we need to search upwards in the tree until we
         * either hit a dma-window property, OR find a parent with a table
         * already allocated.
         */
        dn = pci_device_to_OF_node(dev);
        pr_debug("  node is %s\n", dn->full_name);

        for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
             pdn = pdn->parent) {
                dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
                if (dma_window)
                        break;
        }

        if (!pdn || !PCI_DN(pdn)) {
                printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
                       "no DMA window found for pci dev=%s dn=%s\n",
                       pci_name(dev), dn ? dn->full_name : "<null>");
                return;
        }
        pr_debug("  parent is %s\n", pdn->full_name);

        /* Check for parent == NULL so we don't try to setup the empty EADS
         * slots on POWER4 machines.
         */
        if (dma_window == NULL || pdn->parent == NULL) {
                pr_debug("  no dma window for device, linking to parent\n");
                dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
                return;
        }

        pci = PCI_DN(pdn);
        if (!pci->iommu_table) {
                tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   pci->phb->node);
                iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
                                          pci->phb->bus->number);
                pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
                pr_debug("  created table: %p\n", pci->iommu_table);
        } else {
                pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
        }

        dev->dev.archdata.dma_data = pci->iommu_table;
}

#else  /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#endif /* !CONFIG_PCI */

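/*
 * Dynamic reconfiguration notifier: when a node with an ibm,dma-window
 * property is removed, free the iommu table that was created for it.
 */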
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
        int err = NOTIFY_OK;
        struct device_node *np = node;
        struct pci_dn *pci = PCI_DN(np);

        switch (action) {
        case PSERIES_RECONFIG_REMOVE:
                if (pci && pci->iommu_table &&
                    of_get_property(np, "ibm,dma-window", NULL))
                        iommu_free_table(pci->iommu_table, np->full_name);
                break;
        default:
                err = NOTIFY_DONE;
                break;
        }
        return err;
}

static struct notifier_block iommu_reconfig_nb = {
        .notifier_call = iommu_reconfig_notifier,
};

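/*
 * Select the TCE and DMA setup callbacks: honour linux,iommu-off, use the
 * hcall-based (LPAR) paths when running under a hypervisor, preferring the
 * multi-TCE hcalls when the firmware supports them, and use the direct
 * table-manipulation paths otherwise.
 */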
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
        if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) {
                /* Direct I/O, IOMMU off */
                ppc_md.pci_dma_dev_setup = NULL;
                ppc_md.pci_dma_bus_setup = NULL;
                set_pci_dma_ops(&dma_direct_ops);
                return;
        }

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
                        ppc_md.tce_build = tce_buildmulti_pSeriesLP;
                        ppc_md.tce_free  = tce_freemulti_pSeriesLP;
                } else {
                        ppc_md.tce_build = tce_build_pSeriesLP;
                        ppc_md.tce_free  = tce_free_pSeriesLP;
                }
                ppc_md.tce_get   = tce_get_pSeriesLP;
                ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
        } else {
                ppc_md.tce_build = tce_build_pSeries;
                ppc_md.tce_free  = tce_free_pSeries;
                ppc_md.tce_get   = tce_get_pseries;
                ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
        }

        pSeries_reconfig_notifier_register(&iommu_reconfig_nb);

        set_pci_dma_ops(&dma_iommu_ops);
}