/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of_platform.h>

#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been fixed.
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions.
 */
#define CELL_IOMMU_STRICT_PROTECTION

/* one IOMMU per Cell node */
#define NR_IOMMUS			2
/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x4000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul
/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */
/* Page table entries */
#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
#define IOPTE_M			0x2000000000000000ul /* coherency required */
#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
#define IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
#define IOPTE_H			0x0000000000000800ul /* cache hint */
#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))
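/*
 * Illustrative note (not from the original source): with IO_SEGMENT_SHIFT
 * of 28, each IO segment spans 256MB of DMA space.  IO_PAGENO_BITS(shift)
 * is then the number of page-number bits within a segment, e.g. for 4kB
 * IOMMU pages (shift = 12) a segment holds 1 << (28 - 12) = 65536 pages,
 * and for 16MB pages (shift = 24) it holds 1 << 4 = 16 pages.
 */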
/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul
struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct pci_controller *phb;
	struct list_head windows;
};
/* Static array of iommus, one per node
 * each contains a list of windows, keyed from dma_window property
 * - on bus setup, look for a matching window, or create one
 * - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	unsigned long __iomem *reg;
	unsigned long val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}
static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to set up the
	 * protection bits. "prot" is set up as 3 fields of 4 bits appended
	 * together, one for each of the 3 supported direction values. It is
	 * then shifted left so that the field matching the desired direction
	 * lands on the appropriate bits, and other bits are masked out.
	 */
	const unsigned long prot = 0xc48;
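	/*
	 * Worked example (added note, not in the original source):
	 * 0xc48 = 0b1100_0100_1000, i.e. three 4-bit fields, one per
	 * dma_data_direction value (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
	 * DMA_FROM_DEVICE = 2).  After shifting by (52 + 4 * direction) the
	 * selected field lands on bits 63:62 and is masked with
	 * IOPTE_PP_W | IOPTE_PP_R, yielding W|R for DMA_BIDIRECTIONAL,
	 * R only for DMA_TO_DEVICE and W only for DMA_FROM_DEVICE.
	 */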
	base_pte =
		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
#else
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
		(window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
}
static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
		| (window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}
static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR "  page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}
static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;

		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}
static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
			unsigned long dbase, unsigned long dsize,
			unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
			__FUNCTION__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}
static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments  = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* PTEs for each segment must start on a 4K boundary */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
			iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
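	/*
	 * Worked example (added note, not in the original source): with 4kB
	 * IOMMU pages, pages_per_segment is 1 << 16, so each segment needs
	 * 65536 * 8 bytes = 512kB of PTEs, i.e. n_pte_pages = 128.  With 16MB
	 * pages only 16 PTEs are used per segment, but the max() above keeps
	 * every segment's PTE block at least one 4K page (512 entries).
	 */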
	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
			__FUNCTION__, iommu->nid, iommu->stab, ptab,
			n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K;  break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M;  break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}
static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __FUNCTION__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
			reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT);
	cell_iommu_enable_hardware(iommu);
}
#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif
static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}
static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;
struct dma_mapping_ops dma_iommu_fixed_ops;
static void cell_dma_dev_setup_iommu(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;
	struct dev_archdata *archdata = &dev->archdata;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary.
	 */
	iommu = cell_iommu_for_node(archdata->numa_node);
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       archdata->of_node ? archdata->of_node->full_name : "?",
		       archdata->numa_node);
		return;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	archdata->dma_data = &window->table;
}
static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;

	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		cell_dma_dev_setup_iommu(dev);
	else if (get_pci_dma_ops() == &dma_direct_ops)
		archdata->dma_data = (void *)cell_dma_direct_offset;
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}
static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};
static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}
static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 * However, there might be issues with getting the size right so let's
	 * ignore that for now. We might want to completely get rid of the
	 * multiple window support since the cell iommu supports per-page ioids
	 */

	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}
static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}
static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}
static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}

	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < lmb_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%ldMB)\n",
		       size >> 20, lmb_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}
/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we setup the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any offset required by firmware. The firmware offset is encoded
 *  in the "dma-ranges" property.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table, this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then setup the fixed mapping
 *  from there.
 */
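/*
 * Added worked example (not in the original source): on the 4GB machine
 * described above, dma_iommu_fixed_base is 0x80000000 (2GB).  A buffer at
 * physical address 0x40000000 (1GB) is therefore handed to a 64-bit device
 * as DMA address 0x80000000 + 0x40000000 = 0xc0000000 (3GB), plus whatever
 * offset firmware encodes in the "dma-ranges" property.
 */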
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best;

	np = of_node_get(dev->archdata.of_node);
	while (np) {
		ranges = of_get_property(np, "dma-ranges", &len);
		if (ranges)
			break;
		np = of_get_next_parent(np);
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	/* dma-ranges format:
	 * 1 cell:  pci space
	 * 2 cells: pci address
	 * 2 cells: parent address
	 * 2 cells: size
	 */
= 0, best
= -1, best_size
= 0; i
< len
; i
+= 7) {
832 cpu_addr
= of_translate_dma_address(np
, ranges
+i
+ 3);
833 size
= of_read_number(ranges
+ i
+ 5, 2);
835 if (cpu_addr
== 0 && size
> best_size
) {
842 pci_addr
= of_read_number(ranges
+ best
+ 1, 2);
844 dev_dbg(dev
, "iommu: no suitable range found!\n");
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}
static void cell_dma_dev_setup_fixed(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	archdata->dma_data = (void *)addr;

	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
}
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
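	/*
	 * Worked example (added note, not in the original source): for
	 * addr = 0x13000000 (304MB), segment = 1 and addr >> 24 = 19, so
	 * offset = 19 - (1 << 4) = 3, i.e. the fourth 16MB page of segment 1.
	 * Each segment's PTE block occupies one 4K page (512 entries), hence
	 * the "segment * (1 << 12) / sizeof(unsigned long)" step above.
	 */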
	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		 addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
}
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
		| (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}
static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	for (np = NULL; (np = of_find_all_nodes(np));) {
		if (of_find_property(np, "dma-ranges", NULL))
			break;
	}
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = lmb_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000) /* 32GB */
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in"
					 " real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			 dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_fixed_ops = dma_direct_ops;
	dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}
static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);
static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);