/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of_platform.h>

#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been enabled
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions
 */
#define CELL_IOMMU_STRICT_PROTECTION
/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x4000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul
/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */

/* Page table entries */
#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
#define IOPTE_M			0x2000000000000000ul /* coherency required */
#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
#define IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
#define IOPTE_H			0x0000000000000800ul /* cache hint */
#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))
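/* For example, with 4K IOMMU pages (a shift of 12) each 256MB segment
 * covers 1 << IO_PAGENO_BITS(12) = 65536 IO PTEs, while 16M pages
 * (a shift of 24) need only 16 PTEs per segment.
 */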
/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul
struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NR_IOMMUS	2

struct cbe_iommu {
	int nid;
	char name[32];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};
/* Static array of iommus, one per node
 *   each contains a list of windows, keyed from dma_window property
 *   - on bus setup, look for a matching window, or create one
 *   - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	unsigned long __iomem *reg;
	unsigned long val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}
static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to setup the
	 * protection bit. "prot" is setup to be 3 fields of 4 bits appended
	 * together for each of the 3 supported direction values. It is then
	 * shifted left so that the fields matching the desired direction
	 * land on the appropriate bits, and other bits are masked out.
	 */
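	/* Concretely: DMA_BIDIRECTIONAL is 0, so prot is shifted by 52 and
	 * the 0xc nibble lands on IOPTE_PP_W | IOPTE_PP_R (read and write);
	 * DMA_TO_DEVICE is 1, a shift of 56 leaves only IOPTE_PP_R set
	 * (the device reads memory); DMA_FROM_DEVICE is 2, a shift of 60
	 * leaves only IOPTE_PP_W set (the device writes memory).
	 */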
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
#else
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
		(window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
}
static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
		| (window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}
static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR "  page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}
static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;

		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}
static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
		unsigned long dbase, unsigned long dsize,
		unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
			__FUNCTION__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}
static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments  = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* PTEs for each segment must start on a 4K boundary */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
			iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
			__FUNCTION__, iommu->nid, iommu->stab, ptab,
			n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K;  break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M;  break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}
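/* For the dynamic window (IOMMU_PAGE_SHIFT of 12) the sizing above works
 * out to 65536 PTEs per segment, i.e. 512KB of page table per 256MB
 * segment, so n_pte_pages is 128 and consecutive STEs step through ptab
 * in 128 * 4K strides.
 */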
static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __FUNCTION__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
			reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT);
	cell_iommu_enable_hardware(iommu);
}
#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows) */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif
static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}
static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;
static unsigned long dma_iommu_fixed_base;
struct dma_mapping_ops dma_iommu_fixed_ops;
static void cell_dma_dev_setup_iommu(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;
	struct dev_archdata *archdata = &dev->archdata;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(archdata->numa_node);
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       archdata->of_node ? archdata->of_node->full_name : "?",
		       archdata->numa_node);
		return;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	archdata->dma_data = &window->table;
}
static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;

	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		cell_dma_dev_setup_iommu(dev);
	else if (get_pci_dma_ops() == &dma_direct_ops)
		archdata->dma_data = (void *)cell_dma_direct_offset;
	else
		BUG();
}
static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}
static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};
static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}
static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 * However, there might be an issue with getting the size right so
	 * let's ignore that for now. We might want to completely get rid of
	 * the multiple window support since the cell iommu supports per-page
	 * ioids
	 */

	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}
static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}
static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}
static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}

	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < lmb_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%ldMB)\n",
		       size >> 20, lmb_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}
/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we setup the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any offset required by firmware. The firmware offset is encoded
 *  in the "dma-ranges" property.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table, this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then setup the fixed mapping
 *  from 0 to 32GB.
 */
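/* To make the numbers concrete: with a 2GB dynamic window per node, fbase
 * starts at 2GB, so once a machine has more than 30GB of RAM the check
 * against 0x800000000 (32GB) in cell_iommu_fixed_mapping_init() fails and
 * the hash-table placement described above is used instead.
 */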
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best;

	np = of_node_get(dev->archdata.of_node);
	while (np) {
		ranges = of_get_property(np, "dma-ranges", &len);
		if (ranges)
			break;
		np = of_get_next_parent(np);
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	/* dma-ranges format:
	 * 1 cell:  pci space
	 * 2 cells: pci address
	 * 2 cells: parent address
	 * 2 cells: size
	 */
= 0, best
= -1, best_size
= 0; i
< len
; i
+= 7) {
951 cpu_addr
= of_translate_dma_address(np
, ranges
+i
+ 3);
952 size
= of_read_number(ranges
+ i
+ 5, 2);
954 if (cpu_addr
== 0 && size
> best_size
) {
961 pci_addr
= of_read_number(ranges
+ best
+ 1, 2);
963 dev_dbg(dev
, "iommu: no suitable range found!\n");
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}
static void cell_dma_dev_setup_fixed(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	archdata->dma_data = (void *)addr;

	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
}
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		  addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
}
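/* Worked example for the arithmetic in insert_16M_pte(): for
 * addr = 0x12000000, segment = addr >> 28 = 1 and, since
 * IO_PAGENO_BITS(24) is 4, offset = 0x12 - (1 << 4) = 2, i.e. the third
 * 16MB page of segment 1.
 */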
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}
static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}
	of_node_put(np);

	/* We must have dma-ranges properties for fixed mapping to work */
	for (np = NULL; (np = of_find_all_nodes(np));) {
		if (of_find_property(np, "dma-ranges", NULL))
			break;
	}
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = lmb_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in"
					 " real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_fixed_ops = dma_direct_ops;
	dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}
static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);
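/* e.g. booting with "iommu_fixed=off" on the kernel command line sets
 * iommu_fixed_disabled, so cell_iommu_init() below skips
 * cell_iommu_fixed_mapping_init() and only the dynamic windows are used.
 */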
static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node.  */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);