/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of_platform.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap unused pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been implemented.
 */
#define CELL_IOMMU_REAL_UNMAP
/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions.
 */
#define CELL_IOMMU_STRICT_PROTECTION
#define NR_IOMMUS			2

/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x4000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul
/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */

/* Page table entries */
#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
#define IOPTE_M			0x2000000000000000ul /* coherency required */
#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
#define IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
#define IOPTE_H			0x0000000000000800ul /* cache hint */
#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul
struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned long pte_offset;
	unsigned int ioid;
	struct iommu_table table;
};
#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;

	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;

	struct list_head windows;
};
/* Static array of iommus, one per node
 *   each contains a list of windows, keyed from dma_window property
 *   - on bus setup, look for a matching window, or create one
 *   - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	unsigned long __iomem *reg;
	unsigned long val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}
static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to set up the
	 * protection bits. "prot" is made up of 3 fields of 4 bits appended
	 * together, one for each of the 3 supported direction values. It is
	 * then shifted left so that the field matching the desired direction
	 * lands on the appropriate bits, and the other bits are masked out.
	 */
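	/* Worked out for reference (not in the original comment): with
	 * DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1 and DMA_FROM_DEVICE = 2,
	 * prot = 0xc48 decomposes into the nibbles 0xc, 0x4 and 0x8.
	 * Shifting prot left by 52 + 4 * direction moves exactly one of
	 * those nibbles onto bits 62-63, i.e. onto IOPTE_PP_R/IOPTE_PP_W:
	 *   bidirectional -> 0xc -> read + write
	 *   to device     -> 0x4 -> read  (device reads memory)
	 *   from device   -> 0x8 -> write (device writes memory)
	 */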
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
#else
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
		(window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
}
static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
		| (window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}
static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR "  page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}
static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;

		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}
static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
				unsigned long dbase, unsigned long dsize,
				unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
		      n_pte_pages, base;

	base = dbase;
	if (fsize != 0)
		base = min(fbase, dbase);

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS;

	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
			__FUNCTION__, iommu->nid, segments, pages_per_segment);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	clear_page(iommu->stab);

	/* ... and the page tables. Since these are contiguous, we can treat
	 * the page tables as one array of ptes, like pSeries does.
	 */
	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
			iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	iommu->ptab = page_address(page);
	memset(iommu->ptab, 0, ptab_size);

	/* allocate a bogus page for the end of each mapping */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	/* number of pages needed for a page table */
	n_pte_pages = (pages_per_segment *
		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
			n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	if (IOMMU_PAGE_SIZE == 0x1000)
		reg |= IOSTE_PS_4K;
	else if (IOMMU_PAGE_SIZE == 0x10000)
		reg |= IOSTE_PS_64K;
	else {
		extern void __unknown_page_size_error(void);
		__unknown_page_size_error();
	}

	pr_debug("Setting up IOMMU stab:\n");
	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
		iommu->stab[i] = reg |
			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}
}
static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __FUNCTION__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
			reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
			unsigned long base, unsigned long size)
{
	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
	cell_iommu_enable_hardware(iommu);
}
#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif
static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;
	window->pte_offset = pte_offset;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
		window->pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}
static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}
static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;
struct dma_mapping_ops dma_iommu_fixed_ops;
static void cell_dma_dev_setup_iommu(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;
	struct dev_archdata *archdata = &dev->archdata;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary.
	 */
	iommu = cell_iommu_for_node(archdata->numa_node);
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       archdata->of_node ? archdata->of_node->full_name : "?",
		       archdata->numa_node);
		return;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	archdata->dma_data = &window->table;
}
static void cell_dma_dev_setup_static(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;

	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_static(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		cell_dma_dev_setup_iommu(dev);
	else if (get_pci_dma_ops() == &dma_direct_ops)
		archdata->dma_data = (void *)cell_dma_direct_offset;
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}
static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};
static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}
static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want to check here whether the
	 * iommu for that node is already set up.
	 * However, there might be issues with getting the size right, so
	 * let's ignore that for now. We might want to completely get rid of
	 * the multiple window support since the cell iommu supports per-page
	 * ioids.
	 */
	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}
static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}
static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}
static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all buses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}

	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < lmb_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%ldMB)\n",
		       size >> 20, lmb_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}
/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we set up the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any offset required by firmware. The firmware offset is encoded
 *  in the "dma-ranges" property.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table; this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then set up the fixed mapping
 *  from 0 to 32GB.
 */
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
	struct device_node *tmp, *np;
	const u32 *ranges = NULL;
	int i, len, best;

	np = dev->archdata.of_node;

	ranges = of_get_property(np, "dma-ranges", &len);
	while (!ranges && np) {
		tmp = of_get_parent(np);
		np = tmp;
		ranges = of_get_property(np, "dma-ranges", &len);
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		return OF_BAD_ADDR;
	}

	len /= sizeof(u32);
	/* dma-ranges format:
	 * 1 cell:  pci space
	 * 2 cells: pci address
	 * 2 cells: parent address
	 * 2 cells: size
	 */
= 0, best
= -1, best_size
= 0; i
< len
; i
+= 7) {
820 cpu_addr
= of_translate_dma_address(np
, ranges
+i
+ 3);
821 size
= of_read_number(ranges
+ i
+ 5, 2);
823 if (cpu_addr
== 0 && size
> best_size
) {
830 pci_addr
= of_read_number(ranges
+ best
+ 1, 2);
832 dev_dbg(dev
, "iommu: no suitable range found!\n");
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64)) {
		if (cell_iommu_get_fixed_address(dev) == OF_BAD_ADDR)
			dev_dbg(dev, "iommu: 64-bit OK, but bad addr\n");
		else {
			dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
			set_dma_ops(dev, &dma_iommu_fixed_ops);
			cell_dma_dev_setup(dev);
		}
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	*dev->dma_mask = dma_mask;

	return 0;
}
*dev
)
865 struct dev_archdata
*archdata
= &dev
->archdata
;
868 addr
= cell_iommu_get_fixed_address(dev
) + dma_iommu_fixed_base
;
869 archdata
->dma_data
= (void *)addr
;
871 dev_dbg(dev
, "iommu: fixed addr = %lx\n", addr
);
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, *io_pte;
	int i;

	dma_iommu_fixed_base = fbase;

	/* convert from bytes into page table indices */
	dbase = dbase >> IOMMU_PAGE_SHIFT;
	dsize = dsize >> IOMMU_PAGE_SHIFT;
	fbase = fbase >> IOMMU_PAGE_SHIFT;
	fsize = fsize >> IOMMU_PAGE_SHIFT;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	io_pte = iommu->ptab;
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

	uaddr = 0;
	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
		/* Don't touch the dynamic region */
		if (i >= dbase && i < (dbase + dsize)) {
			pr_debug("iommu: static/dynamic overlap, skipping\n");
			continue;
		}
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
	}

	mb();
}
static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = lmb_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in"
					 " real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		pr_debug("iommu: setting up %d, dynamic window %lx-%lx " \
			 "fixed window %lx-%lx\n", iommu->nid, dbase,
			 dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_fixed_ops = dma_direct_ops;
	dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	printk(KERN_DEBUG "IOMMU fixed mapping established.\n");

	return 0;
}
static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);
static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we set up a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);