// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
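
/*
 * Worked example (illustrative only): if the MMU_LOCK register reads
 * 0x00000c70, then MMU_LOCK_BASE(0xc70) = (0xc70 & 0x7c00) >> 10 = 3 and
 * MMU_LOCK_VICT(0xc70) = (0xc70 & 0x1f0) >> 4 = 7, i.e. TLB entries 0..2
 * are locked (preserved) and entry 7 is the next victim for a TLB load.
 */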

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for OMAP3 ISP driver.
 */
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for OMAP3 ISP driver.
 */
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
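
/*
 * Illustrative sketch (not part of the driver): a client such as the OMAP3
 * ISP could bracket an off-mode transition of its MMU with the two exported
 * helpers above. The "isp_*_example" names are hypothetical; the device
 * pointer is whatever client device the IOMMU is attached to.
 */
static void __maybe_unused isp_suspend_example(struct device *isp_dev)
{
	/* hardware context is about to be lost, snapshot the MMU registers */
	omap_iommu_save_ctx(isp_dev);
}

static void __maybe_unused isp_resume_example(struct device *isp_dev)
{
	/* hardware context was lost, re-program the MMU registers */
	omap_iommu_restore_ctx(isp_dev);
}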

static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
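
/*
 * Illustrative sketch (not part of the driver): interpreting the result of
 * iopgtable_lookup_entry() for one device address, the same way
 * omap_iommu_iova_to_phys() does further below. "example_da" and the
 * "example_lookup" name are hypothetical; only the small-page and section
 * cases are decoded here.
 */
static phys_addr_t __maybe_unused example_lookup(struct omap_iommu *obj)
{
	u32 example_da = 0x20001000;
	u32 *pgd, *pte;

	iopgtable_lookup_entry(obj, example_da, &pgd, &pte);

	if (pte && iopte_is_small(*pte))	/* mapped as a 4 KiB page */
		return omap_iommu_translate(*pte, example_da, IOPTE_MASK);
	if (iopgd_is_section(*pgd))		/* mapped as a 1 MiB section */
		return omap_iommu_translate(*pgd, example_da, IOSECTION_MASK);

	return 0;	/* unmapped, or a size this sketch does not decode */
}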

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj:	target omap iommu device
 * @iopgd:	page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}

static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct cr_regs *tmp;
	struct iotlb_lock l;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}

/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they have been idled and
 * all activity has been suspended. System Suspend will leverage the
 * PM driver late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);

/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System Resume will leverage the PM driver late callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
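
/*
 * Illustrative sketch (not part of the driver): a client driver, for
 * example a remoteproc-style driver, could wrap its own runtime PM
 * callbacks around the two exported helpers above. The "rproc_*_example"
 * names and the way the domain is obtained are hypothetical.
 */
static int __maybe_unused rproc_mmu_suspend_example(struct device *client_dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client_dev);

	/* client traffic has stopped; let the MMUs runtime-suspend */
	return omap_iommu_domain_deactivate(domain);
}

static int __maybe_unused rproc_mmu_resume_example(struct device *client_dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client_dev);

	/* bring the MMUs back before the client resumes operations */
	return omap_iommu_domain_activate(domain);
}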

/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}

/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}

/**
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev:	iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * results in the PM core skipping the invocation of any of the Sleep PM
 * callbacks (suspend, suspend_late, resume, resume_early etc).
 */
static int omap_iommu_prepare(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 1;
	return 0;
}

static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_property_read_bool(of, "ti,iommu-bus-err-back"))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			return err;

		obj->has_iommu_driver = true;
	}

	err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
	if (err)
		goto out_sysfs;

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	return 0;

out_sysfs:
	if (obj->has_iommu_driver)
		iommu_device_sysfs_remove(&obj->iommu);
	return err;
}

static void omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->has_iommu_driver)
		iommu_device_sysfs_remove(&obj->iommu);

	iommu_device_unregister(&obj->iommu);

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
}

static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu"	},
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};
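
/*
 * Illustrative device-tree sketch (not taken from a real dts): a DRA7 DSP
 * MMU instance matching the last compatible above, plus a client device
 * referencing it through an "iommus" phandle (#iommu-cells = <0> on OMAP,
 * so the phandle carries no extra cells). Node names, the unit address,
 * the interrupt specifier and the syscon label are hypothetical.
 *
 *	mmu0_dsp1: mmu@40d01000 {
 *		compatible = "ti,dra7-dsp-iommu";
 *		reg = <0x40d01000 0x100>;
 *		interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
 *		ti,#tlb-entries = <32>;
 *		ti,syscon-mmuconfig = <&dsp1_system 0x0>;
 *	};
 *
 *	dsp1_processor {
 *		iommus = <&mmu0_dsp1>;
 *	};
 */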

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove_new = omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}
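
/*
 * Illustrative sketch (not part of the driver): how a single 4 KiB region
 * is described to the page-table code, the same way omap_iommu_map() below
 * does for real requests. The "example_*" names and the addresses are
 * hypothetical.
 */
static int __maybe_unused example_map_one_page(struct omap_iommu *obj)
{
	struct iotlb_entry e;
	u32 example_da = 0x20000000;	/* device virtual address */
	u32 example_pa = 0x9c000000;	/* physical address */

	/* pack da/pa/page-size into the hardware-friendly descriptor */
	iotlb_init_entry(&e, example_da, example_pa, MMU_CAM_PGSZ_4K);

	/* update the L1/L2 tables (and optionally prefetch a TLB entry) */
	return omap_iopgtable_store_entry(obj, &e);
}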

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, size_t count,
			  int prot, gfp_t gfp, size_t *mapped)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	} else {
		*mapped = bytes;
	}

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, size_t count,
			       struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ? 0 : bytes;
}

static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}

/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -ENODEV;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}

static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -ENODEV;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EINVAL;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}

static int omap_iommu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct omap_iommu_domain *omap_domain;

	if (domain == identity_domain || !domain)
		return 0;

	omap_domain = to_omap_domain(domain);
	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
	return 0;
}

static struct iommu_domain_ops omap_iommu_identity_ops = {
	.attach_dev = omap_iommu_identity_attach,
};

static struct iommu_domain omap_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &omap_iommu_identity_ops,
};

static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct platform_device *pdev;
	struct omap_iommu *oiommu;
	struct device_node *np;
	int num_iommus, i;

	/*
	 * Allocate the per-device iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		pdev = of_find_device_by_node(np);
		if (!pdev) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		tmp->iommu_dev = oiommu;
		tmp->dev = &pdev->dev;

		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);

	/*
	 * use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained for both IOMMUs
	 */
	oiommu = arch_data->iommu_dev;

	return &oiommu->iommu;
}

static void omap_iommu_release_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data);
}

static int omap_iommu_of_xlate(struct device *dev, const struct of_phandle_args *args)
{
	/* TODO: collect args->np to save re-parsing in probe above */
	return 0;
}

static const struct iommu_ops omap_iommu_ops = {
	.identity_domain = &omap_iommu_identity_domain,
	.domain_alloc_paging = omap_iommu_domain_alloc_paging,
	.probe_device	= omap_iommu_probe_device,
	.release_device	= omap_iommu_release_device,
	.device_group	= generic_single_device_group,
	.of_xlate	= omap_iommu_of_xlate,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= omap_iommu_attach_dev,
		.map_pages	= omap_iommu_map,
		.unmap_pages	= omap_iommu_unmap,
		.iova_to_phys	= omap_iommu_iova_to_phys,
		.free		= omap_iommu_domain_free,
	}
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	return 0;

fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */