// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
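/*
 * The MMU_* register offsets, MMU_REG_SIZE and the cr_regs/iotlb_entry
 * structures used throughout this file come from omap-iommu.h; the
 * iopgd_ and iopte_ page-table accessors come from omap-iopgtable.h.
 */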
static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
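/*
 * The MMU_LOCK register splits the TLB into a locked and a replaceable
 * region: entries below 'base' are preserved across flushes, while 'victim'
 * selects the entry that the next MMU_LD_TLB load will overwrite.
 */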
/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom:	generic iommu domain handle
 */
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 */
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 */
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
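/*
 * A minimal usage sketch (an assumption, not part of this file): a client
 * such as the OMAP3 ISP is expected to bracket an off-mode transition with
 * the two helpers above, e.g.
 *
 *	omap_iommu_save_ctx(isp->dev);
 *	... power domain goes off and the MMU register context is lost ...
 *	omap_iommu_restore_ctx(isp->dev);
 *
 * where isp->dev is a hypothetical client device whose per-device iommu
 * data was set up by omap_iommu_probe_device().
 */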
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}
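/*
 * TWL is the table walking logic: with it enabled the MMU resolves TLB
 * misses by walking the page table in memory on its own; with it disabled,
 * a miss raises MMU_IRQ_TLB_MISS so that software can load the entry instead.
 */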
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}
static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}
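/*
 * Pack the RAM attribute bits for a page-table/TLB entry. The same attribute
 * bits sit six bits higher in first-level (section/supersection) descriptors
 * than in second-level (small/large page) descriptors, hence the conditional
 * shift at the end.
 */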
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
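/*
 * PREFETCH_IOTLB is normally left undefined, so the TLB is left to the
 * hardware table walker and load_iotlb_entry() compiles to a stub; the code
 * under the #ifdef preloads (and optionally locks) entries by hand.
 */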
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear the iommu tlb entry that covers the 'da' address.
 */
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 */
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}
/*
 *	H/W pagetable operations
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}
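/*
 * The page table follows the usual OMAP/ARM short-descriptor layout: a 16KB
 * first-level table of 4096 entries (1MB sections, 16MB supersections stored
 * as 16 identical entries, or pointers to second-level tables), and 1KB
 * second-level tables of 256 entries (4KB small pages, 64KB large pages as
 * 16 identical entries). Every CPU update of a descriptor is made visible to
 * the MMU's table walker via flush_iopte_range() above.
 */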
static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* an L2 table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on the DMA address and the physical address being
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
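/*
 * Any TLB entry covering e->da is flushed before the page table is updated,
 * so a stale translation cannot survive the update; the new entry is only
 * prefetched back into the TLB when PREFETCH_IOTLB is enabled.
 */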
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 */
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 */
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 */
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
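/*
 * Everything below ties the hardware to the generic IOMMU core. An MMU fault
 * is first offered to report_iommu_fault(); if a handler installed via
 * iommu_set_fault_handler() resolves it, the interrupt is simply
 * acknowledged, otherwise the MMU interrupts are masked and the faulting
 * descriptors are dumped.
 */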
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj:	target omap iommu device
 * @iopgd:	page table
 */
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 */
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}
static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct cr_regs *tmp;
	struct iotlb_lock l;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}
/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend the
 * IOMMUs they control at runtime, after the clients have been idled and
 * all their activity has been suspended. System Suspend will leverage the
 * PM driver late callbacks.
 */
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System Resume will leverage the PM driver late callbacks.
 */
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
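/*
 * A minimal usage sketch (an assumption, not part of this file): a
 * remoteproc-style client would call omap_iommu_domain_deactivate(domain)
 * after it has stopped the remote processor, and
 * omap_iommu_domain_activate(domain) before restarting it, so that the
 * runtime PM callbacks below can power the MMUs down and back up with their
 * locked TLB entries preserved.
 */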
/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 */
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret)
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
	}

	return 0;
}
/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 */
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret)
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}
/**
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev:	iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * causes the PM core to skip invoking any of the Sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early etc).
 */
static int omap_iommu_prepare(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 1;
	return 0;
}
static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		obj->group = iommu_group_alloc();
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
		iommu_device_set_fwnode(&obj->iommu, &of->fwnode);

		err = iommu_device_register(&obj->iommu);
		if (err)
			goto out_sysfs;
	}

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/* Re-probe bus to probe device attached to this IOMMU */
	bus_iommu_probe(&platform_bus_type);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->group) {
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}
static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};
static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}
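/*
 * Mapping requests are mirrored to every IOMMU instance in the domain (on
 * DRA7 a DSP exposes two MMUs, one per port, that must be programmed
 * identically); on failure the entries already written are rolled back.
 */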
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	}

	return ret;
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ? 0 : bytes;
}
static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}
/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -EINVAL;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}
static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}
static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device may still be attached
	 * (currently, only one device can be attached).
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
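/*
 * The arch_data array built below is allocated with one extra zeroed element
 * as a sentinel; omap_iommu_count() and the attach/detach loops rely on the
 * NULL iommu_dev of that last entry to find the end of the list.
 */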
static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct platform_device *pdev;
	struct omap_iommu *oiommu;
	struct device_node *np;
	int num_iommus, i;

	/*
	 * Allocate the per-device iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		pdev = of_find_device_by_node(np);
		if (!pdev) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		tmp->iommu_dev = oiommu;
		tmp->dev = &pdev->dev;

		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);

	/*
	 * use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained for both IOMMUs
	 */
	oiommu = arch_data->iommu_dev;

	return &oiommu->iommu;
}
static void omap_iommu_release_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);

	if (!dev->of_node || !arch_data)
		return;

	dev_iommu_priv_set(dev, NULL);
	kfree(arch_data);
}
static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (!arch_data)
		return ERR_PTR(-ENODEV);

	if (arch_data->iommu_dev)
		group = iommu_group_ref_get(arch_data->iommu_dev->group);

	return group;
}
static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.probe_device	= omap_iommu_probe_device,
	.release_device	= omap_iommu_release_device,
	.device_group	= omap_iommu_device_group,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */