// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <asm/barrier.h>

#include <linux/align.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "ipu6.h"
#include "ipu6-dma.h"
#include "ipu6-mmu.h"
#include "ipu6-platform-regs.h"
#define ISP_PAGE_SHIFT		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_SHIFT		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

#define ISP_PADDR_SHIFT		12

#define REG_TLB_INVALIDATE	0x0000

#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
#define REG_INFO		0x0008

#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)
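
/*
 * The ISP MMU uses a two-level page table: a 32-bit IOVA splits into a
 * 10-bit L1 index (bits 31..22), a 10-bit L2 index (bits 21..12) and a
 * 12-bit page offset.  Each page table entry stores a page frame number,
 * which TBL_PHYS_ADDR() converts back to a physical address by shifting
 * left by ISP_PADDR_SHIFT.
 *
 * For example, for iova 0x12345678:
 *	l1 index = iova >> ISP_L1PT_SHIFT                   = 0x048
 *	l2 index = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT = 0x345
 *	offset   = iova & ~ISP_PAGE_MASK                    = 0x678
 */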
static void tlb_invalidate(struct ipu6_mmu *mmu)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}

	for (i = 0; i < mmu->nr_mmus; i++) {
		/*
		 * To avoid the HW bug induced dead lock in some of the IPU6
		 * MMUs on successive invalidate calls, we need to first do a
		 * read to the page table base before writing the invalidate
		 * register. MMUs which need to implement this WA will have
		 * the insert_read_before_invalidate flag set as true.
		 * Disregard the return value of the read.
		 */
		if (mmu->mmu_hw[i].insert_read_before_invalidate)
			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

		writel(0xffffffff, mmu->mmu_hw[i].base +
		       REG_TLB_INVALIDATE);

		/*
		 * The TLB invalidation is a "single cycle" (IOMMU clock
		 * cycles) operation.  When the actual MMIO write reaches the
		 * IPU6 TLB Invalidate register, wmb() will force the TLB
		 * invalidate out if the CPU attempts to update the IOMMU
		 * page table (or sooner).
		 */
		wmb();
	}
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
static void page_table_dump(struct ipu6_mmu_info *mmu_info)
{
	u32 l1_idx;

	dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		u32 l2_idx;
		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
		phys_addr_t l2_phys;

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
			continue;

		l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]);
		dev_dbg(mmu_info->dev,
			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n",
			l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys);

		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
			u32 *l2_pt = mmu_info->l2_pts[l1_idx];
			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
			phys_addr_t phys;

			if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
				continue;

			phys = TBL_PHYS_ADDR(l2_pt[l2_idx]);
			dev_dbg(mmu_info->dev,
				"\tl2 entry %u; iova 0x%8.8x, phys %pa\n",
				l2_idx, iova2, &phys);
		}
	}

	dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
}
static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr)
{
	dma_addr_t dma;

	dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu_info->dev, dma))
		return 0;

	return dma;
}
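
/*
 * Unused translations do not point at random memory: every L1 entry
 * initially refers to a shared "dummy" L2 page table, and every L2 entry
 * initially refers to a shared zeroed "dummy" page.  get_dummy_page() and
 * alloc_dummy_l2_pt() set up those two pages and record their pteval
 * encodings, which the rest of the code uses to recognise (and restore)
 * unmapped entries.
 */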
static int get_dummy_page(struct ipu6_mmu_info *mmu_info)
{
	void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map dummy page\n");
		goto err_free_page;
	}

	mmu_info->dummy_page = pt;
	mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}
static void free_dummy_page(struct ipu6_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_page);
}
static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l2pt page\n");
		goto err_free_page;
	}

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	mmu_info->dummy_l2_pt = pt;
	mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}
static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
}
static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_l2_pteval;

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l1pt page\n");
		goto err_free_page;
	}

	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);

	return pt;

err_free_page:
	free_page((unsigned long)pt);
	return NULL;
}
static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	return pt;
}
static void l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		     phys_addr_t dummy, size_t size)
{
	unsigned int l2_entries;
	unsigned long flags;
	u32 *l2_pt;
	u32 l1_idx;
	u32 l2_idx;

	spin_lock_irqsave(&mmu_info->lock, flags);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(mmu_info->dev,
			"unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
			l1_idx, iova);

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
			dev_err(mmu_info->dev,
				"unmap not mapped iova 0x%8.8lx l1 index %u\n",
				iova, l1_idx);
			continue;
		}
		l2_pt = mmu_info->l2_pts[l1_idx];

		l2_entries = 0;
		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);

			dev_dbg(mmu_info->dev,
				"unmap l2 index %u with pteval %pa\n",
				l2_idx, &pteval);
			l2_pt[l2_idx] = mmu_info->dummy_page_pteval;

			iova += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	spin_unlock_irqrestore(&mmu_info->lock, flags);
}
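
/*
 * Populate L2 entries for [iova, iova + size): L2 page tables are allocated
 * and hooked into the L1 table on demand, each written PTE gets the page
 * frame number of paddr, and the touched PTE cache lines are flushed with
 * clflush_cache_range() so the MMU sees the update when it walks the tables
 * from memory.  On failure the partially created mapping is unrolled again.
 */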
static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	struct device *dev = mmu_info->dev;
	unsigned int l2_entries;
	u32 *l2_pt, *l2_virt;
	unsigned int l2_idx;
	unsigned long flags;
	size_t mapped = 0;
	dma_addr_t dma;
	u32 l1_entry;
	u32 l1_idx;
	int err = 0;

	spin_lock_irqsave(&mmu_info->lock, flags);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(dev,
			"mapping l2 page table for l1 index %u (iova %8.8x)\n",
			l1_idx, (u32)iova);

		l1_entry = mmu_info->l1_pt[l1_idx];
		if (l1_entry == mmu_info->dummy_l2_pteval) {
			l2_virt = mmu_info->l2_pts[l1_idx];
			if (likely(!l2_virt)) {
				l2_virt = alloc_l2_pt(mmu_info);
				if (!l2_virt) {
					err = -ENOMEM;
					goto error;
				}
			}

			dma = map_single(mmu_info, l2_virt);
			if (!dma) {
				dev_err(dev, "Failed to map l2pt page\n");
				free_page((unsigned long)l2_virt);
				err = -EINVAL;
				goto error;
			}

			l1_entry = dma >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "page for l1_idx %u %p allocated\n",
				l1_idx, l2_virt);
			mmu_info->l1_pt[l1_idx] = l1_entry;
			mmu_info->l2_pts[l1_idx] = l2_virt;

			clflush_cache_range(&mmu_info->l1_pt[l1_idx],
					    sizeof(mmu_info->l1_pt[l1_idx]));
		}

		l2_pt = mmu_info->l2_pts[l1_idx];
		l2_entries = 0;

		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
				l2_pt[l2_idx]);

			iova += ISP_PAGE_SIZE;
			paddr += ISP_PAGE_SIZE;
			mapped += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return 0;

error:
	spin_unlock_irqrestore(&mmu_info->lock, flags);
	/* unroll mapping in case something went wrong */
	if (mapped)
		l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped);

	return err;
}
static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	dev_dbg(mmu_info->dev,
		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
		iova_start, iova_end, size, &paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}
static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
			     unsigned long iova, size_t size)
{
	l2_unmap(mmu_info, iova, 0, size);
}
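
/*
 * Reserve an 8 MB IOVA range and point every page of it at the single
 * physical trash page (mmu->trash_page), presumably so that any device
 * access through a stale IOVA lands on a harmless page instead of hitting
 * arbitrary memory.
 */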
static int allocate_trash_buffer(struct ipu6_mmu *mmu)
{
	unsigned int n_pages = PFN_UP(IPU6_MMUV2_TRASH_RANGE);
	struct iova *iova;
	unsigned int i;
	dma_addr_t dma;
	unsigned long iova_addr;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
			   PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
		ret = -ENOMEM;
		goto out_free_iova;
	}

	mmu->pci_trash_page = dma;

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page which is already reserved at the probe
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   mmu->pci_trash_page, PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_trash_page);
	return 0;

out_unmap:
	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));
	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}
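
/*
 * Program every MMU hardware instance: the L1 page table base, the info
 * bits and the per-stream L1/L2 TLB block start addresses.  On the first
 * call this also allocates the trash page and its IOVA range.  Once the
 * hardware is programmed the MMU is marked ready, which lets
 * tlb_invalidate() actually touch the invalidate registers.
 */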
int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
{
	struct ipu6_mmu_info *mmu_info;
	unsigned long flags;
	unsigned int i;

	mmu_info = mmu->dmap->mmu_info;

	/* Initialise each MMU HW block */
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int block_addr;
		unsigned int j;

		/* Write page table address per MMU */
		writel((phys_addr_t)mmu_info->l1_pt_dma,
		       mmu->mmu_hw[i].base + REG_L1_PHYS);

		/* Set info bits per MMU */
		writel(mmu->mmu_hw[i].info_bits,
		       mmu->mmu_hw[i].base + REG_INFO);

		/* Configure MMU TLB stream configuration for L1 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
			if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L1 configuration\n");
				return -EINVAL;
			}

			/* Write block start address for each stream */
			writel(block_addr, mmu_hw->base +
			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
		}

		/* Configure MMU TLB stream configuration for L2 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
			if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L2 configuration\n");
				return -EINVAL;
			}

			writel(block_addr, mmu_hw->base +
			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
		}
	}

	if (!mmu->trash_page) {
		int ret;

		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
			return -ENOMEM;
		}

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, "INTEL_IPU6");
static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
{
	struct ipu6_mmu_info *mmu_info;
	int ret;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	mmu_info->aperture_start = 0;
	mmu_info->aperture_end =
		(dma_addr_t)DMA_BIT_MASK(isp->secure_mode ?
					 IPU6_MMU_ADDR_BITS :
					 IPU6_MMU_ADDR_BITS_NON_SECURE);
	mmu_info->pgsize_bitmap = SZ_4K;
	mmu_info->dev = &isp->pdev->dev;

	ret = get_dummy_page(mmu_info);
	if (ret)
		goto err_free_info;

	ret = alloc_dummy_l2_pt(mmu_info);
	if (ret)
		goto err_free_dummy_page;

	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
	if (!mmu_info->l2_pts)
		goto err_free_dummy_l2_pt;

	/*
	 * We always map the L1 page table (a single page as well as
	 * the L2 page tables).
	 */
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}
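
/*
 * Counterpart of ipu6_mmu_hw_init(): clear the ready flag under ready_lock
 * so that later tlb_invalidate() calls skip the hardware registers.
 */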
void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, "INTEL_IPU6");
static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp)
{
	struct ipu6_dma_mapping *dmap;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu6_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	init_iova_domain(&dmap->iovad, SZ_4K, 1);
	dmap->mmu_info->dmap = dmap;

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	return dmap;
}
phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
				  dma_addr_t iova)
{
	phys_addr_t phy_addr;
	unsigned long flags;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}
void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		    size_t size)
{
	unsigned int min_pagesz;

	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address and the size of the mapping must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware.
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return;
	}

	__ipu6_mmu_unmap(mmu_info, iova, size);
}
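
/*
 * Create a mapping of size bytes from paddr at iova.  Both addresses and
 * the size must be aligned to the smallest supported page size (4 KiB here,
 * per pgsize_bitmap); the actual page table update is done by
 * __ipu6_mmu_map().
 */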
int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	unsigned int min_pagesz;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	return __ipu6_mmu_map(mmu_info, iova, paddr, size);
}
static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
{
	struct ipu6_dma_mapping *dmap = mmu->dmap;
	struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
				       PFN_PHYS(iova_size(iova)));
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		mmu->pci_trash_page = 0;
		__free_page(mmu->trash_page);
	}

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
			dma_unmap_single(mmu_info->dev,
					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
					 PAGE_SIZE, DMA_BIDIRECTIONAL);
			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
		}
	}

	vfree(mmu_info->l2_pts);
	free_dummy_page(mmu_info);
	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	kfree(mmu_info);
}
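
/*
 * Create the MMU context for one IPU6 bus device: copy and validate the
 * per-instance hardware description from the hw variants table, then
 * allocate the DMA mapping (IOVA domain plus page tables) it will use.
 */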
struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
			       void __iomem *base, int mmid,
			       const struct ipu6_hw_variants *hw)
{
	struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev));
	struct ipu6_mmu_pdata *pdata;
	struct ipu6_mmu *mmu;
	unsigned int i;

	if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;

	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}
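
/*
 * Tear down everything ipu6_mmu_init() created: the page tables, the trash
 * buffer mapping and the IOVA domain.
 */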
void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
{
	struct ipu6_dma_mapping *dmap = mmu->dmap;

	ipu6_mmu_destroy(mmu);
	mmu->dmap = NULL;
	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}