// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <asm/barrier.h>

#include <linux/align.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "ipu6.h"
#include "ipu6-dma.h"
#include "ipu6-mmu.h"
#include "ipu6-platform-regs.h"

#define ISP_PAGE_SHIFT		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_SHIFT		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

#define ISP_PADDR_SHIFT		12

#define REG_TLB_INVALIDATE	0x0000

#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
#define REG_INFO		0x0008

#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)

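/*
 * Invalidate the TLBs of all MMU HW units belonging to this MMU. Does
 * nothing until the MMU hardware has been initialised and marked ready
 * by ipu6_mmu_hw_init().
 */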
static void tlb_invalidate(struct ipu6_mmu *mmu)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}

	for (i = 0; i < mmu->nr_mmus; i++) {
		/*
		 * To avoid the HW bug induced dead lock in some of the IPU6
		 * MMUs on successive invalidate calls, we need to first do a
		 * read to the page table base before writing the invalidate
		 * register. MMUs which need to implement this WA have the
		 * insert_read_before_invalidate flag set to true.
		 * Disregard the return value of the read.
		 */
		if (mmu->mmu_hw[i].insert_read_before_invalidate)
			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

		writel(0xffffffff, mmu->mmu_hw[i].base +
		       REG_TLB_INVALIDATE);

		/*
		 * The TLB invalidation is a "single cycle" (IOMMU clock
		 * cycles) operation. When the actual MMIO write reaches the
		 * IPU6 TLB Invalidate register, wmb() will force the TLB
		 * invalidate out if the CPU attempts to update the IOMMU
		 * page table (or sooner).
		 */
		wmb();
	}
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}

#ifdef DEBUG
static void page_table_dump(struct ipu6_mmu_info *mmu_info)
{
	u32 l1_idx;

	dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		u32 l2_idx;
		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
		phys_addr_t l2_phys;

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
			continue;

		l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]);
		dev_dbg(mmu_info->dev,
			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n",
			l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys);

		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
			u32 *l2_pt = mmu_info->l2_pts[l1_idx];
			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);

			if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
				continue;

			dev_dbg(mmu_info->dev,
				"\tl2 entry %u; iova 0x%8.8x, phys %pa\n",
				l2_idx, iova2,
				TBL_PHYS_ADDR(l2_pt[l2_idx]));
		}
	}

	dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
}
#endif /* DEBUG */

static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr)
{
	dma_addr_t dma;

	dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu_info->dev, dma))
		return 0;

	return dma;
}

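/*
 * Allocate and DMA-map the dummy page. Its pfn, dummy_page_pteval, is
 * written into every L2 page table entry that does not map a real page.
 */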
static int get_dummy_page(struct ipu6_mmu_info *mmu_info)
{
	void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map dummy page\n");
		goto err_free_page;
	}

	mmu_info->dummy_page = pt;
	mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}

static void free_dummy_page(struct ipu6_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_page);
}

static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l2pt page\n");
		goto err_free_page;
	}

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	mmu_info->dummy_l2_pt = pt;
	mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}

static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
}

static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_l2_pteval;

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l1pt page\n");
		goto err_free_page;
	}

	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);

	return pt;

err_free_page:
	free_page((unsigned long)pt);
	return NULL;
}

static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	return pt;
}

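/*
 * Point the L2 page table entries covering [iova, iova + size) back at
 * the dummy page and flush the modified PTEs from the CPU cache. No TLB
 * invalidation is performed here.
 */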
static void l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		     phys_addr_t dummy, size_t size)
{
	unsigned int l2_entries;
	unsigned int l2_idx;
	unsigned long flags;
	u32 l1_idx;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(mmu_info->dev,
			"unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
			l1_idx, iova);

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
			dev_err(mmu_info->dev,
				"unmap not mapped iova 0x%8.8lx l1 index %u\n",
				iova, l1_idx);
			continue;
		}
		l2_pt = mmu_info->l2_pts[l1_idx];

		l2_entries = 0;
		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);

			dev_dbg(mmu_info->dev,
				"unmap l2 index %u with pteval 0x%p\n",
				l2_idx, &pteval);
			l2_pt[l2_idx] = mmu_info->dummy_page_pteval;

			iova += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	WARN_ON_ONCE(size);
	spin_unlock_irqrestore(&mmu_info->lock, flags);
}

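/*
 * Map [iova, iova + size) to paddr in the L2 page tables, allocating and
 * DMA-mapping L2 tables on demand and flushing the updated L1/L2 entries
 * from the CPU cache. On failure any partially created mapping is rolled
 * back with l2_unmap().
 */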
static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	struct device *dev = mmu_info->dev;
	unsigned int l2_entries;
	u32 *l2_pt, *l2_virt;
	unsigned int l2_idx;
	unsigned long flags;
	size_t mapped = 0;
	dma_addr_t dma;
	u32 l1_entry;
	u32 l1_idx;
	int err = 0;

	spin_lock_irqsave(&mmu_info->lock, flags);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(dev,
			"mapping l2 page table for l1 index %u (iova %8.8x)\n",
			l1_idx, (u32)iova);

		l1_entry = mmu_info->l1_pt[l1_idx];
		if (l1_entry == mmu_info->dummy_l2_pteval) {
			l2_virt = mmu_info->l2_pts[l1_idx];
			if (likely(!l2_virt)) {
				l2_virt = alloc_l2_pt(mmu_info);
				if (!l2_virt) {
					err = -ENOMEM;
					goto error;
				}
			}

			dma = map_single(mmu_info, l2_virt);
			if (!dma) {
				dev_err(dev, "Failed to map l2pt page\n");
				free_page((unsigned long)l2_virt);
				err = -EINVAL;
				goto error;
			}

			l1_entry = dma >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "page for l1_idx %u %p allocated\n",
				l1_idx, l2_virt);
			mmu_info->l1_pt[l1_idx] = l1_entry;
			mmu_info->l2_pts[l1_idx] = l2_virt;

			clflush_cache_range(&mmu_info->l1_pt[l1_idx],
					    sizeof(mmu_info->l1_pt[l1_idx]));
		}

		l2_pt = mmu_info->l2_pts[l1_idx];
		l2_entries = 0;

		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
				l2_pt[l2_idx]);

			iova += ISP_PAGE_SIZE;
			paddr += ISP_PAGE_SIZE;
			mapped += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return 0;

error:
	spin_unlock_irqrestore(&mmu_info->lock, flags);
	/* unroll mapping in case something went wrong */
	if (mapped)
		l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped);

	return err;
}

static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	dev_dbg(mmu_info->dev,
		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
		iova_start, iova_end, size, &paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}

static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
			     unsigned long iova, size_t size)
{
	l2_unmap(mmu_info, iova, 0, size);
}

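/*
 * Reserve an IPU6_MMUV2_TRASH_RANGE sized IOVA range and map every page
 * in it to the single physical trash page allocated in
 * ipu6_mmu_hw_init().
 */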
static int allocate_trash_buffer(struct ipu6_mmu *mmu)
{
	unsigned int n_pages = PFN_UP(IPU6_MMUV2_TRASH_RANGE);
	struct iova *iova;
	unsigned int i;
	dma_addr_t dma;
	unsigned long iova_addr;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
			   PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
		ret = -ENOMEM;
		goto out_free_iova;
	}

	mmu->pci_trash_page = dma;

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page which is already reserved at probe time.
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   mmu->pci_trash_page, PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_trash_page);
	return 0;

out_unmap:
	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));
	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}

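/*
 * Program each MMU HW unit: the L1 page table base address, the info
 * bits and the L1/L2 TLB stream block addresses. Allocates the trash
 * page and its IOVA mapping on first use, then marks the MMU as ready
 * so that TLB invalidation takes effect.
 */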
int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
{
	struct ipu6_mmu_info *mmu_info;
	unsigned long flags;
	unsigned int i;

	mmu_info = mmu->dmap->mmu_info;

	/* Initialise each MMU HW block */
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;
		u16 block_addr;

		/* Write page table address per MMU */
		writel((phys_addr_t)mmu_info->l1_pt_dma,
		       mmu->mmu_hw[i].base + REG_L1_PHYS);

		/* Set info bits per MMU */
		writel(mmu->mmu_hw[i].info_bits,
		       mmu->mmu_hw[i].base + REG_INFO);

		/* Configure MMU TLB stream configuration for L1 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
			if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L1 configuration\n");
				return -EINVAL;
			}

			/* Write block start address for each stream */
			writel(block_addr, mmu_hw->base +
			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
		}

		/* Configure MMU TLB stream configuration for L2 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
			if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L2 configuration\n");
				return -EINVAL;
			}

			writel(block_addr, mmu_hw->base +
			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
		}
	}

	if (!mmu->trash_page) {
		int ret;

		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
			return -ENOMEM;
		}

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, "INTEL_IPU6");

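/*
 * Allocate the software page table state: the dummy page, the dummy L2
 * table, the l2_pts pointer array and the L1 page table, which is
 * pre-filled with the dummy L2 entry.
 */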
static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
{
	struct ipu6_mmu_info *mmu_info;
	int ret;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	mmu_info->aperture_start = 0;
	mmu_info->aperture_end =
		(dma_addr_t)DMA_BIT_MASK(isp->secure_mode ?
					 IPU6_MMU_ADDR_BITS :
					 IPU6_MMU_ADDR_BITS_NON_SECURE);
	mmu_info->pgsize_bitmap = SZ_4K;
	mmu_info->dev = &isp->pdev->dev;

	ret = get_dummy_page(mmu_info);
	if (ret)
		goto err_free_info;

	ret = alloc_dummy_l2_pt(mmu_info);
	if (ret)
		goto err_free_dummy_page;

	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
	if (!mmu_info->l2_pts)
		goto err_free_dummy_l2_pt;

	/*
	 * We always map the L1 page table (a single page) as well as
	 * the L2 page tables.
	 */
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}

void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, "INTEL_IPU6");

static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp)
{
	struct ipu6_dma_mapping *dmap;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu6_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	init_iova_domain(&dmap->iovad, SZ_4K, 1);
	dmap->mmu_info->dmap = dmap;

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	iova_cache_get();

	return dmap;
}

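/*
 * Translate an IOVA to a physical address by looking it up in the L2
 * page tables under the page table lock.
 */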
phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
				  dma_addr_t iova)
{
	phys_addr_t phy_addr;
	unsigned long flags;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}

void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		    size_t size)
{
	unsigned int min_pagesz;

	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address and the size of the mapping must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware.
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return;
	}

	__ipu6_mmu_unmap(mmu_info, iova, size);
}

int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	unsigned int min_pagesz;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * Both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware.
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	return __ipu6_mmu_map(mmu_info, iova, paddr, size);
}

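/*
 * Tear down the trash buffer mapping and free the L2 page tables, the
 * dummy pages and the L1 page table.
 */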
static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
{
	struct ipu6_dma_mapping *dmap = mmu->dmap;
	struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
				       PFN_PHYS(iova_size(iova)));
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		mmu->pci_trash_page = 0;
		__free_page(mmu->trash_page);
	}

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
			dma_unmap_single(mmu_info->dev,
					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
					 PAGE_SIZE, DMA_BIDIRECTIONAL);
			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
		}
	}

	vfree(mmu_info->l2_pts);
	free_dummy_page(mmu_info);
	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	kfree(mmu_info);
}

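/*
 * Allocate and initialise an ipu6_mmu instance from the hardware
 * description in @hw: copy the per-MMU HW parameters and set up the DMA
 * mapping and IOVA domain. The MMU hardware itself is programmed later
 * by ipu6_mmu_hw_init().
 */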
struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
			       void __iomem *base, int mmid,
			       const struct ipu6_hw_variants *hw)
{
	struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev));
	struct ipu6_mmu_pdata *pdata;
	struct ipu6_mmu *mmu;
	unsigned int i;

	if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;
	mmu->ready = false;
	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}

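/*
 * Release everything created by ipu6_mmu_init(): the page tables, the
 * trash buffer mapping and the IOVA domain.
 */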
void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
{
	struct ipu6_dma_mapping *dmap = mmu->dmap;

	ipu6_mmu_destroy(mmu);
	mmu->dmap = NULL;
	iova_cache_put();
	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}