// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO           BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

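/*
 * Page table backing pages are allocated one 4KB page at a time: each page is
 * zeroed, remapped as write-combined for the CPU and DMA-mapped so the device
 * MMU can walk it. The matching free helper below undoes all three steps.
 */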
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
        dma_addr_t dma_addr;
        struct page *page;
        void *cpu;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
                return NULL;

        set_pages_array_wc(&page, 1);

        dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(vdev->drm.dev, dma_addr))
                goto err_free_page;

        cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!cpu)
                goto err_dma_unmap_page;

        *dma = dma_addr;
        return cpu;

err_dma_unmap_page:
        dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
        put_page(page);
        return NULL;
}

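/*
 * Reverse of ivpu_pgtable_alloc_page(): unmap the page from the device,
 * restore write-back caching and release it. The DMA address stored in a
 * parent entry may still carry entry flags, hence the masking below.
 */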
static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        struct page *page;

        if (cpu_addr) {
                page = vmalloc_to_page(cpu_addr);
                vunmap(cpu_addr);
                dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                set_pages_array_wb(&page, 1);
                put_page(page);
        }
}

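/*
 * Walk the whole 4-level page table and release every level that was
 * allocated: PTE pages, PMD pages, PUD pages and finally the PGD itself,
 * together with the kernel-side arrays that track the CPU pointers.
 */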
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
        pgtable->pgd_dma_ptr = NULL;
        pgtable->pgd_dma = 0;
}

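/*
 * The ivpu_mmu_ensure_*() helpers lazily allocate one level of the page table
 * and return the CPU pointer to it, or NULL on allocation failure. Already
 * existing levels are returned as-is, so callers may invoke them
 * unconditionally on every map operation.
 */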
static u64*
ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
        dma_addr_t pgd_dma;

        if (pgd_dma_ptr)
                return pgd_dma_ptr;

        pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
        if (!pgd_dma_ptr)
                return NULL;

        pgtable->pgd_dma_ptr = pgd_dma_ptr;
        pgtable->pgd_dma = pgd_dma;

        return pgd_dma_ptr;
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

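/*
 * Map a single 4KB page: allocate any missing page table levels for vpu_addr
 * and write the PTE with the DMA address and protection flags.
 */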
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);

        /* Allocate PGD - first level page table if needed */
        if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
                return -ENOMEM;

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

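/*
 * Map 64KB as 16 contiguous 4KB PTEs with the contiguous hint bit set, so the
 * MMU can cache the whole range with a single TLB entry. Both addresses must
 * be 64KB aligned.
 */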
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

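/*
 * Map an arbitrary, page-aligned range. 64KB contiguous mappings are used
 * whenever both addresses are suitably aligned and the feature is not
 * disabled via the ivpu_disable_mmu_cont_pages knob.
 */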
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                         u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}

static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                        u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                            u64 vpu_addr)
{
        u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
        u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
        u64 offset = 0;

        ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

        while (start + offset < end) {
                ivpu_mmu_context_split_page(vdev, ctx, start + offset);
                offset += IVPU_MMU_PAGE_SIZE;
        }
}

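/*
 * Mark an already mapped, page-aligned range as read-only. 64KB contiguous
 * mappings that straddle the range boundaries are first split back into 4KB
 * PTEs so only the requested pages lose write access.
 */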
int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              size_t size)
{
        u64 end = vpu_addr + size;
        size_t size_left = size;
        int ret;

        if (size == 0)
                return 0;

        if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
                return -EINVAL;

        mutex_lock(&ctx->lock);

        ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
                 ctx->id, vpu_addr, size);

        if (!ivpu_disable_mmu_cont_pages) {
                /* Split 64K contiguous page at the beginning if needed */
                if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
                        ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

                /* Split 64K contiguous page at the end if needed */
                if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
                        ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
        }

        while (size_left) {
                if (vpu_addr < end)
                        ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size_left -= IVPU_MMU_PAGE_SIZE;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

        return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

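/*
 * Map a whole scatter-gather table at vpu_addr. On the first successful map
 * for a context its context descriptor is programmed, then the write-combine
 * buffers are flushed and the TLB is invalidated so the device sees the new
 * entries. On failure everything mapped so far is unmapped again.
 */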
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        size_t start_vpu_addr = vpu_addr;
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return -EINVAL;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;

        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        goto err_unmap_pages;
                }
                vpu_addr += size;
        }

        if (!ctx->is_cd_valid) {
                ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
                if (ret) {
                        ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
                                 ctx->id, ret);
                        goto err_unmap_pages;
                }
                ctx->is_cd_valid = true;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret) {
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
                goto err_unmap_pages;
        }

        mutex_unlock(&ctx->lock);
        return 0;

err_unmap_pages:
        ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
        mutex_unlock(&ctx->lock);
        return ret;
}

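/*
 * Unmap a previously mapped scatter-gather table, flush the page table
 * updates from the write-combine buffers and invalidate the TLB for the
 * context.
 */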
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

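/*
 * Reserve a VPU address range for a buffer. A 64KB-aligned slot is preferred
 * when contiguous mappings are enabled and the buffer is large enough,
 * falling back to 4KB alignment if that fails.
 */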
int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
                             u64 size, struct drm_mm_node *node)
{
        int ret;

        mutex_lock(&ctx->lock);
        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                  range->start, range->end, DRM_MM_INSERT_BEST);
                if (!ret)
                        goto unlock;
        }

        ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                          range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        mutex_lock(&ctx->lock);
        drm_mm_remove_node(node);
        mutex_unlock(&ctx->lock);
}

void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;

        mutex_init(&ctx->lock);

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
                end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;
}

void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (ctx->is_cd_valid) {
                ivpu_mmu_cd_clear(vdev, ctx->id);
                ctx->is_cd_valid = false;
        }

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);
}

void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

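/*
 * Set up the reserved MMU context: it gets a root page table and a context
 * descriptor, but regular pages are never mapped into it (see the WARN in
 * ivpu_mmu_context_map_page()).
 */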
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
        int ret;

        ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);

        mutex_lock(&vdev->rctx.lock);

        if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
                ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
                ret = -ENOMEM;
                goto unlock;
        }

        ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
                goto unlock;
        }

unlock:
        mutex_unlock(&vdev->rctx.lock);
        return ret;
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
        ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
        ivpu_mmu_context_fini(vdev, &vdev->rctx);
}