// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */

#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/genalloc.h>
#include <linux/slab.h>

static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
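/*
 * get_pgt_info() - look up the pgt_info that tracks the shadow hop residing
 * at @hop_addr, using the per-context shadow hash. Returns NULL when the
 * address is not tracked in the hash.
 */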
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = NULL;

        hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
                                (unsigned long) hop_addr)
                if (hop_addr == pgt_info->shadow_addr)
                        break;

        return pgt_info;
}
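/*
 * free_hop() - release a single hop page table: return its physical page to
 * the device page-table pool, unlink its pgt_info from the shadow hash and
 * free the host-side shadow copy.
 */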
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
        struct hl_device *hdev = ctx->hdev;
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

        gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
                        hdev->asic_prop.mmu_hop_table_size);
        hash_del(&pgt_info->node);
        kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
        kfree(pgt_info);
}
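/*
 * alloc_hop() - allocate a new hop page table. A physical page is taken from
 * the device page-table pool and a zeroed host ("shadow") copy of the same
 * size is allocated, so PTEs can be read back without touching device memory.
 * Returns the shadow address of the new hop, or ULLONG_MAX on failure.
 */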
static u64 alloc_hop(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pgt_info *pgt_info;
        u64 phys_addr, shadow_addr;

        pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
        if (!pgt_info)
                return ULLONG_MAX;

        phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
                                        prop->mmu_hop_table_size);
        if (!phys_addr) {
                dev_err(hdev->dev, "failed to allocate page\n");
                goto pool_alloc_err;
        }

        shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
                                                GFP_KERNEL);
        if (!shadow_addr)
                goto shadow_err;

        pgt_info->phys_addr = phys_addr;
        pgt_info->shadow_addr = shadow_addr;
        pgt_info->num_of_ptes = 0;
        hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

        return shadow_addr;

shadow_err:
        gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
pool_alloc_err:
        kfree(pgt_info);

        return ULLONG_MAX;
}
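/*
 * hop0 tables are pre-allocated for all ASIDs as one contiguous block, both
 * in device memory (starting at mmu_pgt_addr) and in the host shadow array
 * (mmu_shadow_hop0). The two helpers below return the physical and the
 * shadow hop0 address of the given context by indexing with its ASID.
 */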
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
        return ctx->hdev->asic_prop.mmu_pgt_addr +
                        (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
        return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
                        (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
static inline void flush(struct hl_ctx *ctx)
{
        /* flush all writes from all cores to reach PCI */
        mb();
        ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}
/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
        /*
         * The value to write is actually the address of the next shadow hop +
         * flags at the 12 LSBs.
         * Hence in order to get the value to write to the physical PTE, we
         * clear the 12 LSBs and translate the shadow hop to its associated
         * physical hop, and add back the original 12 LSBs.
         */
        u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
                        (val & ~PTE_PHYS_ADDR_MASK);

        ctx->hdev->asic_funcs->write_pte(ctx->hdev,
                                        get_phys_addr(ctx, shadow_pte_addr),
                                        phys_val);

        *(u64 *) (uintptr_t) shadow_pte_addr = val;
}
/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
                                        u64 val)
{
        ctx->hdev->asic_funcs->write_pte(ctx->hdev,
                                        get_phys_addr(ctx, shadow_pte_addr),
                                        val);

        *(u64 *) (uintptr_t) shadow_pte_addr = val;
}
/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
        /* no need to transform the value to physical address */
        write_final_pte(ctx, pte_addr, 0);
}
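/*
 * get_pte()/put_pte() maintain a per-hop reference count of valid PTEs so
 * that a hop can be freed as soon as its last PTE is cleared.
 */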
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
        get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}
/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the pte was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
        int num_of_ptes_left;

        pgt_info->num_of_ptes--;

        /*
         * Need to save the number of ptes left because free_hop might free
         * the pgt_info structure
         */
        num_of_ptes_left = pgt_info->num_of_ptes;
        if (!num_of_ptes_left)
                free_hop(ctx, hop_addr);

        return num_of_ptes_left;
}
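/*
 * get_hopN_pte_addr() - compute the address of the PTE inside @hop_addr that
 * translates @virt_addr: the relevant virtual-address bits are selected with
 * @mask, shifted down to an index and scaled by the PTE size.
 */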
static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
                                        u64 virt_addr, u64 mask, u64 shift)
{
        return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
                        ((virt_addr & mask) >> shift);
}
static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
}
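/*
 * get_next_hop_addr() - extract the address of the next-level hop from a PTE.
 * Returns ULLONG_MAX if the PTE is not marked present.
 */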
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
        if (curr_pte & PAGE_PRESENT_MASK)
                return curr_pte & PHYS_ADDR_MASK;
        else
                return ULLONG_MAX;
}
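/*
 * get_alloc_next_hop_addr() - like get_next_hop_addr(), but allocates the
 * next hop if the PTE is not present. *is_new_hop reports whether a new hop
 * was allocated, so the caller knows to hook it into the parent hop and to
 * free it on an error path.
 */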
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
                                                bool *is_new_hop)
{
        u64 hop_addr = get_next_hop_addr(ctx, curr_pte);

        if (hop_addr == ULLONG_MAX) {
                hop_addr = alloc_hop(ctx);
                *is_new_hop = (hop_addr != ULLONG_MAX);
        }

        return hop_addr;
}
/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
        u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
        u64 shadow_hop_addr = shadow_addr & ~page_mask;
        u64 pte_offset = shadow_addr & page_mask;
        u64 phys_hop_addr;

        if (shadow_hop_addr != get_hop0_addr(ctx))
                phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
        else
                phys_hop_addr = get_phys_hop0_addr(ctx);

        return phys_hop_addr + pte_offset;
}
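/*
 * dram_default_mapping_init() - map the entire DRAM virtual range of a user
 * context to the single default DRAM page. A hop1/hop2 chain plus
 * num_of_hop3 hop3 tables are allocated, and every hop3 PTE is set to point
 * at mmu_dram_default_page_addr. Skipped when the device lacks DRAM virtual
 * memory / default-page support, or for the kernel ASID.
 */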
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
                hop2_pte_addr, hop3_pte_addr, pte_val;
        int rc, i, j, hop3_allocated = 0;

        if ((!hdev->dram_supports_virtual_memory) ||
                        (!hdev->dram_default_page_mapping) ||
                        (ctx->asid == HL_KERNEL_ASID_ID))
                return 0;

        num_of_hop3 = prop->dram_size_for_default_page_mapping;
        do_div(num_of_hop3, prop->dram_page_size);
        do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

        /* add hop1 and hop2 */
        total_hops = num_of_hop3 + 2;

        ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
        if (!ctx->dram_default_hops)
                return -ENOMEM;

        hop0_addr = get_hop0_addr(ctx);

        hop1_addr = alloc_hop(ctx);
        if (hop1_addr == ULLONG_MAX) {
                dev_err(hdev->dev, "failed to alloc hop 1\n");
                rc = -ENOMEM;
                goto hop1_err;
        }

        ctx->dram_default_hops[total_hops - 1] = hop1_addr;

        hop2_addr = alloc_hop(ctx);
        if (hop2_addr == ULLONG_MAX) {
                dev_err(hdev->dev, "failed to alloc hop 2\n");
                rc = -ENOMEM;
                goto hop2_err;
        }

        ctx->dram_default_hops[total_hops - 2] = hop2_addr;

        for (i = 0 ; i < num_of_hop3 ; i++) {
                ctx->dram_default_hops[i] = alloc_hop(ctx);
                if (ctx->dram_default_hops[i] == ULLONG_MAX) {
                        dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
                        rc = -ENOMEM;
                        goto hop3_err;
                }
                hop3_allocated++;
        }

        /* need only pte 0 in hops 0 and 1 */
        pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
        write_pte(ctx, hop0_addr, pte_val);

        pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
        write_pte(ctx, hop1_addr, pte_val);
        get_pte(ctx, hop1_addr);

        hop2_pte_addr = hop2_addr;
        for (i = 0 ; i < num_of_hop3 ; i++) {
                pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
                write_pte(ctx, hop2_pte_addr, pte_val);
                get_pte(ctx, hop2_addr);
                hop2_pte_addr += HL_PTE_SIZE;
        }

        pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
                        LAST_MASK | PAGE_PRESENT_MASK;

        for (i = 0 ; i < num_of_hop3 ; i++) {
                hop3_pte_addr = ctx->dram_default_hops[i];
                for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
                        write_final_pte(ctx, hop3_pte_addr, pte_val);
                        get_pte(ctx, ctx->dram_default_hops[i]);
                        hop3_pte_addr += HL_PTE_SIZE;
                }
        }

        flush(ctx);

        return 0;

hop3_err:
        for (i = 0 ; i < hop3_allocated ; i++)
                free_hop(ctx, ctx->dram_default_hops[i]);

        free_hop(ctx, hop2_addr);
hop2_err:
        free_hop(ctx, hop1_addr);
hop1_err:
        kfree(ctx->dram_default_hops);

        return rc;
}
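/*
 * dram_default_mapping_fini() - tear down the default DRAM mapping built by
 * dram_default_mapping_init(): clear all hop3/hop2/hop1/hop0 PTEs, drop the
 * matching references and free the saved hop list.
 */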
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
                hop2_pte_addr, hop3_pte_addr;
        int i, j;

        if ((!hdev->dram_supports_virtual_memory) ||
                        (!hdev->dram_default_page_mapping) ||
                        (ctx->asid == HL_KERNEL_ASID_ID))
                return;

        num_of_hop3 = prop->dram_size_for_default_page_mapping;
        do_div(num_of_hop3, prop->dram_page_size);
        do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

        hop0_addr = get_hop0_addr(ctx);
        /* add hop1 and hop2 */
        total_hops = num_of_hop3 + 2;
        hop1_addr = ctx->dram_default_hops[total_hops - 1];
        hop2_addr = ctx->dram_default_hops[total_hops - 2];

        for (i = 0 ; i < num_of_hop3 ; i++) {
                hop3_pte_addr = ctx->dram_default_hops[i];
                for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
                        clear_pte(ctx, hop3_pte_addr);
                        put_pte(ctx, ctx->dram_default_hops[i]);
                        hop3_pte_addr += HL_PTE_SIZE;
                }
        }

        hop2_pte_addr = hop2_addr;
        for (i = 0 ; i < num_of_hop3 ; i++) {
                clear_pte(ctx, hop2_pte_addr);
                put_pte(ctx, hop2_addr);
                hop2_pte_addr += HL_PTE_SIZE;
        }

        clear_pte(ctx, hop1_addr);
        put_pte(ctx, hop1_addr);
        clear_pte(ctx, hop0_addr);

        kfree(ctx->dram_default_hops);

        flush(ctx);
}
/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        int rc;

        if (!hdev->mmu_enable)
                return 0;

        /* MMU H/W init was already done in device hw_init() */

        hdev->mmu_pgt_pool =
                        gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

        if (!hdev->mmu_pgt_pool) {
                dev_err(hdev->dev, "Failed to create page gen pool\n");
                return -ENOMEM;
        }

        rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
                        prop->mmu_hop0_tables_total_size,
                        prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
                        -1);
        if (rc) {
                dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
                goto err_pool_add;
        }

        hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
                                        prop->mmu_hop_table_size,
                                        GFP_KERNEL | __GFP_ZERO);
        if (!hdev->mmu_shadow_hop0) {
                rc = -ENOMEM;
                goto err_pool_add;
        }

        return 0;

err_pool_add:
        gen_pool_destroy(hdev->mmu_pgt_pool);

        return rc;
}
/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
        if (!hdev->mmu_enable)
                return;

        kvfree(hdev->mmu_shadow_hop0);
        gen_pool_destroy(hdev->mmu_pgt_pool);

        /* MMU H/W fini will be done in device hw_fini() */
}
/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash to
 * hold all page table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;

        if (!hdev->mmu_enable)
                return 0;

        mutex_init(&ctx->mmu_lock);
        hash_init(ctx->mmu_phys_hash);
        hash_init(ctx->mmu_shadow_hash);

        return dram_default_mapping_init(ctx);
}
/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct pgt_info *pgt_info;
        struct hlist_node *tmp;
        int i;

        if (!hdev->mmu_enable)
                return;

        dram_default_mapping_fini(ctx);

        if (!hash_empty(ctx->mmu_shadow_hash))
                dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");

        hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
                dev_err(hdev->dev,
                        "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
                        pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
                free_hop(ctx, pgt_info->shadow_addr);
        }

        mutex_destroy(&ctx->mmu_lock);
}
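/*
 * _hl_mmu_unmap() - unmap a single 4KB or 2MB page at @virt_addr. The page
 * walk goes through hop0..hop3 and, for 4KB (non-huge) pages, hop4 as well.
 * For DRAM addresses with default-page mapping enabled, the hop3 PTE is
 * restored to point at the default page instead of being cleared, and no
 * hops are freed.
 */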
static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 hop0_addr = 0, hop0_pte_addr = 0,
                hop1_addr = 0, hop1_pte_addr = 0,
                hop2_addr = 0, hop2_pte_addr = 0,
                hop3_addr = 0, hop3_pte_addr = 0,
                hop4_addr = 0, hop4_pte_addr = 0,
                curr_pte;
        bool is_dram_addr, is_huge, clear_hop3 = true;

        is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
                                prop->va_space_dram_start_address,
                                prop->va_space_dram_end_address);

        hop0_addr = get_hop0_addr(ctx);
        hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);

        curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

        hop1_addr = get_next_hop_addr(ctx, curr_pte);

        if (hop1_addr == ULLONG_MAX)
                goto not_mapped;

        hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);

        curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

        hop2_addr = get_next_hop_addr(ctx, curr_pte);

        if (hop2_addr == ULLONG_MAX)
                goto not_mapped;

        hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);

        curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

        hop3_addr = get_next_hop_addr(ctx, curr_pte);

        if (hop3_addr == ULLONG_MAX)
                goto not_mapped;

        hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);

        curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

        is_huge = curr_pte & LAST_MASK;

        if (is_dram_addr && !is_huge) {
                dev_err(hdev->dev,
                        "DRAM unmapping should use huge pages only\n");
                return -EFAULT;
        }

        if (!is_huge) {
                hop4_addr = get_next_hop_addr(ctx, curr_pte);

                if (hop4_addr == ULLONG_MAX)
                        goto not_mapped;

                hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);

                curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;

                clear_hop3 = false;
        }

        if (hdev->dram_default_page_mapping && is_dram_addr) {
                u64 default_pte = (prop->mmu_dram_default_page_addr &
                                PTE_PHYS_ADDR_MASK) | LAST_MASK |
                                        PAGE_PRESENT_MASK;

                if (curr_pte == default_pte) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
                                        virt_addr);
                        goto not_mapped;
                }

                if (!(curr_pte & PAGE_PRESENT_MASK)) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
                                        virt_addr);
                        goto not_mapped;
                }

                write_final_pte(ctx, hop3_pte_addr, default_pte);
                put_pte(ctx, hop3_addr);
        } else {
                if (!(curr_pte & PAGE_PRESENT_MASK))
                        goto not_mapped;

                if (hop4_addr)
                        clear_pte(ctx, hop4_pte_addr);
                else
                        clear_pte(ctx, hop3_pte_addr);

                if (hop4_addr && !put_pte(ctx, hop4_addr))
                        clear_hop3 = true;

                if (!clear_hop3)
                        goto flush;

                clear_pte(ctx, hop3_pte_addr);

                if (put_pte(ctx, hop3_addr))
                        goto flush;

                clear_pte(ctx, hop2_pte_addr);

                if (put_pte(ctx, hop2_addr))
                        goto flush;

                clear_pte(ctx, hop1_pte_addr);

                if (put_pte(ctx, hop1_addr))
                        goto flush;

                clear_pte(ctx, hop0_pte_addr);
        }

flush:
        flush(ctx);

        return 0;

not_mapped:
        dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
                virt_addr);

        return -EINVAL;
}
/*
 * hl_mmu_unmap - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory area.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
        struct hl_device *hdev = ctx->hdev;
        u64 real_virt_addr;
        u32 real_page_size, npages;
        int i, rc;

        if (!hdev->mmu_enable)
                return 0;

        /*
         * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
         * is bigger, we break it to sub-pages and unmap them separately.
         */
        if ((page_size % PAGE_SIZE_2MB) == 0) {
                real_page_size = PAGE_SIZE_2MB;
        } else if ((page_size % PAGE_SIZE_4KB) == 0) {
                real_page_size = PAGE_SIZE_4KB;
        } else {
                dev_err(hdev->dev,
                        "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
                        page_size);

                return -EFAULT;
        }

        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;

        for (i = 0 ; i < npages ; i++) {
                rc = _hl_mmu_unmap(ctx, real_virt_addr);
                if (rc)
                        return rc;

                real_virt_addr += real_page_size;
        }

        return 0;
}
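/*
 * _hl_mmu_map() - map a single 4KB or 2MB page. Missing hops along the walk
 * are allocated on the fly; on failure, any hop that was newly allocated for
 * this mapping is freed again.
 */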
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                u32 page_size)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 hop0_addr = 0, hop0_pte_addr = 0,
                hop1_addr = 0, hop1_pte_addr = 0,
                hop2_addr = 0, hop2_pte_addr = 0,
                hop3_addr = 0, hop3_pte_addr = 0,
                hop4_addr = 0, hop4_pte_addr = 0,
                curr_pte = 0;
        bool hop1_new = false, hop2_new = false, hop3_new = false,
                        hop4_new = false, is_huge, is_dram_addr;
        int rc = -ENOMEM;

        /*
         * This mapping function can map a 4KB/2MB page. For 2MB page there are
         * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
         * pages only but user memory could have been allocated with one of the
         * two page sizes. Since this is a common code for all the three cases,
         * we need this huge page check.
         */
        is_huge = page_size == PAGE_SIZE_2MB;

        is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
                                prop->va_space_dram_start_address,
                                prop->va_space_dram_end_address);

        if (is_dram_addr && !is_huge) {
                dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
                return -EFAULT;
        }

        hop0_addr = get_hop0_addr(ctx);
        hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
        curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

        hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
        if (hop1_addr == ULLONG_MAX)
                goto err;

        hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
        curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

        hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
        if (hop2_addr == ULLONG_MAX)
                goto err;

        hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
        curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

        hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
        if (hop3_addr == ULLONG_MAX)
                goto err;

        hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
        curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

        if (!is_huge) {
                hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
                if (hop4_addr == ULLONG_MAX)
                        goto err;

                hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
                curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
        }

        if (hdev->dram_default_page_mapping && is_dram_addr) {
                u64 default_pte = (prop->mmu_dram_default_page_addr &
                                        PTE_PHYS_ADDR_MASK) | LAST_MASK |
                                                PAGE_PRESENT_MASK;

                if (curr_pte != default_pte) {
                        dev_err(hdev->dev,
                                "DRAM: mapping already exists for virt_addr 0x%llx\n",
                                        virt_addr);
                        rc = -EINVAL;
                        goto err;
                }

                if (hop1_new || hop2_new || hop3_new || hop4_new) {
                        dev_err(hdev->dev,
                                "DRAM mapping should not allocate more hops\n");
                        rc = -EFAULT;
                        goto err;
                }
        } else if (curr_pte & PAGE_PRESENT_MASK) {
                dev_err(hdev->dev,
                        "mapping already exists for virt_addr 0x%llx\n",
                                virt_addr);

                dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
                        *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
                dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
                        *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
                dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
                        *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
                dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
                        *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);

                if (!is_huge)
                        dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
                                *(u64 *) (uintptr_t) hop4_pte_addr,
                                hop4_pte_addr);

                rc = -EINVAL;
                goto err;
        }

        curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
                        | PAGE_PRESENT_MASK;

        if (is_huge)
                write_final_pte(ctx, hop3_pte_addr, curr_pte);
        else
                write_final_pte(ctx, hop4_pte_addr, curr_pte);

        if (hop1_new) {
                curr_pte =
                        (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
                write_pte(ctx, hop0_pte_addr, curr_pte);
        }
        if (hop2_new) {
                curr_pte =
                        (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
                write_pte(ctx, hop1_pte_addr, curr_pte);
                get_pte(ctx, hop1_addr);
        }
        if (hop3_new) {
                curr_pte =
                        (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
                write_pte(ctx, hop2_pte_addr, curr_pte);
                get_pte(ctx, hop2_addr);
        }

        if (!is_huge) {
                if (hop4_new) {
                        curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
                                        PAGE_PRESENT_MASK;
                        write_pte(ctx, hop3_pte_addr, curr_pte);
                        get_pte(ctx, hop3_addr);
                }

                get_pte(ctx, hop4_addr);
        } else {
                get_pte(ctx, hop3_addr);
        }

        flush(ctx);

        return 0;

err:
        if (hop4_new)
                free_hop(ctx, hop4_addr);
        if (hop3_new)
                free_hop(ctx, hop3_addr);
        if (hop2_new)
                free_hop(ctx, hop2_addr);
        if (hop1_new)
                free_hop(ctx, hop1_addr);

        return rc;
}
/*
 * hl_mmu_map - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory area.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
        struct hl_device *hdev = ctx->hdev;
        u64 real_virt_addr, real_phys_addr;
        u32 real_page_size, npages;
        int i, rc, mapped_cnt = 0;

        if (!hdev->mmu_enable)
                return 0;

        /*
         * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
         * is bigger, we break it to sub-pages and map them separately.
         */
        if ((page_size % PAGE_SIZE_2MB) == 0) {
                real_page_size = PAGE_SIZE_2MB;
        } else if ((page_size % PAGE_SIZE_4KB) == 0) {
                real_page_size = PAGE_SIZE_4KB;
        } else {
                dev_err(hdev->dev,
                        "page size of %u is not 4KB nor 2MB aligned, can't map\n",
                        page_size);

                return -EFAULT;
        }

        WARN_ONCE((phys_addr & (real_page_size - 1)),
                "Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
                phys_addr, real_page_size);

        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
        real_phys_addr = phys_addr;

        for (i = 0 ; i < npages ; i++) {
                rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
                                real_page_size);
                if (rc)
                        goto err;

                real_virt_addr += real_page_size;
                real_phys_addr += real_page_size;
                mapped_cnt++;
        }

        return 0;

err:
        real_virt_addr = virt_addr;
        for (i = 0 ; i < mapped_cnt ; i++) {
                if (_hl_mmu_unmap(ctx, real_virt_addr))
                        dev_warn_ratelimited(hdev->dev,
                                "failed to unmap va: 0x%llx\n", real_virt_addr);

                real_virt_addr += real_page_size;
        }

        return rc;
}
/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{

}