// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/genalloc.h>
#include <linux/slab.h>
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
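/*
 * Note on the page table scheme used throughout this file (a summary of the
 * code below, not authoritative documentation): every MMU hop table exists
 * twice. The "physical" copy lives in device memory and is carved out of
 * hdev->mmu_pgt_pool; the H/W walks that copy. The "shadow" copy is a
 * kzalloc'ed host buffer that the driver reads and writes directly, and
 * ctx->mmu_shadow_hash maps a shadow hop address to its pgt_info, which in
 * turn records the matching physical address. Writes go to both copies (see
 * write_pte()/write_final_pte() below); reads are served from the shadow
 * copy only.
 */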
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}
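/*
 * Descriptive note on hop allocation: alloc_hop() hands out the *shadow*
 * address of a new hop and stores the matching physical address in the hop's
 * pgt_info, so the walking code below operates on shadow addresses and only
 * translates to physical ones at write time. ULLONG_MAX is the failure
 * marker; callers must check for it rather than for 0/NULL.
 */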
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
static inline void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}
/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
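/*
 * Worked example for write_pte() (illustrative values, not taken from real
 * H/W): if val == 0x00000000ab123003, i.e. the next shadow hop table is at
 * 0x..ab123000 with flag bits 0x3 in the 12 LSBs, then
 * get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) yields that hop's physical
 * address, say 0x00000000cd456000, and phys_val = 0x00000000cd456000 | 0x3
 * is what reaches the device, while the shadow PTE keeps the original val.
 */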
/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to physical address */
	write_final_pte(ctx, pte_addr, 0);
}
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}
/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the hop was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info structure.
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}
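/*
 * Descriptive note on hop reference counting: num_of_ptes counts how many
 * valid PTEs currently live in a hop table. get_pte() increments it whenever
 * a PTE in that hop is populated, put_pte() decrements it and frees the hop
 * once the count drops to zero, which is what lets the unmap path collapse
 * empty hops bottom-up.
 */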
static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
					u64 virt_addr, u64 mask, u64 shift)
{
	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
			((virt_addr & mask) >> shift);
}
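/*
 * Example of the PTE address arithmetic above (mask/shift values are
 * ASIC-specific and come from hl_mmu_properties; the numbers here are only
 * for illustration): with mmu_pte_size == 8, hop3_shift == 21 and hop3_mask
 * covering bits 29:21, a virtual address of 0x12345678 selects PTE index
 * (0x12345678 & mask) >> 21 == 0x91, i.e. the PTE at hop_addr + 8 * 0x91.
 */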
static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
					mmu_prop->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
					mmu_prop->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
					mmu_prop->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
					mmu_prop->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
					mmu_prop->hop4_shift);
}
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	if (curr_pte & PAGE_PRESENT_MASK)
		return curr_pte & HOP_PHYS_ADDR_MASK;
	else
		return ULLONG_MAX;
}
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}
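/*
 * Descriptive note: *is_new_hop is set only when this call had to allocate
 * the hop (i.e. the PTE was not present). _hl_mmu_map() uses these flags to
 * decide which parent PTEs need to be written and which hops to roll back on
 * failure. The flag is never cleared here, so callers must initialize it to
 * false.
 */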
/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	/* hop 0 physical address is taken from the device's fixed properties */
	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}
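/*
 * Descriptive note: the translation above relies on every hop table (shadow
 * and physical alike) being aligned to mmu_hop_table_size, so that masking
 * with (mmu_hop_table_size - 1) splits an address cleanly into a hop base
 * and a PTE offset. This is expected to hold here because the table size is
 * a power of two: the gen_pool is created with that allocation order, and a
 * kzalloc() of a power-of-two size returns naturally aligned memory.
 */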
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!hdev->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;

	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}

	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;

hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}
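/*
 * Shape of the default DRAM mapping built above (derived from the code, with
 * refcounts as maintained by get_pte()): hop0[0] -> hop1, hop1[0] -> hop2,
 * hop2[i] -> hop3[i] for each of the num_of_hop3 tables, and every PTE of
 * every hop3 table points at prop->mmu_dram_default_page_addr with the LAST
 * and PRESENT bits set. Each hop3 table therefore starts life with a full
 * PTE_ENTRIES_IN_HOP refcount, which dram_default_mapping_fini() unwinds.
 */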
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!hdev->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	hop0_addr = get_hop0_addr(ctx);
	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}
/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->mmu_enable)
		return 0;

	hdev->mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
						prop->mmu_hop_table_size,
						GFP_KERNEL | __GFP_ZERO);
	if (!hdev->mmu_shadow_hop0) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_pgt_pool);

	return rc;
}
/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return;

	/* MMU H/W fini was already done in device hw_fini() */

	kvfree(hdev->mmu_shadow_hop0);
	gen_pool_destroy(hdev->mmu_pgt_pool);
}
/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash to hold
 * all page table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return 0;

	mutex_init(&ctx->mmu_lock);
	hash_init(ctx->mmu_phys_hash);
	hash_init(ctx->mmu_shadow_hash);

	return dram_default_mapping_init(ctx);
}
/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	if (!hdev->mmu_enable)
		return;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}

	mutex_destroy(&ctx->mmu_lock);
}
static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte = 0;
	bool is_huge, clear_hop3 = true;

	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop1_addr == ULLONG_MAX)
		goto not_mapped;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop2_addr == ULLONG_MAX)
		goto not_mapped;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop3_addr == ULLONG_MAX)
		goto not_mapped;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	is_huge = curr_pte & LAST_MASK;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev,
				"DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop4_addr = get_next_hop_addr(ctx, curr_pte);

		if (hop4_addr == ULLONG_MAX)
			goto not_mapped;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);

		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;

		clear_hop3 = false;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | LAST_MASK |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		write_final_pte(ctx, hop3_pte_addr, default_pte);
		put_pte(ctx, hop3_addr);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop4_addr)
			clear_pte(ctx, hop4_pte_addr);
		else
			clear_pte(ctx, hop3_pte_addr);

		if (hop4_addr && !put_pte(ctx, hop4_addr))
			clear_hop3 = true;

		if (!clear_hop3)
			goto mapped;

		clear_pte(ctx, hop3_pte_addr);

		if (put_pte(ctx, hop3_addr))
			goto mapped;

		clear_pte(ctx, hop2_pte_addr);

		if (put_pte(ctx, hop2_addr))
			goto mapped;

		clear_pte(ctx, hop1_pte_addr);

		if (put_pte(ctx, hop1_addr))
			goto mapped;

		clear_pte(ctx, hop0_pte_addr);
	}

mapped:
	flush(ctx);

	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}
/*
 * hl_mmu_unmap - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire unmapping of the memory area.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr;
	u32 real_page_size, npages;
	int i, rc = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
				prop->va_space_dram_start_address,
				prop->va_space_dram_end_address);

	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and unmap them separately.
	 */
	if ((page_size % mmu_prop->huge_page_size) == 0) {
		real_page_size = mmu_prop->huge_page_size;
	} else if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
			page_size,
			mmu_prop->page_size >> 10,
			mmu_prop->huge_page_size >> 20);

		return -EFAULT;
	}

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	return rc;
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte = 0;
	bool hop1_new = false, hop2_new = false, hop3_new = false,
			hop4_new = false, is_huge;
	int rc = -ENOMEM;

	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	/*
	 * This mapping function can map a page or a huge page. For a huge page
	 * there are only 3 hops rather than 4. Currently the DRAM allocation
	 * uses huge pages only but user memory could have been allocated with
	 * one of the two page sizes. Since this is common code for all the
	 * three cases, we need this huge page check.
	 */
	is_huge = page_size == mmu_prop->huge_page_size;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
		return -EFAULT;
	}

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
	if (hop1_addr == ULLONG_MAX)
		goto err;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
	if (hop2_addr == ULLONG_MAX)
		goto err;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
	if (hop3_addr == ULLONG_MAX)
		goto err;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	if (!is_huge) {
		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
		if (hop4_addr == ULLONG_MAX)
			goto err;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);
		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | LAST_MASK |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
					virt_addr);
			rc = -EINVAL;
			goto err;
		}

		if (hop1_new || hop2_new || hop3_new || hop4_new) {
			dev_err(hdev->dev,
				"DRAM mapping should not allocate more hops\n");
			rc = -EFAULT;
			goto err;
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
				virt_addr);

		dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
		dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
		dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
		dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);

		if (!is_huge)
			dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
				*(u64 *) (uintptr_t) hop4_pte_addr,
				hop4_pte_addr);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
			| PAGE_PRESENT_MASK;

	if (is_huge)
		write_final_pte(ctx, hop3_pte_addr, curr_pte);
	else
		write_final_pte(ctx, hop4_pte_addr, curr_pte);

	if (hop1_new) {
		curr_pte =
			(hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop0_pte_addr, curr_pte);
	}
	if (hop2_new) {
		curr_pte =
			(hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop1_pte_addr, curr_pte);
		get_pte(ctx, hop1_addr);
	}
	if (hop3_new) {
		curr_pte =
			(hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, curr_pte);
		get_pte(ctx, hop2_addr);
	}

	if (!is_huge) {
		if (hop4_new) {
			curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
					PAGE_PRESENT_MASK;
			write_pte(ctx, hop3_pte_addr, curr_pte);
			get_pte(ctx, hop3_addr);
		}

		get_pte(ctx, hop4_addr);
	} else {
		get_pte(ctx, hop3_addr);
	}

	flush(ctx);

	return 0;

err:
	if (hop4_new)
		free_hop(ctx, hop4_addr);
	if (hop3_new)
		free_hop(ctx, hop3_addr);
	if (hop2_new)
		free_hop(ctx, hop2_addr);
	if (hop1_new)
		free_hop(ctx, hop1_addr);

	return rc;
}
/*
 * hl_mmu_map - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys addr
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory area.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	u32 real_page_size, npages;
	int i, rc, mapped_cnt = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
				prop->va_space_dram_start_address,
				prop->va_space_dram_end_address);

	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->huge_page_size) == 0) {
		real_page_size = mmu_prop->huge_page_size;
	} else if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB nor %uMB aligned, can't map\n",
			page_size,
			mmu_prop->page_size >> 10,
			mmu_prop->huge_page_size >> 20);

		return -EFAULT;
	}

	WARN_ONCE((phys_addr & (real_page_size - 1)),
		"Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
		phys_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
				real_page_size, is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	return rc;
}
/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{

}